def _rebuild_all_command_chains(self):
    """Rebuilds the execution chain for all registered commands.

    This method is typically called when interceptors are changed. Because of
    that, it is more efficient to register interceptors before registering
    commands (typically this is done in abstract classes). That way the
    performance penalty is paid only once, at creation time.
    """
    self._commands_by_name = {}
    for command in self._commands:
        self._build_command_chain(command)
def password(self, value):
    """Gets/sets the current password."""
    if isinstance(value, str):
        self._password = value
        self._handler = None
def parse_options(self, kwargs):
    """Validate the provided kwargs and return the validated options."""
    kwargs = {camelize(key): value for key, value in kwargs.items()}
    for key in kwargs.keys():
        assert key in self.valid_options, (
            'The option {} is not in the available options: {}.'.format(
                key, ', '.join(self.valid_options)))
        assert isinstance(kwargs[key], self.valid_options[key]), (
            'The option {} must be one of the following types: {}.'.format(
                key, self.valid_options[key]))
    return kwargs
def load(args):
    """:param args: Will be used to infer the proper configuration name, or,
    if args.ceph_conf is passed in, that will take precedence.
    """
    path = args.ceph_conf or '{cluster}.conf'.format(cluster=args.cluster)
    try:
        f = open(path)
    except IOError as e:
        raise exc.ConfigError(
            "%s; has `ceph-deploy new` been run in this directory?" % e)
    else:
        with contextlib.closing(f):
            return parse(f)
def send_async(self, msg, persist=False):
    """Arrange for `msg` to be delivered to this context, with replies
    directed to a newly constructed receiver. :attr:`dst_id <Message.dst_id>`
    is set to the target context ID, and :attr:`reply_to <Message.reply_to>`
    is set to the newly constructed receiver's handle.

    :param bool persist:
        If :data:`False`, the handler will be unregistered after a single
        message has been received.
    :param mitogen.core.Message msg:
        The message.
    :returns:
        :class:`Receiver` configured to receive any replies sent to the
        message's `reply_to` handle.
    """
    if self.router.broker._thread == threading.currentThread():  # TODO
        raise SystemError('Cannot make blocking call on broker thread')

    receiver = Receiver(self.router, persist=persist, respondent=self)
    msg.dst_id = self.context_id
    msg.reply_to = receiver.handle

    _v and LOG.debug('%r.send_async(%r)', self, msg)
    self.send(msg)
    return receiver
def name(cls):
    """Return the preferred name as which this command will be known."""
    name = cls.__name__.replace("_", "-").lower()
    name = name[4:] if name.startswith("cmd-") else name
    return name
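A quick illustration of the naming rule, treating the classmethod above as a plain function and using a hypothetical command class:

class cmd_list_items:
    pass

print(name(cmd_list_items))  # "list-items": underscores become dashes, "cmd-" prefix is dropped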
def default_for(self, style_type):
    """Return `w:style[@w:type="*{style_type}*"][-1]` or |None| if not found."""
    default_styles_for_type = [
        s for s in self._iter_styles()
        if s.type == style_type and s.default
    ]
    if not default_styles_for_type:
        return None
    # spec calls for last default in document order
    return default_styles_for_type[-1]
def get_sketch(self, sketch_id):
    """Get information on the specified sketch.

    Args:
        sketch_id (int): ID of sketch

    Returns:
        dict: Dictionary of sketch information

    Raises:
        ValueError: Sketch is inaccessible
    """
    resource_url = '{0:s}/sketches/{1:d}/'.format(self.api_base_url, sketch_id)
    response = self.session.get(resource_url)
    response_dict = response.json()
    try:
        response_dict['objects']
    except KeyError:
        raise ValueError('Sketch does not exist or you have no access')
    return response_dict
def from_merge_origin(cls, tc):
    """Return instance created from merge-origin tc element."""
    other_tc = tc.tbl.tc(
        tc.row_idx + tc.rowSpan - 1,   # --- other_row_idx
        tc.col_idx + tc.gridSpan - 1   # --- other_col_idx
    )
    return cls(tc, other_tc)
def assert_valid_execution_arguments(
    schema: GraphQLSchema,
    document: DocumentNode,
    raw_variable_values: Dict[str, Any] = None,
) -> None:
    """Check that the arguments are acceptable.

    Essential assertions before executing to provide developer feedback for
    improper use of the GraphQL library.
    """
    if not document:
        raise TypeError("Must provide document")

    # If the schema used for execution is invalid, throw an error.
    assert_valid_schema(schema)

    # Variables, if provided, must be a dictionary.
    if not (raw_variable_values is None or isinstance(raw_variable_values, dict)):
        raise TypeError(
            "Variables must be provided as a dictionary where each property is a"
            " variable value. Perhaps look to see if an unparsed JSON string was"
            " provided."
        )
def set_id(self, id='$'):
    """Set the last-read message id for each stream in the consumer group.
    By default, this will be the special "$" identifier, meaning all messages
    are marked as having been read.

    :param id: id of last-read message (or "$").
    """
    accum = {}
    for key in self.keys:
        accum[key] = self.database.xgroup_setid(key, self.name, id)
    return accum
def main(argv: typing.Optional[typing.Sequence] = None) -> typing.NoReturn:
    """Main entry point for the konch CLI."""
    args = parse_args(argv)

    if args["--debug"]:
        logging.basicConfig(
            format="%(levelname)s %(filename)s: %(message)s", level=logging.DEBUG
        )
    logger.debug(args)

    config_file: typing.Union[Path, None]
    if args["init"]:
        config_file = Path(args["<config_file>"] or CONFIG_FILE)
        init_config(config_file)
    else:
        config_file = Path(args["<config_file>"]) if args["<config_file>"] else None

    if args["edit"]:
        edit_config(config_file)
    elif args["allow"]:
        allow_config(config_file)
    elif args["deny"]:
        deny_config(config_file)

    mod = use_file(Path(args["--file"]) if args["--file"] else None)
    if hasattr(mod, "setup"):
        mod.setup()  # type: ignore

    if args["--name"]:
        if args["--name"] not in _config_registry:
            print_error(f'Invalid --name: "{args["--name"]}"')
            sys.exit(1)
        config_dict = _config_registry[args["--name"]]
        logger.debug(f'Using named config: "{args["--name"]}"')
        logger.debug(config_dict)
    else:
        config_dict = _cfg

    # Allow default shell to be overridden by command-line argument
    shell_name = args["--shell"]
    if shell_name:
        config_dict["shell"] = SHELL_MAP.get(shell_name.lower(), AutoShell)

    logger.debug(f"Starting with config {config_dict}")
    start(**config_dict)

    if hasattr(mod, "teardown"):
        mod.teardown()  # type: ignore
    sys.exit(0)
def arbitrary_object_to_string(a_thing):
    """Take a Python object of some sort and convert it into a human readable
    string. This function is used extensively to convert things like "subject"
    into "subject_key", function -> function_key, etc."""
    # is it None?
    if a_thing is None:
        return ''
    # is it already a string?
    if isinstance(a_thing, six.string_types):
        return a_thing
    if six.PY3 and isinstance(a_thing, six.binary_type):
        try:
            return a_thing.decode('utf-8')
        except UnicodeDecodeError:
            pass
    # does it have a to_str function?
    try:
        return a_thing.to_str()
    except (AttributeError, KeyError, TypeError):
        # AttributeError - no to_str function?
        # KeyError - DotDict has no to_str?
        # TypeError - problem converting
        # nope, no to_str function
        pass
    # is this a type proxy?
    try:
        return arbitrary_object_to_string(a_thing.a_type)
    except (AttributeError, KeyError, TypeError):
        # nope, no a_type property
        pass
    # is it a built in?
    try:
        return known_mapping_type_to_str[a_thing]
    except (KeyError, TypeError):
        # nope, not a builtin
        pass
    # is it something from a loaded module?
    try:
        if a_thing.__module__ not in ('__builtin__', 'builtins', 'exceptions'):
            if a_thing.__module__ == "__main__":
                module_name = (
                    sys.modules['__main__']
                    .__file__[:-3]
                    .replace('/', '.')
                    .strip('.')
                )
            else:
                module_name = a_thing.__module__
            return "%s.%s" % (module_name, a_thing.__name__)
    except AttributeError:
        # nope, not one of these
        pass
    # maybe it has a __name__ attribute?
    try:
        return a_thing.__name__
    except AttributeError:
        # nope, not one of these
        pass
    # punt and see what happens if we just cast it to string
    return str(a_thing)
def _is_significant(stats, metrics=None):
    """Filter significant motifs based on several statistics.

    Parameters
    ----------
    stats : dict
        Statistics dictionary object.
    metrics : sequence
        Metrics with associated minimum values. The default is
        (("max_enrichment", 3), ("roc_auc", 0.55), ("enr_at_fpr", 0.55)).

    Returns
    -------
    significant : bool
    """
    if metrics is None:
        metrics = (("max_enrichment", 3), ("roc_auc", 0.55), ("enr_at_fpr", 0.55))

    for stat_name, min_value in metrics:
        if stats.get(stat_name, 0) < min_value:
            return False

    return True
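A small usage sketch with made-up statistics; missing keys default to 0 and therefore fail the thresholds:

print(_is_significant({"max_enrichment": 4.2, "roc_auc": 0.61, "enr_at_fpr": 0.60}))  # True
print(_is_significant({"max_enrichment": 2.0, "roc_auc": 0.61, "enr_at_fpr": 0.60}))  # False: enrichment below 3
print(_is_significant({}, metrics=(("roc_auc", 0.5),)))                               # False: missing stat defaults to 0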
def _make_txn(signer, setting_key, payload):
    """Creates and signs a sawtooth_settings transaction with a payload."""
    serialized_payload = payload.SerializeToString()
    header = TransactionHeader(
        signer_public_key=signer.get_public_key().as_hex(),
        family_name='sawtooth_settings',
        family_version='1.0',
        inputs=_config_inputs(setting_key),
        outputs=_config_outputs(setting_key),
        dependencies=[],
        payload_sha512=hashlib.sha512(serialized_payload).hexdigest(),
        batcher_public_key=signer.get_public_key().as_hex()
    ).SerializeToString()

    return Transaction(
        header=header,
        header_signature=signer.sign(header),
        payload=serialized_payload)
def get_root(self, **kwargs):
    '''Returns this tree if it has no parents; otherwise, moves up via the
    parent links of this tree until reaching the tree with no parents, and
    returns that parentless tree as the root.'''
    if self.parent is None:
        return self
    else:
        return self.parent.get_root(**kwargs)
def _logpdf(self, **kwargs):
    """Returns the log of the pdf at the given values. The keyword arguments
    must contain all of the parameters in self's params. Unrecognized
    arguments are ignored."""
    if kwargs not in self:
        return -numpy.inf
    return self._lognorm + numpy.log(
        self._dfunc(numpy.array([kwargs[p] for p in self._params]))).sum()
def endpoint(self, *args):
    """endpoint: Decorates a function to make it a CLI endpoint.

    The function must be called do_<some>_<action> and accept one 'args'
    parameter. It will be converted into a ./cli some action commandline
    endpoint. A set of Arguments can be passed to the decorator; the syntax is
    the same as the argparse add_argument function.
    """
    # Decorator function
    def decorator(func):
        func_name = func.__name__
        func_name = func_name.replace("do_", "")
        actions = func_name.split("_")
        cmd_parser = None
        sub = self.subparsers
        wcount = 0
        # For each word in the command we build the parsing tree
        for word in actions:
            parser_name = '_'.join(actions[:wcount + 1])
            # If the parser exists, we use it, otherwise we create it
            if parser_name in self.parsers:
                cmd_parser = self.parsers[parser_name]
            else:
                cmd_parser = sub.add_parser(word)
                self.parsers[parser_name] = cmd_parser
            # We don't want to add a subparser to the final endpoint, since it
            # would require a void positional argument and break the whole
            # thing.
            if wcount != len(actions) - 1:
                # Same as with the parsers: if it exists we use it, otherwise
                # we create it. It avoids overwrites.
                if "sub_" + parser_name in self.parsers:
                    sub = self.parsers["sub_" + parser_name]
                else:
                    sub = cmd_parser.add_subparsers()
                    self.parsers["sub_" + parser_name] = sub
            wcount += 1
        # Bind the endpoint to the function
        cmd_parser.set_defaults(func=func)
        # We add the arguments to the function
        for argument in args:
            if isinstance(argument, Argument):
                cmd_parser.add_argument(*argument.args, **argument.kwargs)
            elif isinstance(argument, ExclusiveGroup):
                group = cmd_parser.add_mutually_exclusive_group(
                    required=argument.required)
                for arg in argument.args:
                    group.add_argument(*arg.args, **arg.kwargs)

        # Standard inner function
        def inner(*args, **kwargs):
            return func(*args, **kwargs)
        return inner
    return decorator
def kde_plot_df(df, xlims=None, **kwargs):
    """Plots kde estimates of distributions of samples in each cell of the
    input pandas DataFrame.

    There is one subplot for each dataframe column, and on each subplot there
    is one kde line.

    Parameters
    ----------
    df : pandas data frame
        Each cell must contain a 1d numpy array of samples.
    xlims : dict, optional
        Dictionary of xlimits - keys are column names and values are lists of
        length 2.
    num_xticks : int, optional
        Number of xticks on each subplot.
    figsize : tuple, optional
        Size of figure in inches.
    nrows : int, optional
        Number of rows of subplots.
    ncols : int, optional
        Number of columns of subplots.
    normalize : bool, optional
        If True, kde plots are normalized to have the same area under their
        curves. If False, their max value is set to 1.
    legend : bool, optional
        Should a legend be added?
    legend_kwargs : dict, optional
        Additional kwargs for legend.

    Returns
    -------
    fig : matplotlib figure
    """
    assert xlims is None or isinstance(xlims, dict)
    figsize = kwargs.pop('figsize', (6.4, 1.5))
    num_xticks = kwargs.pop('num_xticks', None)
    nrows = kwargs.pop('nrows', 1)
    ncols = kwargs.pop('ncols', int(np.ceil(len(df.columns) / nrows)))
    normalize = kwargs.pop('normalize', True)
    legend = kwargs.pop('legend', False)
    legend_kwargs = kwargs.pop('legend_kwargs', {})
    if kwargs:
        raise TypeError('Unexpected **kwargs: {0}'.format(kwargs))
    fig, axes = plt.subplots(nrows=nrows, ncols=ncols, figsize=figsize)
    for nax, col in enumerate(df):
        if nrows == 1:
            ax = axes[nax]
        else:
            ax = axes[nax // ncols, nax % ncols]
        supmin = df[col].apply(np.min).min()
        supmax = df[col].apply(np.max).max()
        support = np.linspace(supmin - 0.1 * (supmax - supmin),
                              supmax + 0.1 * (supmax - supmin), 200)
        handles = []
        labels = []
        for name, samps in df[col].iteritems():
            pdf = scipy.stats.gaussian_kde(samps)(support)
            if not normalize:
                pdf /= pdf.max()
            handles.append(ax.plot(support, pdf, label=name)[0])
            labels.append(name)
        ax.set_ylim(bottom=0)
        ax.set_yticks([])
        if xlims is not None:
            try:
                ax.set_xlim(xlims[col])
            except KeyError:
                pass
        ax.set_xlabel(col)
        if num_xticks is not None:
            ax.xaxis.set_major_locator(
                matplotlib.ticker.MaxNLocator(nbins=num_xticks))
    if legend:
        fig.legend(handles, labels, **legend_kwargs)
    return fig
def output_forecasts_json(self, forecasts, condition_model_names,
                          size_model_names, dist_model_names,
                          track_model_names, json_data_path, out_path):
    """Output forecast values to geoJSON file format.

    :param forecasts:
    :param condition_model_names:
    :param size_model_names:
    :param track_model_names:
    :param json_data_path:
    :param out_path:
    :return:
    """
total_tracks = self . data [ "forecast" ] [ "total" ] for r in np . arange ( total_tracks . shape [ 0 ] ) : track_id = total_tracks . loc [ r , "Track_ID" ] print ( track_id ) track_num = track_id . split ( "_" ) [ - 1 ] ensemble_name = total_tracks . loc [ r , "Ensemble_Name" ] member = total_tracks . loc [ r , "Ensemble_Member" ] group = self . data [ "forecast" ] [ "member" ] . loc [ self . data [ "forecast" ] [ "member" ] [ "Ensemble_Member" ] == member , self . group_col ] . values [ 0 ] run_date = track_id . split ( "_" ) [ - 4 ] [ : 8 ] step_forecasts = { } for ml_model in condition_model_names : step_forecasts [ "condition_" + ml_model . replace ( " " , "-" ) ] = forecasts [ "condition" ] [ group ] . loc [ forecasts [ "condition" ] [ group ] [ "Track_ID" ] == track_id , ml_model ] for ml_model in size_model_names : step_forecasts [ "size_" + ml_model . replace ( " " , "-" ) ] = forecasts [ "size" ] [ group ] [ ml_model ] . loc [ forecasts [ "size" ] [ group ] [ ml_model ] [ "Track_ID" ] == track_id ] for ml_model in dist_model_names : step_forecasts [ "dist_" + ml_model . replace ( " " , "-" ) ] = forecasts [ "dist" ] [ group ] [ ml_model ] . loc [ forecasts [ "dist" ] [ group ] [ ml_model ] [ "Track_ID" ] == track_id ] for model_type in forecasts [ "track" ] . keys ( ) : for ml_model in track_model_names : mframe = forecasts [ "track" ] [ model_type ] [ group ] [ ml_model ] step_forecasts [ model_type + "_" + ml_model . replace ( " " , "-" ) ] = mframe . loc [ mframe [ "Track_ID" ] == track_id ] json_file_name = "{0}_{1}_{2}_model_track_{3}.json" . format ( ensemble_name , run_date , member , track_num ) full_json_path = json_data_path + "/" . join ( [ run_date , member ] ) + "/" + json_file_name with open ( full_json_path ) as json_file_obj : try : track_obj = json . load ( json_file_obj ) except FileNotFoundError : print ( full_json_path + " not found" ) continue for f , feature in enumerate ( track_obj [ 'features' ] ) : del feature [ 'properties' ] [ 'attributes' ] for model_name , fdata in step_forecasts . items ( ) : ml_model_name = model_name . split ( "_" ) [ 1 ] if "condition" in model_name : feature [ 'properties' ] [ model_name ] = fdata . values [ f ] else : predcols = [ ] for col in fdata . columns : if ml_model_name in col : predcols . append ( col ) feature [ 'properties' ] [ model_name ] = fdata . loc [ : , predcols ] . values [ f ] . tolist ( ) full_path = [ ] for part in [ run_date , member ] : full_path . append ( part ) if not os . access ( out_path + "/" . join ( full_path ) , os . R_OK ) : try : os . mkdir ( out_path + "/" . join ( full_path ) ) except OSError : print ( "directory already created" ) out_json_filename = out_path + "/" . join ( full_path ) + "/" + json_file_name with open ( out_json_filename , "w" ) as out_json_obj : json . dump ( track_obj , out_json_obj , indent = 1 , sort_keys = True ) return
def _convert_claripy_bool_ast(self, cond):
    """Convert recovered reaching conditions from claripy ASTs to ailment
    Expressions.

    :return: None
    """
    if isinstance(cond, ailment.Expr.Expression):
        return cond

    if cond.op == "BoolS" and claripy.is_true(cond):
        return cond
    if cond in self._condition_mapping:
        return self._condition_mapping[cond]

    _mapping = {
        'Not': lambda cond_: ailment.Expr.UnaryOp(
            None, 'Not', self._convert_claripy_bool_ast(cond_.args[0])),
        'And': lambda cond_: ailment.Expr.BinaryOp(
            None, 'LogicalAnd', (
                self._convert_claripy_bool_ast(cond_.args[0]),
                self._convert_claripy_bool_ast(cond_.args[1]),
            )),
        'Or': lambda cond_: ailment.Expr.BinaryOp(
            None, 'LogicalOr', (
                self._convert_claripy_bool_ast(cond_.args[0]),
                self._convert_claripy_bool_ast(cond_.args[1]),
            )),
        'ULE': lambda cond_: ailment.Expr.BinaryOp(
            None, 'CmpULE', tuple(map(self._convert_claripy_bool_ast, cond_.args))),
        '__le__': lambda cond_: ailment.Expr.BinaryOp(
            None, 'CmpLE', tuple(map(self._convert_claripy_bool_ast, cond_.args))),
        'UGT': lambda cond_: ailment.Expr.BinaryOp(
            None, 'CmpUGT', tuple(map(self._convert_claripy_bool_ast, cond_.args))),
        '__gt__': lambda cond_: ailment.Expr.BinaryOp(
            None, 'CmpGT', tuple(map(self._convert_claripy_bool_ast, cond_.args))),
        '__eq__': lambda cond_: ailment.Expr.BinaryOp(
            None, 'CmpEQ', tuple(map(self._convert_claripy_bool_ast, cond_.args))),
        '__ne__': lambda cond_: ailment.Expr.BinaryOp(
            None, 'CmpNE', tuple(map(self._convert_claripy_bool_ast, cond_.args))),
        '__xor__': lambda cond_: ailment.Expr.BinaryOp(
            None, 'Xor', tuple(map(self._convert_claripy_bool_ast, cond_.args))),
        'BVV': lambda cond_: ailment.Expr.Const(
            None, None, cond_.args[0], cond_.size()),
        'BoolV': lambda cond_: ailment.Expr.Const(None, None, True, 1)
            if cond_.args[0] is True
            else ailment.Expr.Const(None, None, False, 1),
    }

    if cond.op in _mapping:
        return _mapping[cond.op](cond)
    raise NotImplementedError(
        ("Condition variable %s has an unsupported operator %s. "
         "Consider implementing.") % (cond, cond.op))
def forever(klass, *args, **kws):
    """Create a server and block the calling thread until KeyboardInterrupt.
    Shorthand for::

        with Server(*args, **kws):
            try:
                time.sleep(99999)
            except KeyboardInterrupt:
                pass
    """
    with klass(*args, **kws):
        _log.info("Running server")
        try:
            while True:
                time.sleep(100)
        except KeyboardInterrupt:
            pass
        finally:
            _log.info("Stopping server")
def get_elements(parent_to_parse, element_path):
    """:return: all elements by name from the parsed parent element.
    :see: get_element(parent_to_parse, element_path)
    """
    element = get_element(parent_to_parse)
    if element is None or not element_path:
        return []
    return element.findall(element_path)
def retrieve(self, id):
    """Retrieve a single lead.

    Returns a single lead available to the user, according to the unique lead
    ID provided. If the specified lead does not exist, this query returns an
    error.

    :calls: ``get /leads/{id}``
    :param int id: Unique identifier of a Lead.
    :return: Dictionary that supports attribute-style access and represents a
        Lead resource.
    :rtype: dict
    """
    _, _, lead = self.http_client.get("/leads/{id}".format(id=id))
    return lead
def register(self, observers):
    """Concrete method of Subject.register().

    Register observers as an argument to self.observers.
    """
    if isinstance(observers, (list, tuple)):
        for observer in observers:
            # check whether it inherits from "base.Observer"
            if isinstance(observer, base.Observer):
                self._observers.append(observer)
            else:
                raise InhelitanceError(base.Observer.__name__)
    elif isinstance(observers, base.Observer):
        self._observers.append(observers)
def hierarchical_redundancy(rdf, fix=False):
    """Check for and optionally remove extraneous skos:broader relations.

    :param Graph rdf: An rdflib.graph.Graph object.
    :param bool fix: Fix the problem by removing skos:broader relations
        between concepts that are otherwise connected by
        skos:broaderTransitive.
    """
    for conc, parent1 in rdf.subject_objects(SKOS.broader):
        for parent2 in rdf.objects(conc, SKOS.broader):
            if parent1 == parent2:
                continue  # must be different
            if parent2 in rdf.transitive_objects(parent1, SKOS.broader):
                if fix:
                    logging.warning(
                        "Eliminating redundant hierarchical relationship: "
                        "%s skos:broader %s", conc, parent2)
                    rdf.remove((conc, SKOS.broader, parent2))
                    rdf.remove((conc, SKOS.broaderTransitive, parent2))
                    rdf.remove((parent2, SKOS.narrower, conc))
                    rdf.remove((parent2, SKOS.narrowerTransitive, conc))
                else:
                    logging.warning(
                        "Redundant hierarchical relationship "
                        "%s skos:broader %s found, but not eliminated "
                        "because eliminate_redundancy is not set",
                        conc, parent2)
def get_contents_to_filename(self, filename, headers=None, cb=None, num_cb=10,
                             torrent=False, version_id=None,
                             res_download_handler=None, response_headers=None,
                             callback=None):
    """Retrieve an object from S3 using the name of the Key object as the key
    in S3. Store contents of the object to a file named by 'filename'. See
    get_contents_to_file method for details about the parameters.

    :type filename: string
    :param filename: The filename of where to put the file contents

    :type headers: dict
    :param headers: Any additional headers to send in the request

    :type cb: function
    :param cb: a callback function that will be called to report progress on
        the upload. The callback should accept two integer parameters, the
        first representing the number of bytes that have been successfully
        transmitted to S3 and the second representing the size of the to be
        transmitted object.

    :type num_cb: int
    :param num_cb: (optional) If a callback is specified with the cb
        parameter this parameter determines the granularity of the callback
        by defining the maximum number of times the callback will be called
        during the file transfer.

    :type torrent: bool
    :param torrent: If True, returns the contents of a torrent file as a
        string.

    :type res_download_handler: ResumableDownloadHandler
    :param res_download_handler: If provided, this handler will perform the
        download.

    :type response_headers: dict
    :param response_headers: A dictionary containing HTTP headers/values that
        will override any headers associated with the stored object in the
        response. See http://goo.gl/EWOPb for details.
    """
    fp = open(filename, 'wb')

    def got_contents_to_filename(response):
        fp.close()
        # if last_modified date was sent from s3, try to set file's timestamp
        if self.last_modified is not None:
            try:
                modified_tuple = rfc822.parsedate_tz(self.last_modified)
                modified_stamp = int(rfc822.mktime_tz(modified_tuple))
                os.utime(fp.name, (modified_stamp, modified_stamp))
            except Exception:
                pass
        if callable(callback):
            callback(response)

    self.get_contents_to_file(
        fp, headers, cb, num_cb, torrent=torrent, version_id=version_id,
        res_download_handler=res_download_handler,
        response_headers=response_headers,
        callback=got_contents_to_filename)
def remote_read(self, maxlength):
    """Called from remote worker to read at most L{maxlength} bytes of data.

    @type maxlength: C{integer}
    @param maxlength: Maximum number of data bytes that can be returned

    @return: Data read from L{fp}
    @rtype: C{string} of bytes read from file
    """
    if self.fp is None:
        return ''

    data = self.fp.read(maxlength)
    return data
def get_id(name=None, tags=None, region=None, key=None, keyid=None,
           profile=None, in_states=None, filters=None):
    '''
    Given instance properties, return the instance id if it exists.

    CLI Example:

    .. code-block:: bash

        salt myminion boto_ec2.get_id myinstance
    '''
    instance_ids = find_instances(name=name, tags=tags, region=region,
                                  key=key, keyid=keyid, profile=profile,
                                  in_states=in_states, filters=filters)
    if instance_ids:
        log.info("Instance ids: %s", " ".join(instance_ids))
        if len(instance_ids) == 1:
            return instance_ids[0]
        else:
            raise CommandExecutionError('Found more than one instance '
                                        'matching the criteria.')
    else:
        log.warning('Could not find instance.')
        return None
async def get_tree(self, prefix, *, dc=None, separator=None, watch=None,
                   consistency=None):
    """Gets all keys with a prefix of Key during the transaction.

    Parameters:
        prefix (str): Prefix to fetch
        separator (str): List only up to a given separator
        dc (str): Specify datacenter that will be used. Defaults to the
            agent's local datacenter.
        watch (Blocking): Do a blocking query
        consistency (Consistency): Force consistency
    Returns:
        CollectionMeta: where value is a list of values

    This does not fail the transaction if the Key doesn't exist. Not all keys
    may be present in the results if ACLs do not permit them to be read.
    """
    response = await self._read(prefix, dc=dc, recurse=True,
                                separator=separator, watch=watch,
                                consistency=consistency)
    result = response.body
    for data in result:
        data["Value"] = decode_value(data["Value"], data["Flags"])
    return consul(result, meta=extract_meta(response.headers))
def scale(input_value, input_min, input_max, out_min, out_max):
    """Scale a value from one range to another."""
    # Figure out how 'wide' each range is
    input_span = input_max - input_min
    output_span = out_max - out_min

    # Convert the left range into a 0-1 range (float)
    valuescaled = float(input_value - input_min) / float(input_span)

    # Convert the 0-1 range into a value in the right range.
    return out_min + (valuescaled * output_span)
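For example, mapping a 10-bit ADC reading onto a percentage (numbers chosen purely to illustrate the linear interpolation):

print(scale(512, 0, 1023, 0, 100))   # ~50.05: roughly halfway through the input range
print(scale(0, 0, 1023, 0, 100))     # 0.0
print(scale(1023, 0, 1023, 0, 100))  # 100.0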
def _sigma_pi_hiE(self, Tp, a):
    """General expression for Tp > 5 GeV (Eq 7)"""
    m_p = self._m_p
    csip = (Tp - 3.0) / m_p
    m1 = a[0] * csip ** a[3] * (1 + np.exp(-a[1] * csip ** a[4]))
    m2 = 1 - np.exp(-a[2] * csip ** 0.25)
    multip = m1 * m2
    return self._sigma_inel(Tp) * multip
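Transcribed directly from the body above (symbols follow the local variable names, not necessarily the paper's notation for Eq 7), the returned quantity is

    \xi_p = \frac{T_p - 3}{m_p}, \qquad
    \sigma_\pi(T_p) = \sigma_{\mathrm{inel}}(T_p)\;
        a_0\,\xi_p^{a_3}\left(1 + e^{-a_1 \xi_p^{a_4}}\right)
        \left(1 - e^{-a_2 \xi_p^{1/4}}\right)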
def create(self, req, driver):
    """Create a network.

    Create a new network on a specific cloud with:

    :Param req: Type object Request
    """
    response = driver.create_network(req.params)
    data = {
        'action': "create",
        'controller': "network",
        'cloud': req.environ['calplus.cloud'],
        'response': response
    }
    return data
def create_asset_content(self, asset_content_form=None):
    """Creates new ``AssetContent`` for a given asset.

    :param asset_content_form: the form for this ``AssetContent``
    :type asset_content_form: ``osid.repository.AssetContentForm``
    :return: the new ``AssetContent``
    :rtype: ``osid.repository.AssetContent``
    :raise: ``IllegalState`` -- ``asset_content_form`` already used in a
        create transaction
    :raise: ``InvalidArgument`` -- one or more of the form elements is invalid
    :raise: ``NullArgument`` -- ``asset_content_form`` is ``null``
    :raise: ``OperationFailed`` -- unable to complete request
    :raise: ``PermissionDenied`` -- authorization failure
    :raise: ``Unsupported`` -- ``asset_content_form`` did not originate from
        ``get_asset_content_form_for_create()``

    *compliance: mandatory -- This method must be implemented.*
    """
    if asset_content_form is None:
        raise NullArgument()
    if not isinstance(asset_content_form,
                      abc_repository_objects.AssetContentForm):
        raise InvalidArgument('argument type is not an AssetContentForm')
    if asset_content_form.is_for_update():
        raise InvalidArgument('form is for update only, not create')
    try:
        if self._forms[asset_content_form.get_id().get_identifier()] == CREATED:
            raise IllegalState('form already used in a create transaction')
    except KeyError:
        raise Unsupported('form did not originate from this session')
    if not asset_content_form.is_valid():
        raise InvalidArgument('one or more of the form elements is invalid')

    url_path = construct_url('assets',
                             bank_id=self._catalog_idstr,
                             asset_id=asset_content_form._asset_id)
    asset = objects.Asset(self._get_request(url_path))
    previous_contents = asset._my_map['assetContents']
    previous_content_ids = [c['id'] for c in previous_contents]
    asset._my_map['assetContents'].append(asset_content_form._my_map)
    url_path = construct_url('assets', bank_id=self._catalog_idstr)
    try:
        result = self._put_request(url_path, asset._my_map)
    except Exception:
        raise  # OperationFailed
    self._forms[asset_content_form.get_id().get_identifier()] = CREATED
    content = result['assetContents']
    if len(content) == 1:
        return objects.AssetContent(content[0])
    else:
        # Assumes that in the split second this requires, no one else creates
        # a new asset content for this asset...
        for c in content:
            if c['id'] not in previous_content_ids:
                return objects.AssetContent(c)
def has_style(node):
    """Tells us if node element has defined styling.

    :Args:
      - node (:class:`ooxml.doc.Element`): Element

    :Returns:
      True or False
    """
    elements = ['b', 'i', 'u', 'strike', 'color', 'jc', 'sz', 'ind',
                'superscript', 'subscript', 'small_caps']
    return any(elem in node.rpr for elem in elements)
def ExtractPathSpecs(self, path_specs, find_specs=None,
                     recurse_file_system=True, resolver_context=None):
    """Extracts path specification from a specific source.

    Args:
      path_specs (Optional[list[dfvfs.PathSpec]]): path specifications.
      find_specs (Optional[list[dfvfs.FindSpec]]): find specifications.
      recurse_file_system (Optional[bool]): True if extraction should recurse
          into a file system.
      resolver_context (Optional[dfvfs.Context]): resolver context.

    Yields:
      dfvfs.PathSpec: path specification of a file entry found in the source.
    """
    for path_spec in path_specs:
        for extracted_path_spec in self._ExtractPathSpecs(
                path_spec, find_specs=find_specs,
                recurse_file_system=recurse_file_system,
                resolver_context=resolver_context):
            yield extracted_path_spec
def from_json(cls, json_info):
    """Build a Trial instance from a json string."""
    if json_info is None:
        return None
    return TrialRecord(
        trial_id=json_info["trial_id"],
        job_id=json_info["job_id"],
        trial_status=json_info["status"],
        start_time=json_info["start_time"],
        params=json_info["params"])
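Despite the docstring's mention of a JSON string, the body indexes json_info like a mapping, so a parsed dict is assumed in this sketch (field values are illustrative):

record = {
    "trial_id": "trial_0001",
    "job_id": "job_42",
    "status": "RUNNING",
    "start_time": "2019-01-01 12:00:00",
    "params": {"lr": 0.01},
}
trial = TrialRecord.from_json(record)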
def render(self, size):
    """Render identicon to PIL.Image.

    @param size: identicon patchsize (image size is 3 * [size])
    @return: PIL.Image
    """
    # decode the code
    middle, corner, side, foreColor, backColor = self.decode(self.code)

    size = int(size)
    # make image
    image = Image.new("RGB", (size * 3, size * 3))
    draw = ImageDraw.Draw(image)

    # fill background
    draw.rectangle((0, 0, image.size[0], image.size[1]), fill=0)

    kwds = {
        'draw': draw,
        'size': size,
        'foreColor': foreColor,
        'backColor': backColor}
    # middle patch
    self.drawPatch((1, 1), middle[2], middle[1], middle[0], **kwds)

    # side patch
    kwds['type'] = side[0]
    for i in range(4):
        pos = [(1, 0), (2, 1), (1, 2), (0, 1)][i]
        self.drawPatch(pos, side[2] + 1 + i, side[1], **kwds)

    # corner patch
    kwds['type'] = corner[0]
    for i in range(4):
        pos = [(0, 0), (2, 0), (2, 2), (0, 2)][i]
        self.drawPatch(pos, corner[2] + 1 + i, corner[1], **kwds)

    return image
def population(self):
    "Class containing the population and all the individuals generated"
    try:
        return self._p
    except AttributeError:
        self._p = self._population_class(
            base=self, tournament_size=self._tournament_size,
            classifier=self.classifier, labels=self._labels,
            es_extra_test=self.es_extra_test, popsize=self._popsize,
            random_generations=self._random_generations,
            negative_selection=self._negative_selection)
        return self._p
def save_params(self, fname):
    """Saves model parameters to file.

    Parameters
    ----------
    fname : str
        Path to output param file.

    Examples
    --------
    >>> # An example of saving module parameters.
    >>> mod.save_params('myfile')
    """
    arg_params, aux_params = self.get_params()
    save_dict = {('arg:%s' % k): v.as_in_context(cpu())
                 for k, v in arg_params.items()}
    save_dict.update({('aux:%s' % k): v.as_in_context(cpu())
                      for k, v in aux_params.items()})
    ndarray.save(fname, save_dict)
def set_input_func(self, input_func):
    """Set input_func of device.

    Valid values depend on the device and should be taken from
    "input_func_list". Return "True" on success and "False" on fail.
    """
    # For selection of sources, other names than at receiving sources
    # have to be used
    # AVR-X receiver needs source mapping to set input_func
    if self._receiver_type in [AVR_X.type, AVR_X_2016.type]:
        direct_mapping = False
        try:
            linp = CHANGE_INPUT_MAPPING[self._input_func_list[input_func]]
        except KeyError:
            direct_mapping = True
    else:
        direct_mapping = True
    # AVR-nonX receiver and if no mapping was found get parameter for
    # setting input_func directly
    if direct_mapping is True:
        try:
            linp = self._input_func_list[input_func]
        except KeyError:
            _LOGGER.error("No mapping for input source %s", input_func)
            return False
    # Create command URL and send command via HTTP GET
    try:
        if linp in self._favorite_func_list:
            command_url = self._urls.command_fav_src + linp
        else:
            command_url = self._urls.command_sel_src + linp

        if self.send_get_command(command_url):
            self._input_func = input_func
            return True
        else:
            return False
    except requests.exceptions.RequestException:
        _LOGGER.error("Connection error: input function %s not set.",
                      input_func)
        return False
def RunOnce(self):
    """Run this once on init."""
    global WEBAUTH_MANAGER  # pylint: disable=global-statement

    # pylint: disable=g-bad-name
    WEBAUTH_MANAGER = BaseWebAuthManager.GetPlugin(
        config.CONFIG["AdminUI.webauth_manager"])()
    # pylint: enable=g-bad-name
    logging.info("Using webauth manager %s", WEBAUTH_MANAGER)
def process_url(url, server_name="", document_root=None, check_security=True):
    """Goes through the url and returns a dictionary of fields.

    For example:
    img/photos/2008/05/12/WIZARDS_0034_05022035_r329x151.jpg?e315d4515574cec417b1845392ba687dd98c17ce

        actions:        [('r', '329x151')]
        parent_dir:     img/photos/2008/05/12/
        ext:            jpg
        base_filename:  WIZARDS_0034_05022035
        security_hash:  e315d4515574cec417b1845392ba687dd98c17ce
        requested_path: /path/to/media_root/img/photos/2008/05/12/WIZARDS_0034_05022035_r329x151.jpg
        original_file:  /path/to/media_root/img/photos/2008/05/12/WIZARDS_0034_05022035.jpg
        is_external:    False

    The ``document_root`` parameter overrides the ``BASE_PATH`` setting.
    """
from . network import Http404 from settings import ( BASE_PATH , ORIG_BASE_PATH , USE_VHOSTS , VHOST_DOC_BASE , EXTERNAL_PREFIX ) try : request_uri , security_hash = url . split ( "?" , 1 ) except ValueError : request_uri , security_hash = url , "" external_prefix = EXTERNAL_PREFIX is_external = request_uri . startswith ( external_prefix ) resolved_uri = resolve_request_path ( request_uri ) resolved_uri = resolved_uri . lstrip ( "/" ) resolved_uri = urllib . unquote ( resolved_uri ) if is_external : external_url = urllib . unquote ( resolved_uri . replace ( external_prefix . lstrip ( "/" ) , '' ) ) resolved_uri = resolved_uri . replace ( "http://" , '' ) . replace ( 'https://' , '' ) else : external_url = '' base_path = document_root or BASE_PATH orig_base_path = ORIG_BASE_PATH or base_path if USE_VHOSTS : if not os . path . exists ( os . path . join ( BASE_PATH , server_name ) ) : raise Http404 ( "Bad server: %s" % server_name ) parts = ( base_path , server_name , VHOST_DOC_BASE , resolved_uri ) requested_path = os . path . join ( * parts ) else : path = os . path . join ( base_path , resolved_uri ) if base_path . startswith ( 's3://' ) : requested_path = path else : requested_path = os . path . abspath ( path ) if not requested_path . startswith ( base_path ) : # Apparently , there was an attempt to put some directory traversal # hacks into the path . ( . . / . . / . . / vulnerable _ file . exe ) raise Http404 ( "Unknown file path." ) parent_dir , requested_file = os . path . split ( resolved_uri ) base_filename , ext = os . path . splitext ( requested_file ) base_file_name , action_tuples = parse_action_tuples ( requested_file ) if USE_VHOSTS : original_file = os . path . join ( orig_base_path , server_name , parent_dir , base_file_name + ext ) else : original_file = os . path . join ( orig_base_path , parent_dir , base_file_name + ext ) base_uri = os . path . dirname ( resolved_uri ) original_uri = urlparse . urljoin ( base_uri , base_filename + ext ) if original_file . startswith ( u's3://' ) : from filesystem import s3 original_is_missing = not s3 . file_exists ( original_file ) else : original_is_missing = not os . path . exists ( original_file ) if original_is_missing and is_external : try : download_url ( external_url , original_file ) except Exception as e : msg = "Error downloading external URL: %s" % e raise Http404 ( msg ) elif original_is_missing : msg = "Original file does not exist. %r %r" % ( url , original_file , ) raise Http404 ( msg ) if check_security and action_tuples and not is_valid_security ( action_tuples , security_hash ) : raise Http404 ( "Invalid security token." ) output = { 'actions' : action_tuples , 'parent_dir' : parent_dir , 'ext' : ext , 'base_filename' : base_filename , 'security_hash' : security_hash , 'requested_file' : requested_path , 'original_file' : original_file , 'orignial_uri' : original_uri , 'is_external' : is_external , 'external_url' : external_url , } logger . debug ( "Processed {0} into {1}" . format ( url , str ( output ) ) ) return output
def libvlc_audio_output_device_enum(mp):
    '''Gets a list of potential audio output devices,
    See L{libvlc_audio_output_device_set}().

    @note: Not all audio outputs support enumerating devices.
        The audio output may be functional even if the list is empty (NULL).
    @note: The list may not be exhaustive.
    @warning: Some audio output devices in the list might not actually work in
        some circumstances. By default, it is recommended to not specify any
        explicit audio device.
    @param mp: media player.
    @return: A NULL-terminated linked list of potential audio output devices.
        It must be freed with L{libvlc_audio_output_device_list_release}().
    @version: LibVLC 2.2.0 or later.
    '''
    f = _Cfunctions.get('libvlc_audio_output_device_enum', None) or \
        _Cfunction('libvlc_audio_output_device_enum', ((1,),), None,
                   ctypes.POINTER(AudioOutputDevice), MediaPlayer)
    return f(mp)
def local_subset(self, *args, **kwargs):
    '''
    Run :ref:`execution modules <all-salt.modules>` against subsets of minions

    .. versionadded:: 2016.3.0

    Wraps :py:meth:`salt.client.LocalClient.cmd_subset`
    '''
    local = salt.client.get_local_client(mopts=self.opts)
    return local.cmd_subset(*args, **kwargs)
def get_formfield(model, field):
    """Return the formfield associated with the given field of the model."""
    class_field = model._meta.get_field(field)
    if hasattr(class_field, "field"):
        formfield = class_field.field.formfield()
    else:
        formfield = class_field.formfield()
    # Otherwise the formfield contains the reverse relation
    if isinstance(formfield, ChoiceField):
        formfield.choices = class_field.get_choices()
    return formfield
def list_pp(ll, separator='|', header_line=True, autonumber=True):
    """Pretty print list of lists ll."""
    if autonumber:
        for cnt, i in enumerate(ll):
            i.insert(0, cnt if cnt > 0 or not header_line else '#')

    def lenlst(l):
        return [len(str(i)) for i in l]

    lst_len = [lenlst(i) for i in ll]
    lst_rot = zip(*lst_len[::-1])
    lst_len = [max(i) for i in lst_rot]
    frmt = separator + separator.join(
        ["{!s:" + str(i) + "}" for i in lst_len]) + separator
    if header_line:
        header_line = '-' * len(frmt.format(*ll[0]))
    for cnt, l in enumerate(ll):
        if cnt < 2 and header_line:
            print(header_line)
        print(frmt.format(*l))
    if header_line:
        print(header_line)
    return lst_len
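A small usage sketch; the printed column widths adapt to the longest entry in each column:

rows = [["name", "qty"], ["apples", 12], ["pears", 7]]
widths = list_pp(rows)
# prints a '-'-framed table whose first column is the auto-inserted row number
# ('#' in the header row), and returns the computed column widths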
def strand_barplot(self):
    """Plot a bargraph showing the strandedness of alignments."""
    # Plot bar graph of groups
    keys = ['End 1 Sense', 'End 1 Antisense', 'End 2 Sense', 'End 2 Antisense']

    # Config for the plot
    pconfig = {
        'id': 'rna_seqc_strandedness_plot',
        'title': 'RNA-SeQC: Strand Specificity',
        'ylab': '% Reads',
        'cpswitch_counts_label': '# Reads',
        'cpswitch_percent_label': '% Reads',
        'ymin': 0,
        'cpswitch_c_active': False
    }
    self.add_section(
        name='Strand Specificity',
        anchor='rna_seqc_strand_specificity',
        helptext='End 1/2 Sense are the number of End 1 or 2 reads that were '
                 'sequenced in the sense direction. Similarly, End 1/2 '
                 'Antisense are the number of End 1 or 2 reads that were '
                 'sequenced in the antisense direction',
        plot=bargraph.plot(self.rna_seqc_metrics, keys, pconfig)
    )
def target_to_ipv4_short(target):
    """Attempt to return an IPv4 short range list from a target string."""
    splitted = target.split('-')
    if len(splitted) != 2:
        return None
    try:
        start_packed = inet_pton(socket.AF_INET, splitted[0])
        end_value = int(splitted[1])
    except (socket.error, ValueError):
        return None
    start_value = int(binascii.hexlify(bytes(start_packed[3])), 16)
    if end_value < 0 or end_value > 255 or end_value < start_value:
        return None
    end_packed = start_packed[0:3] + struct.pack('B', end_value)
    return ipv4_range_to_list(start_packed, end_packed)
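The target string is expected to look like '192.168.1.5-20', where the part after the dash is only the final octet. A sketch of the expected behavior (ipv4_range_to_list is assumed to expand the packed start/end addresses into a host list):

hosts = target_to_ipv4_short('192.168.1.5-20')        # addresses 192.168.1.5 through 192.168.1.20
assert target_to_ipv4_short('192.168.1.5') is None     # no '-' separator
assert target_to_ipv4_short('192.168.1.20-5') is None  # end octet below start octet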
def tobinary(series, path, prefix='series', overwrite=False, credentials=None):
    """Writes out data to binary format.

    Parameters
    ----------
    series : Series
        The data to write
    path : string path or URI to directory to be created
        Output files will be written underneath path. Directory will be
        created as a result of this call.
    prefix : str, optional, default = 'series'
        String prefix for files.
    overwrite : bool
        If true, path and all its contents will be deleted and recreated as
        part of this call.
    """
    from six import BytesIO
    from thunder.utils import check_path
    from thunder.writers import get_parallel_writer

    if not overwrite:
        check_path(path, credentials=credentials)
        overwrite = True

    def tobuffer(kv):
        firstkey = None
        buf = BytesIO()
        for k, v in kv:
            if firstkey is None:
                firstkey = k
            buf.write(v.tostring())
        val = buf.getvalue()
        buf.close()
        if firstkey is None:
            return iter([])
        else:
            label = prefix + '-' + getlabel(firstkey) + ".bin"
            return iter([(label, val)])

    writer = get_parallel_writer(path)(path, overwrite=overwrite,
                                       credentials=credentials)

    if series.mode == 'spark':
        binary = series.values.tordd().sortByKey().mapPartitions(tobuffer)
        binary.foreach(writer.write)
    else:
        basedims = [series.shape[d] for d in series.baseaxes]

        def split(k):
            ind = unravel_index(k, basedims)
            return ind, series.values[ind]

        buf = tobuffer([split(i) for i in range(prod(basedims))])
        [writer.write(b) for b in buf]

    shape = series.shape
    dtype = series.dtype
    write_config(path, shape=shape, dtype=dtype, overwrite=overwrite,
                 credentials=credentials)
def funding(self):
    """List of namedtuples with parsed funding information in the form
    (agency, string, id, acronym, country).
    """
    path = ['item', 'xocs:meta', 'xocs:funding-list', 'xocs:funding']
    funds = listify(chained_get(self._json, path, []))
    out = []
    fund = namedtuple('Funding', 'agency string id acronym country')
    for item in funds:
        new = fund(agency=item.get('xocs:funding-agency'),
                   string=item.get('xocs:funding-agency-matched-string'),
                   id=item.get('xocs:funding-agency-id'),
                   acronym=item.get('xocs:funding-agency-acronym'),
                   country=item.get('xocs:funding-agency-country'))
        out.append(new)
    return out or None
def round_to(self, dt, hour, minute, second, mode="floor"):
    """Round the given datetime to the specified hour, minute and second,
    i.e. align it to the nearest time having that hour, minute and second.

    :param mode: 'floor' or 'ceiling'
    """
    mode = mode.lower()

    new_dt = datetime(dt.year, dt.month, dt.day, hour, minute, second)
    if mode == "floor":
        if new_dt <= dt:
            return new_dt
        else:
            return rolex.add_days(new_dt, -1)
    elif mode == "ceiling":
        if new_dt >= dt:
            return new_dt
        else:
            return rolex.add_days(new_dt, 1)
    else:
        raise ValueError("'mode' has to be 'floor' or 'ceiling'!")
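A quick sketch of the floor/ceiling behavior, calling the method through a hypothetical rolex helper instance (the body itself relies on rolex.add_days):

dt = datetime(2019, 3, 5, 14, 30, 0)
rolex.round_to(dt, 9, 0, 0, mode="floor")    # 2019-03-05 09:00:00 (09:00 already passed today)
rolex.round_to(dt, 9, 0, 0, mode="ceiling")  # 2019-03-06 09:00:00 (next occurrence of 09:00)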
def createDataFromFile(self, filePath, inputEncoding=None, defaultFps=None):
    """Fetch a given filePath and parse its contents.

    May raise the following exceptions:
    * RuntimeError - generic exception telling that parsing was unsuccessful
    * IOError - failed to open a file at given filePath

    @return SubtitleData filled with non-empty, default datafields. Client
    should modify them and then perform an add/update operation
    """
    file_ = File(filePath)
    if inputEncoding is None:
        inputEncoding = file_.detectEncoding()
    inputEncoding = inputEncoding.lower()

    videoInfo = VideoInfo(defaultFps) if defaultFps is not None else file_.detectFps()
    subtitles = self._parseFile(file_, inputEncoding, videoInfo.fps)

    data = SubtitleData()
    data.subtitles = subtitles
    data.fps = videoInfo.fps
    data.inputEncoding = inputEncoding
    data.outputEncoding = inputEncoding
    data.outputFormat = self._parser.parsedFormat()
    data.videoPath = videoInfo.videoPath
    return data
def by_type(blocks, slist=None):
    """Sort blocks into layout, internal volume, data or unknown.

    Arguments:
    Obj: blocks  -- List of block objects.
    List: slist  -- (optional) List of block indexes.

    Returns:
    List: layout  -- List of block indexes of blocks containing the volume
                     table records.
    List: data    -- List of block indexes containing filesystem data.
    List: int_vol -- List of block indexes containing volume ids greater than
                     UBI_INTERNAL_VOL_START that are not layout volumes.
    List: unknown -- List of block indexes of blocks that failed validation
                     of crc in ed_hdr or vid_hdr.
    """
    layout = []
    data = []
    int_vol = []
    unknown = []

    for i in blocks:
        if slist and i not in slist:
            continue

        if blocks[i].is_vtbl and blocks[i].is_valid:
            layout.append(i)
        elif blocks[i].is_internal_vol and blocks[i].is_valid:
            int_vol.append(i)
        elif blocks[i].is_valid:
            data.append(i)
        else:
            unknown.append(i)

    return layout, data, int_vol, unknown
def Collect(self, knowledge_base, artifact_definition, searcher, file_system):
    """Collects values using a file artifact definition.

    Args:
      knowledge_base (KnowledgeBase): to fill with preprocessing information.
      artifact_definition (artifacts.ArtifactDefinition): artifact definition.
      searcher (dfvfs.FileSystemSearcher): file system searcher to preprocess
          the file system.
      file_system (dfvfs.FileSystem): file system to be preprocessed.

    Raises:
      PreProcessFail: if the preprocessing fails.
    """
    for source in artifact_definition.sources:
        if source.type_indicator not in (
                artifact_definitions.TYPE_INDICATOR_FILE,
                artifact_definitions.TYPE_INDICATOR_PATH):
            continue

        for path in source.paths:
            # Make sure the path separators used in the artifact definition
            # correspond to those used by the file system.
            path_segments = path.split(source.separator)

            find_spec = file_system_searcher.FindSpec(
                location_glob=path_segments[1:], case_sensitive=False)

            for path_specification in searcher.Find(find_specs=[find_spec]):
                self._ParsePathSpecification(
                    knowledge_base, searcher, file_system,
                    path_specification, source.separator)
def output(data, **kwargs):  # pylint: disable=unused-argument
    '''
    Print the output data in JSON
    '''
    try:
        dump_opts = {'indent': 4, 'default': repr}

        if 'output_indent' in __opts__:
            indent = __opts__.get('output_indent')
            sort_keys = False

            if indent == 'pretty':
                indent = 4
                sort_keys = True
            elif isinstance(indent, six.integer_types):
                if indent >= 0:
                    indent = indent
                else:
                    indent = None

            dump_opts['indent'] = indent
            dump_opts['sort_keys'] = sort_keys

        return dson.dumps(data, **dump_opts)
    except UnicodeDecodeError as exc:
        log.error('Unable to serialize output to dson')
        return dson.dumps({'error': 'Unable to serialize output to DSON',
                           'message': six.text_type(exc)})
    except TypeError:
        log.debug('An error occurred while outputting DSON', exc_info=True)
    # Return valid JSON for unserializable objects
    return dson.dumps({})
def _get_ordered_idx(self, mask_missing_values):
    """Decide in what order we will update the features.

    As a homage to the MICE R package, we will have 4 main options of how to
    order the updates, and use a random order if anything else is specified.

    Also, this function skips features which have no missing values.

    Parameters
    ----------
    mask_missing_values : array-like, shape (n_samples, n_features)
        Input data's missing indicator matrix, where "n_samples" is the
        number of samples and "n_features" is the number of features.

    Returns
    -------
    ordered_idx : ndarray, shape (n_features,)
        The order in which to impute the features.
    """
    frac_of_missing_values = mask_missing_values.mean(axis=0)
    missing_values_idx = np.nonzero(frac_of_missing_values)[0]
    if self.imputation_order == 'roman':
        ordered_idx = missing_values_idx
    elif self.imputation_order == 'arabic':
        ordered_idx = missing_values_idx[::-1]
    elif self.imputation_order == 'ascending':
        n = len(frac_of_missing_values) - len(missing_values_idx)
        ordered_idx = np.argsort(frac_of_missing_values,
                                 kind='mergesort')[n:][::-1]
    elif self.imputation_order == 'descending':
        n = len(frac_of_missing_values) - len(missing_values_idx)
        ordered_idx = np.argsort(frac_of_missing_values,
                                 kind='mergesort')[n:]
    elif self.imputation_order == 'random':
        ordered_idx = missing_values_idx
        self.random_state_.shuffle(ordered_idx)
    else:
        raise ValueError("Got an invalid imputation order: '{0}'. It must "
                         "be one of the following: 'roman', 'arabic', "
                         "'ascending', 'descending', or "
                         "'random'.".format(self.imputation_order))
    return ordered_idx
def _parse_resource(self, resource):
    """Ensure compliance with the spec's resource objects section.

    :param resource: dict JSON API resource object
    """
    link = 'jsonapi.org/format/#document-resource-objects'

    rid = isinstance(resource.get('id'), unicode)
    rtype = isinstance(resource.get('type'), unicode)

    if not rtype or (self.req.is_patching and not rid):
        self.fail('JSON API requires that every resource object MUST '
                  'contain a `type` top-level key. Additionally, when '
                  'modifying an existing resource object an `id` '
                  'top-level key is required. The values of both keys '
                  'MUST be strings. Your request did not comply with '
                  'one or more of these 3 rules', link)
    elif 'attributes' not in resource and 'relationships' not in resource:
        self.fail('Modifiying or creating resources require at minimum '
                  'an attributes object and/or relationship object.', link)
    elif rid and self.req.is_posting:
        abort(exceptions.ModificationDenied(**{
            'detail': 'Our API does not support client-generated ID\'s '
                      'when creating NEW resources. Instead, our API '
                      'will generate one for you & return it in the '
                      'response.',
            'links': 'jsonapi.org/format/#crud-creating-client-ids',
        }))
def parse(self):
    """Parse geojson and ensure it is a collection."""
    try:
        self.parsed_data = json.loads(self.data)
    except UnicodeError as e:
        self.parsed_data = json.loads(self.data.decode('latin1'))
    except Exception as e:
        raise Exception(
            'Error while converting response from JSON to python. %s' % e)

    if self.parsed_data.get('type', '') != 'FeatureCollection':
        raise Exception(
            'GeoJson synchronizer expects a FeatureCollection object at root level')

    self.parsed_data = self.parsed_data['features']
def get_ssid(_, data):
    """http://git.kernel.org/cgit/linux/kernel/git/jberg/iw.git/tree/util.c?id=v3.17#n313

    Positional arguments:
    data -- bytearray data to read.

    Returns:
    String.
    """
    converted = list()
    for i in range(len(data)):
        try:
            c = unichr(data[i])
        except NameError:
            c = chr(data[i])
        if unicodedata.category(c) != 'Cc' and c not in (' ', '\\'):
            converted.append(c)
        elif c == '\0':
            converted.append(c)
        elif c == ' ' and i not in (0, len(data)):
            converted.append(' ')
        else:
            converted.append('\\{0:02x}'.format(data[i]))
    return ''.join(converted)
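A small illustration of the escaping (the first parameter is unused by the body, so None is passed here):

raw = bytearray(b'My WiFi\x01')
print(get_ssid(None, raw))  # "My WiFi\01": printable characters kept, the 0x01 control byte hex-escaped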
def _try_get_state_scope(name, mark_name_scope_used=True):
    """Returns a fresh variable/name scope for a module's state.

    In order to import a module into a given scope without major complications
    we require the scope to be empty. This function deals with deciding an
    unused scope where to define the module state. This is non trivial in
    cases where name_scope and variable_scopes are out of sync, e.g. tpus or
    re-entering scopes.

    Args:
      name: A string with the name of the module as supplied by the client.
      mark_name_scope_used: a boolean, indicating whether to mark the name
          scope of the returned value as used.

    Raises:
      RuntimeError: if the name scope of the freshly created variable scope is
          already used.
    """
    tmp_scope_name = tf_v1.get_variable_scope().name
    if tmp_scope_name:
        tmp_scope_name += "/"
    with tf.name_scope(tmp_scope_name):
        # Pick an unused variable scope.
        with tf_v1.variable_scope(
                None, default_name=name, auxiliary_name_scope=False) as vs:
            abs_state_scope = vs.name + "/"
        # Verify that the name scope is available and mark it used if requested.
        graph = tf_v1.get_default_graph()
        unique_name_scope = graph.unique_name(name, mark_name_scope_used) + "/"
        if unique_name_scope != abs_state_scope:
            raise RuntimeError(
                "variable_scope %s was unused but the corresponding "
                "name_scope was already taken." % abs_state_scope)
    return abs_state_scope
def from_data ( source ) : """Infers a table / view schema from its JSON representation , a list of records , or a Pandas dataframe . Args : source : the Pandas Dataframe , a dictionary representing a record , a list of heterogeneous data ( record ) or homogeneous data ( list of records ) from which to infer the schema , or a definition of the schema as a list of dictionaries with ' name ' and ' type ' entries and possibly ' mode ' and ' description ' entries . Only used if no data argument was provided . ' mode ' can be ' NULLABLE ' , ' REQUIRED ' or ' REPEATED ' . For the allowed types , see : https : / / cloud . google . com / bigquery / preparing - data - for - bigquery # datatypes Note that there is potential ambiguity when passing a list of lists or a list of dicts between whether that should be treated as a list of records or a single record that is a list . The heuristic used is to check the length of the entries in the list ; if they are equal then a list of records is assumed . To avoid this ambiguity you can instead use the Schema . from _ record method which assumes a single record , in either list of values or dictionary of key - values form . Returns : A Schema for the data ."""
if isinstance ( source , pandas . DataFrame ) : bq_schema = Schema . _from_dataframe ( source ) elif isinstance ( source , list ) : if len ( source ) == 0 : bq_schema = source elif all ( isinstance ( d , dict ) for d in source ) : if all ( 'name' in d and 'type' in d for d in source ) : # It looks like a bq _ schema ; use it as - is . bq_schema = source elif all ( len ( d ) == len ( source [ 0 ] ) for d in source ) : bq_schema = Schema . _from_dict_record ( source [ 0 ] ) else : raise Exception ( ( 'Cannot create a schema from heterogeneous list %s; perhaps you meant ' + 'to use Schema.from_record?' ) % str ( source ) ) elif isinstance ( source [ 0 ] , list ) and all ( [ isinstance ( l , list ) and len ( l ) == len ( source [ 0 ] ) for l in source ] ) : # A list of lists all of the same length ; treat first entry as a list record . bq_schema = Schema . _from_record ( source [ 0 ] ) else : # A heterogeneous list ; treat as a record . raise Exception ( ( 'Cannot create a schema from heterogeneous list %s; perhaps you meant ' + 'to use Schema.from_record?' ) % str ( source ) ) elif isinstance ( source , dict ) : raise Exception ( ( 'Cannot create a schema from dict %s; perhaps you meant to use ' + 'Schema.from_record?' ) % str ( source ) ) else : raise Exception ( 'Cannot create a schema from %s' % str ( source ) ) return Schema ( bq_schema )
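For illustration, the same-length heuristic the docstring warns about, applied to a hypothetical list of records:

rows = [{'name': 'a', 'value': 1}, {'name': 'b', 'value': 2}]
looks_like_bq_schema = all('name' in r and 'type' in r for r in rows)   # False
looks_like_record_list = all(len(r) == len(rows[0]) for r in rows)      # True
print(looks_like_bq_schema, looks_like_record_list)  # schema would be inferred from rows[0]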
def restructuredtext ( text , ** kwargs ) : """Applies reStructuredText conversion to a string , and returns the HTML ."""
from docutils import core parts = core . publish_parts ( source = text , writer_name = 'html4css1' , ** kwargs ) return parts [ 'fragment' ]
def adapt_datetimefield_value ( self , value ) : """Transform a datetime value to an object compatible with what is expected by the backend driver for datetime columns ."""
if value is None : return None if self . connection . _DJANGO_VERSION >= 14 and settings . USE_TZ : if timezone . is_aware ( value ) : # pyodbc doesn't support datetimeoffset value = value . astimezone ( timezone . utc ) if not self . connection . features . supports_microsecond_precision : value = value . replace ( microsecond = 0 ) return value
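For illustration, the same normalization (convert an aware value to UTC, then drop microseconds) using only the standard library; the timestamp is made up:

from datetime import datetime, timezone, timedelta

value = datetime(2020, 5, 1, 12, 30, 45, 123456,
                 tzinfo=timezone(timedelta(hours=2)))
value = value.astimezone(timezone.utc).replace(microsecond=0)
print(value.isoformat())  # 2020-05-01T10:30:45+00:00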
def remove_all ( self , filter , force = False , timeout = - 1 ) : """Deletes the set of datacenters according to the specified parameters . A filter is required to identify the set of resources to be deleted . Args : filter : A general filter / query string to narrow the list of items that will be removed . force : If set to true , the operation completes despite any problems with network connectivity or errors on the resource itself . The default is false . timeout : Timeout in seconds . Wait for task completion by default . The timeout does not abort the operation in OneView ; it just stops waiting for its completion . Returns : bool : operation success"""
return self . _client . delete_all ( filter = filter , force = force , timeout = timeout )
def dlogpdf_df_dtheta ( self , f , y , Y_metadata = None ) : """Derivative of dlogp(y|f)/df with respect to the likelihood parameters , chained through the GP link function when the link is not the identity ."""
if self . size > 0 : if self . not_block_really : raise NotImplementedError ( "Need to make a decorator for this!" ) if isinstance ( self . gp_link , link_functions . Identity ) : return self . dlogpdf_dlink_dtheta ( f , y , Y_metadata = Y_metadata ) else : inv_link_f = self . gp_link . transf ( f ) dlink_df = self . gp_link . dtransf_df ( f ) dlogpdf_dlink_dtheta = self . dlogpdf_dlink_dtheta ( inv_link_f , y , Y_metadata = Y_metadata ) dlogpdf_df_dtheta = np . zeros ( ( self . size , f . shape [ 0 ] , f . shape [ 1 ] ) ) # Chain each parameter of the likelihood separately for p in range ( self . size ) : dlogpdf_df_dtheta [ p , : , : ] = chain_1 ( dlogpdf_dlink_dtheta [ p , : , : ] , dlink_df ) return dlogpdf_df_dtheta # return chain _ 1 ( dlogpdf _ dlink _ dtheta , dlink _ df ) else : # There are no parameters so return an empty array for derivatives return np . zeros ( ( 0 , f . shape [ 0 ] , f . shape [ 1 ] ) )
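A minimal numpy sketch of the chain rule being applied here, assuming `chain_1` is an element-wise product of the two derivative arrays (shapes are illustrative):

import numpy as np

dlogpdf_dlink_dtheta = np.random.randn(2, 5, 1)   # (n_params, N, output_dim)
dlink_df = np.random.randn(5, 1)                  # (N, output_dim)
# d/dtheta [ dlogp/df ] = d/dtheta [ dlogp/dlink ] * dlink/df, per parameter
dlogpdf_df_dtheta = dlogpdf_dlink_dtheta * dlink_df[None, :, :]
print(dlogpdf_df_dtheta.shape)                    # (2, 5, 1)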
def mmi_to_delimited_text ( self ) : """Return the mmi data as a delimited test string . : returns : A delimited text string that can easily be written to disk for e . g . use by gdal _ grid . : rtype : str The returned string will look like this : : 123.0750,01.7900,1 123.1000,01.7900,1.14 123.1250,01.7900,1.15 123.1500,01.7900,1.16 etc . . ."""
delimited_text = 'lon,lat,mmi\n' for row in self . mmi_data : delimited_text += '%s,%s,%s\n' % ( row [ 0 ] , row [ 1 ] , row [ 2 ] ) return delimited_text
def write_memory ( self , session , space , offset , data , width , extended = False ) : """Write an 8 - bit , 16 - bit , 32 - bit or 64 - bit value to the specified memory space and offset . Corresponds to viOut * functions of the VISA library . : param session : Unique logical identifier to a session . : param space : Specifies the address space . ( Constants . * SPACE * ) : param offset : Offset ( in bytes ) of the address or register to which to write . : param data : Data to write to bus . : param width : Number of bits to write . : param extended : Use 64 bits offset independent of the platform . : return : return value of the library call . : rtype : : class : ` pyvisa . constants . StatusCode `"""
if width == 8 : return self . out_8 ( session , space , offset , data , extended ) elif width == 16 : return self . out_16 ( session , space , offset , data , extended ) elif width == 32 : return self . out_32 ( session , space , offset , data , extended ) elif width == 64 : return self . out_64 ( session , space , offset , data , extended ) raise ValueError ( '%s is not a valid size. Valid values are 8, 16, 32, or 64' % width )
def flush ( self ) : """Flush pending items to Dynamo"""
items = [ ] for data in self . _to_put : items . append ( encode_put ( self . connection . dynamizer , data ) ) for data in self . _to_delete : items . append ( encode_delete ( self . connection . dynamizer , data ) ) self . _write ( items ) self . _to_put = [ ] self . _to_delete = [ ]
def upper_key ( fn ) : """: param fn : a key function : return : a function that wraps around the supplied key function to ensure the returned key is in uppercase ."""
def upper ( key ) : try : return key . upper ( ) except AttributeError : return key return process_key ( upper , fn )
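A standalone sketch of the same idea, reimplemented so it runs without `process_key` (names are illustrative):

def upper_key_demo(fn):
    def upper(item):
        key = fn(item)
        try:
            return key.upper()        # uppercase string keys
        except AttributeError:
            return key                # leave non-strings untouched
    return upper

by_name = upper_key_demo(lambda record: record['name'])
print(by_name({'name': 'alice'}))                               # ALICE
print(sorted(['b', 'A', 'c'], key=upper_key_demo(lambda s: s))) # ['A', 'b', 'c']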
def lomb_scargle_fast ( t , y , dy = 1 , f0 = 0 , df = None , Nf = None , center_data = True , fit_offset = True , use_fft = True , freq_oversampling = 5 , nyquist_factor = 2 , trig_sum_kwds = None ) : """Compute a lomb - scargle periodogram for the given data This implements both an O [ N ^ 2 ] method if use _ fft = = False , or an O [ NlogN ] method if use _ fft = = True . Parameters t , y , dy : array _ like times , values , and errors of the data points . These should be broadcastable to the same shape . If dy is not specified , a constant error will be used . f0 , df , Nf : ( float , float , int ) parameters describing the frequency grid , f = f0 + df * arange ( Nf ) . Defaults , with T = t . max ( ) - t . min ( ) : - f0 = 0 - df is set such that there are ` ` freq _ oversampling ` ` points per peak width . ` ` freq _ oversampling ` ` defaults to 5. - Nf is set such that the highest frequency is ` ` nyquist _ factor ` ` times the so - called " average Nyquist frequency " . ` ` nyquist _ factor ` ` defaults to 2. Note that for unevenly - spaced data , the periodogram can be sensitive to frequencies far higher than the average Nyquist frequency . center _ data : bool ( default = True ) Specify whether to subtract the mean of the data before the fit fit _ offset : bool ( default = True ) If True , then compute the floating - mean periodogram ; i . e . let the mean vary with the fit . use _ fft : bool ( default = True ) If True , then use the Press & Rybicki O [ NlogN ] algorithm to compute the result . Otherwise , use a slower O [ N ^ 2 ] algorithm Other Parameters freq _ oversampling : float ( default = 5) Oversampling factor for the frequency bins . Only referenced if ` ` df ` ` is not specified nyquist _ factor : float ( default = 2) Parameter controlling the highest probed frequency . Only referenced if ` ` Nf ` ` is not specified . trig _ sum _ kwds : dict or None ( optional ) extra keyword arguments to pass to the ` ` trig _ sum ` ` utility . Options are ` ` oversampling ` ` and ` ` Mfft ` ` . See documentation of ` ` trig _ sum ` ` for details . Notes Note that the ` ` use _ fft = True ` ` algorithm is an approximation to the true Lomb - Scargle periodogram , and as the number of points grows this approximation improves . On the other hand , for very small datasets ( < ~ 50 points or so ) this approximation may not be useful . References . . [ 1 ] Press W . H . and Rybicki , G . B , " Fast algorithm for spectral analysis of unevenly sampled data " . ApJ 1:338 , p277 , 1989 . . [ 2 ] M . Zechmeister and M . Kurster , A & A 496 , 577-584 ( 2009) . . [ 3 ] W . Press et al , Numerical Recipies in C ( 2002)"""
# Validate and setup input data t , y , dy = map ( np . ravel , np . broadcast_arrays ( t , y , dy ) ) w = 1. / ( dy ** 2 ) w /= w . sum ( ) # Validate and setup frequency grid if df is None : peak_width = 1. / ( t . max ( ) - t . min ( ) ) df = peak_width / freq_oversampling if Nf is None : avg_Nyquist = 0.5 * len ( t ) / ( t . max ( ) - t . min ( ) ) Nf = max ( 16 , ( nyquist_factor * avg_Nyquist - f0 ) / df ) Nf = int ( Nf ) assert ( df > 0 ) assert ( Nf > 0 ) freq = f0 + df * np . arange ( Nf ) # Center the data . Even if we ' re fitting the offset , # this step makes the expressions below more succinct if center_data or fit_offset : y = y - np . dot ( w , y ) # set up arguments to trig _ sum kwargs = dict . copy ( trig_sum_kwds or { } ) kwargs . update ( f0 = f0 , df = df , use_fft = use_fft , N = Nf ) # 1 . compute functions of the time - shift tau at each frequency Sh , Ch = trig_sum ( t , w * y , ** kwargs ) S2 , C2 = trig_sum ( t , w , freq_factor = 2 , ** kwargs ) if fit_offset : S , C = trig_sum ( t , w , ** kwargs ) with warnings . catch_warnings ( ) : # Filter " invalid value in divide " warnings for zero - frequency if f0 == 0 : warnings . simplefilter ( "ignore" ) tan_2omega_tau = ( S2 - 2 * S * C ) / ( C2 - ( C * C - S * S ) ) # fix NaN at zero frequency if np . isnan ( tan_2omega_tau [ 0 ] ) : tan_2omega_tau [ 0 ] = 0 else : tan_2omega_tau = S2 / C2 # slower / less stable way : we ' ll use trig identities instead # omega _ tau = 0.5 * np . arctan ( tan _ 2omega _ tau ) # S2w , C2w = np . sin ( 2 * omega _ tau ) , np . cos ( 2 * omega _ tau ) # Sw , Cw = np . sin ( omega _ tau ) , np . cos ( omega _ tau ) S2w = tan_2omega_tau / np . sqrt ( 1 + tan_2omega_tau * tan_2omega_tau ) C2w = 1 / np . sqrt ( 1 + tan_2omega_tau * tan_2omega_tau ) Cw = np . sqrt ( 0.5 ) * np . sqrt ( 1 + C2w ) Sw = np . sqrt ( 0.5 ) * np . sign ( S2w ) * np . sqrt ( 1 - C2w ) # 2 . Compute the periodogram , following Zechmeister & Kurster # and using tricks from Press & Rybicki . YY = np . dot ( w , y ** 2 ) YC = Ch * Cw + Sh * Sw YS = Sh * Cw - Ch * Sw CC = 0.5 * ( 1 + C2 * C2w + S2 * S2w ) SS = 0.5 * ( 1 - C2 * C2w - S2 * S2w ) if fit_offset : CC -= ( C * Cw + S * Sw ) ** 2 SS -= ( S * Cw - C * Sw ) ** 2 with warnings . catch_warnings ( ) : # Filter " invalid value in divide " warnings for zero - frequency if fit_offset and f0 == 0 : warnings . simplefilter ( "ignore" ) power = ( YC * YC / CC + YS * YS / SS ) / YY # fix NaN and INF at zero frequency if np . isnan ( power [ 0 ] ) or np . isinf ( power [ 0 ] ) : power [ 0 ] = 0 return freq , power
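As a worked example of the default frequency grid described in the docstring (toy, randomly spaced observation times; all numbers are illustrative):

import numpy as np

t = np.sort(np.random.uniform(0, 10, 40))         # 40 observations over ~10 days
T = t.max() - t.min()
df = (1.0 / T) / 5                                # peak width / freq_oversampling
avg_nyquist = 0.5 * len(t) / T                    # "average Nyquist frequency"
Nf = max(16, int((2 * avg_nyquist - 0.0) / df))   # nyquist_factor=2, f0=0
freq = 0.0 + df * np.arange(Nf)
print(df, Nf, freq[-1])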
def _load_ini ( self , namespace , config_file ) : """Load INI style configuration ."""
self . LOG . debug ( "Loading %r..." % ( config_file , ) ) ini_file = ConfigParser . SafeConfigParser ( ) ini_file . optionxform = str # case - sensitive option names if ini_file . read ( config_file ) : self . _set_from_ini ( namespace , ini_file ) else : self . LOG . warning ( "Configuration file %r not found," " use the command 'pyroadmin --create-config' to create it!" % ( config_file , ) )
def trigger_events ( events , loop ) : """Trigger event callbacks ( functions or async ) : param events : one or more sync or async functions to execute : param loop : event loop"""
for event in events : result = event ( loop ) if isawaitable ( result ) : loop . run_until_complete ( result )
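A brief usage sketch (hypothetical listeners; assumes the `trigger_events` above and its `isawaitable` import are in scope):

import asyncio

def on_start(loop):
    print("sync listener fired")

async def on_start_async(loop):
    await asyncio.sleep(0)
    print("async listener fired")

loop = asyncio.new_event_loop()
trigger_events([on_start, on_start_async], loop)
loop.close()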
def delete ( self ) : """Deletes the resource ."""
return self . _client . _delete ( self . __class__ . base_url ( self . sys [ 'space' ] . id , self . sys [ 'id' ] , environment_id = self . _environment_id ) )
def make_data ( ) : """creates example data set"""
I , d = multidict ( { 1 : 80 , 2 : 270 , 3 : 250 , 4 : 160 , 5 : 180 } ) # demand J , M , f = multidict ( { 1 : [ 500 , 1000 ] , 2 : [ 500 , 1000 ] , 3 : [ 500 , 1000 ] } ) # capacity , fixed costs c = { ( 1 , 1 ) : 4 , ( 1 , 2 ) : 6 , ( 1 , 3 ) : 9 , # transportation costs ( 2 , 1 ) : 5 , ( 2 , 2 ) : 4 , ( 2 , 3 ) : 7 , ( 3 , 1 ) : 6 , ( 3 , 2 ) : 3 , ( 3 , 3 ) : 4 , ( 4 , 1 ) : 8 , ( 4 , 2 ) : 5 , ( 4 , 3 ) : 3 , ( 5 , 1 ) : 10 , ( 5 , 2 ) : 8 , ( 5 , 3 ) : 4 , } return I , J , d , M , f , c
def _method_complete ( self , result ) : """Called after a registered method with the result ."""
if isinstance ( result , ( PrettyTensor , Loss , PrettyTensorTupleMixin ) ) : return result elif ( isinstance ( result , collections . Sequence ) and not isinstance ( result , six . string_types ) ) : return self . with_sequence ( result ) else : return self . with_tensor ( result )
def delete ( self , context , plan ) : """Include a delete operation to the given plan . : param execution . Context context : Current execution context . : param list plan : List of : class : ` execution . Operation ` instances ."""
op = execution . Delete ( self . __comp_name , self . __comp ( ) ) if op not in plan and self . available ( context ) != False : for dep_stub in self . __dependents ( self . __comp_stub_reg . get ( None ) ) : dep_stub . delete ( context , plan ) plan . append ( op )
def anonymous_login_view ( request ) : '''View for an admin to log her / himself out and log in the anonymous user .'''
logout ( request ) try : spineless = User . objects . get ( username = ANONYMOUS_USERNAME ) except User . DoesNotExist : random_password = User . objects . make_random_password ( ) spineless = User . objects . create_user ( username = ANONYMOUS_USERNAME , first_name = "Anonymous" , last_name = "Coward" , password = random_password ) spineless . is_active = False spineless . save ( ) spineless_profile = UserProfile . objects . get ( user = spineless ) spineless_profile . status = UserProfile . ALUMNUS spineless_profile . save ( ) spineless . backend = 'django.contrib.auth.backends.ModelBackend' login ( request , spineless ) request . session [ 'ANONYMOUS_SESSION' ] = True messages . add_message ( request , messages . INFO , MESSAGES [ 'ANONYMOUS_LOGIN' ] ) return HttpResponseRedirect ( reverse ( 'homepage' ) )
def text_to_edtf ( text ) : """Generate EDTF string equivalent of a given natural language date string ."""
if not text : return t = text . lower ( ) # try parsing the whole thing result = text_to_edtf_date ( t ) if not result : # split by list delims and move fwd with the first thing that returns a non - empty string . # TODO : assemble multiple dates into a { } or [ ] structure . for split in [ "," , ";" , "or" ] : for list_item in t . split ( split ) : # try parsing as an interval - split by ' - ' toks = list_item . split ( "-" ) if len ( toks ) == 2 : d1 = toks [ 0 ] . strip ( ) d2 = toks [ 1 ] . strip ( ) # match looks from the beginning of the string , search # looks anywhere . if re . match ( r'\d\D\b' , d2 ) : # 1 - digit year partial e . g . 1868-9 if re . search ( r'\b\d\d\d\d$' , d1 ) : # TODO : evaluate it and see if it ' s a year d2 = d1 [ - 4 : - 1 ] + d2 elif re . match ( r'\d\d\b' , d2 ) : # 2 - digit year partial e . g . 1809-10 if re . search ( r'\b\d\d\d\d$' , d1 ) : d2 = d1 [ - 4 : - 2 ] + d2 else : century_range_match = re . search ( r'\b(\d\d)(th|st|nd|rd|)-(\d\d)(th|st|nd|rd) [cC]' , "%s-%s" % ( d1 , d2 ) ) if century_range_match : g = century_range_match . groups ( ) d1 = "%sC" % g [ 0 ] d2 = "%sC" % g [ 2 ] r1 = text_to_edtf_date ( d1 ) r2 = text_to_edtf_date ( d2 ) if r1 and r2 : result = r1 + "/" + r2 return result # is it an either / or year " 1838/1862 " - that has a different # representation in EDTF . If it ' s ' both ' , then we use { } . If # it ' s ' or ' then we use [ ] . Assuming the latter for now . # This whole section could be more friendly . else : int_match = re . search ( r"(\d\d\d\d)\/(\d\d\d\d)" , list_item ) if int_match : return "[%s, %s]" % ( int_match . group ( 1 ) , int_match . group ( 2 ) ) result = text_to_edtf_date ( list_item ) if result : break if result : break is_before = re . findall ( r'\bbefore\b' , t ) is_before = is_before or re . findall ( r'\bearlier\b' , t ) is_after = re . findall ( r'\bafter\b' , t ) is_after = is_after or re . findall ( r'\bsince\b' , t ) is_after = is_after or re . findall ( r'\blater\b' , t ) if is_before : result = u"unknown/%s" % result elif is_after : result = u"%s/unknown" % result return result
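A standalone sketch of the partial-year expansion handled above, with simplified regexes (illustrative only):

import re

def expand_partial_year(d1, d2):
    if re.search(r'\b\d{4}$', d1):
        if re.fullmatch(r'\d', d2):       # e.g. "1868-9"
            return d1, d1[-4:-1] + d2
        if re.fullmatch(r'\d\d', d2):     # e.g. "1809-10"
            return d1, d1[-4:-2] + d2
    return d1, d2

print(expand_partial_year('1868', '9'))   # ('1868', '1869')
print(expand_partial_year('1809', '10'))  # ('1809', '1810')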
def _read_para_host_id ( self , code , cbit , clen , * , desc , length , version ) : """Read HIP HOST _ ID parameter . Structure of HIP HOST _ ID parameter [ RFC 7401 ] : 0 1 2 3 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 | Type | Length | | HI Length | DI - Type | DI Length | | Algorithm | Host Identity / / | Domain Identifier / / | Padding | Octets Bits Name Description 0 0 host _ id . type Parameter Type 1 15 host _ id . critical Critical Bit 2 16 host _ id . length Length of Contents 4 32 host _ id . id _ len Host Identity Length 6 48 host _ id . di _ type Domain Identifier Type 6 52 host _ id . di _ len Domain Identifier Length 8 64 host _ id . algorithm Algorithm 10 80 host _ id . host _ id Host Identity ? ? host _ id . domain _ id Domain Identifier ? ? - Padding"""
def _read_host_identifier ( length , code ) : algorithm = _HI_ALGORITHM . get ( code , 'Unassigned' ) if algorithm == 'ECDSA' : host_id = dict ( curve = _ECDSA_CURVE . get ( self . _read_unpack ( 2 ) ) , pubkey = self . _read_fileng ( length - 2 ) , ) elif algorithm == 'ECDSA_LOW' : host_id = dict ( curve = _ECDSA_LOW_CURVE . get ( self . _read_unpack ( 2 ) ) , pubkey = self . _read_fileng ( length - 2 ) , ) else : host_id = self . _read_fileng ( length ) return algorithm , host_id def _read_domain_identifier ( di_data ) : di_type = _DI_TYPE . get ( int ( di_data [ : 4 ] , base = 2 ) , 'Unassigned' ) di_len = int ( di_data [ 4 : ] , base = 2 ) domain_id = self . _read_fileng ( di_len ) return di_type , di_len , domain_id _hlen = self . _read_unpack ( 2 ) _didt = self . _read_binary ( 2 ) _algo = self . _read_unpack ( 2 ) _hidf = _read_host_identifier ( _hlen , _algo ) _didf = _read_domain_identifier ( _didt ) host_id = dict ( type = desc , critical = cbit , length = clen , id_len = _hlen , di_type = _didf [ 0 ] , di_len = _didf [ 1 ] , algorithm = _hidf [ 0 ] , host_id = _hidf [ 1 ] , domain_id = _didf [ 2 ] , ) _plen = length - clen if _plen : self . _read_fileng ( _plen ) return host_id
def __get_stack_trace ( self , depth = 16 , bUseLabels = True , bMakePretty = True ) : """Tries to get a stack trace for the current function using the debug helper API ( dbghelp . dll ) . @ type depth : int @ param depth : Maximum depth of stack trace . @ type bUseLabels : bool @ param bUseLabels : C { True } to use labels , C { False } to use addresses . @ type bMakePretty : bool @ param bMakePretty : C { True } for user readable labels , C { False } for labels that can be passed to L { Process . resolve _ label } . " Pretty " labels look better when producing output for the user to read , while pure labels are more useful programatically . @ rtype : tuple of tuple ( int , int , str ) @ return : Stack trace of the thread as a tuple of ( return address , frame pointer address , module filename ) when C { bUseLabels } is C { True } , or a tuple of ( return address , frame pointer label ) when C { bUseLabels } is C { False } . @ raise WindowsError : Raises an exception on error ."""
aProcess = self . get_process ( ) arch = aProcess . get_arch ( ) bits = aProcess . get_bits ( ) if arch == win32 . ARCH_I386 : MachineType = win32 . IMAGE_FILE_MACHINE_I386 elif arch == win32 . ARCH_AMD64 : MachineType = win32 . IMAGE_FILE_MACHINE_AMD64 elif arch == win32 . ARCH_IA64 : MachineType = win32 . IMAGE_FILE_MACHINE_IA64 else : msg = "Stack walking is not available for this architecture: %s" raise NotImplementedError ( msg % arch ) hProcess = aProcess . get_handle ( win32 . PROCESS_VM_READ | win32 . PROCESS_QUERY_INFORMATION ) hThread = self . get_handle ( win32 . THREAD_GET_CONTEXT | win32 . THREAD_QUERY_INFORMATION ) StackFrame = win32 . STACKFRAME64 ( ) StackFrame . AddrPC = win32 . ADDRESS64 ( self . get_pc ( ) ) StackFrame . AddrFrame = win32 . ADDRESS64 ( self . get_fp ( ) ) StackFrame . AddrStack = win32 . ADDRESS64 ( self . get_sp ( ) ) trace = list ( ) while win32 . StackWalk64 ( MachineType , hProcess , hThread , StackFrame ) : if depth <= 0 : break fp = StackFrame . AddrFrame . Offset ra = aProcess . peek_pointer ( fp + 4 ) if ra == 0 : break lib = aProcess . get_module_at_address ( ra ) if lib is None : lib = "" else : if lib . fileName : lib = lib . fileName else : lib = "%s" % HexDump . address ( lib . lpBaseOfDll , bits ) if bUseLabels : label = aProcess . get_label_at_address ( ra ) if bMakePretty : label = '%s (%s)' % ( HexDump . address ( ra , bits ) , label ) trace . append ( ( fp , label ) ) else : trace . append ( ( fp , ra , lib ) ) fp = aProcess . peek_pointer ( fp ) return tuple ( trace )
def __Connection_End_lineEdit_set_ui ( self ) : """Fills * * Connection _ End _ lineEdit * * Widget ."""
# Adding settings key if it doesn ' t exists . self . __settings . get_key ( self . __settings_section , "connection_end" ) . isNull ( ) and self . __settings . set_key ( self . __settings_section , "connection_end" , self . __connection_end ) connection_end = self . __settings . get_key ( self . __settings_section , "connection_end" ) . toString ( ) LOGGER . debug ( "> Setting '{0}' with value '{1}'." . format ( "Connection_End_lineEdit" , connection_end ) ) self . __connection_end = connection_end self . Connection_End_lineEdit . setText ( connection_end )
def create_initial ( self , address_values ) : """Create futures from inputs with the current value for that address at the start of that context . Args : address _ values ( list of tuple ) : The tuple is string , bytes of the address and value ."""
with self . _lock : for add , val in address_values : self . _state [ add ] = _ContextFuture ( address = add , result = val )
def put_event_multi_touch ( self , count , contacts , scan_time ) : """Sends a multi - touch pointer event . The coordinates are expressed in pixels and start from [ 1,1 ] which corresponds to the top left corner of the virtual display . The guest may not understand or may choose to ignore this event . : py : func : ` multi _ touch _ supported ` in count of type int Number of contacts in the event . in contacts of type int Each array element contains packed information about one contact . Bits 0 . . 15 : X coordinate in pixels . Bits 16 . . 31 : Y coordinate in pixels . Bits 32 . . 39 : contact identifier . Bit 40 : " in contact " flag , which indicates that there is a contact with the touch surface . Bit 41 : " in range " flag , the contact is close enough to the touch surface . All other bits are reserved for future use and must be set to 0. in scan _ time of type int Timestamp of the event in milliseconds . Only relative time between events is important . raises : class : ` OleErrorAccessdenied ` Console not powered up . raises : class : ` VBoxErrorIprtError ` Could not send event to virtual device ."""
if not isinstance ( count , baseinteger ) : raise TypeError ( "count can only be an instance of type baseinteger" ) if not isinstance ( contacts , list ) : raise TypeError ( "contacts can only be an instance of type list" ) for a in contacts [ : 10 ] : if not isinstance ( a , baseinteger ) : raise TypeError ( "array can only contain objects of type baseinteger" ) if not isinstance ( scan_time , baseinteger ) : raise TypeError ( "scan_time can only be an instance of type baseinteger" ) self . _call ( "putEventMultiTouch" , in_p = [ count , contacts , scan_time ] )
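An illustrative helper (not part of the API) that packs one contact value according to the bit layout described in the docstring:

def pack_contact(x, y, contact_id, in_contact=True, in_range=True):
    return ((x & 0xFFFF)
            | ((y & 0xFFFF) << 16)
            | ((contact_id & 0xFF) << 32)
            | (int(in_contact) << 40)
            | (int(in_range) << 41))

contact = pack_contact(x=120, y=80, contact_id=0)
print(hex(contact))   # 0x30000500078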
def show_fibrechannel_interface_info_output_show_fibrechannel_interface_show_fibrechannel_info_port_interface ( self , ** kwargs ) : """Auto Generated Code"""
config = ET . Element ( "config" ) show_fibrechannel_interface_info = ET . Element ( "show_fibrechannel_interface_info" ) config = show_fibrechannel_interface_info output = ET . SubElement ( show_fibrechannel_interface_info , "output" ) show_fibrechannel_interface = ET . SubElement ( output , "show-fibrechannel-interface" ) portsgroup_rbridgeid_key = ET . SubElement ( show_fibrechannel_interface , "portsgroup-rbridgeid" ) portsgroup_rbridgeid_key . text = kwargs . pop ( 'portsgroup_rbridgeid' ) show_fibrechannel_info = ET . SubElement ( show_fibrechannel_interface , "show-fibrechannel-info" ) port_index_key = ET . SubElement ( show_fibrechannel_info , "port-index" ) port_index_key . text = kwargs . pop ( 'port_index' ) port_interface = ET . SubElement ( show_fibrechannel_info , "port-interface" ) port_interface . text = kwargs . pop ( 'port_interface' ) callback = kwargs . pop ( 'callback' , self . _callback ) return callback ( config )
def random_points ( self , n , minmass = None , maxmass = None , minage = None , maxage = None , minfeh = None , maxfeh = None ) : """Returns n random mass , age , feh points , none of which are out of range . : param n : Number of desired points . : param minmass , maxmass : ( optional ) Desired allowed range . Default is mass range of ` ` self ` ` . : param minage , maxage : ( optional ) Desired allowed range . Default is log10 ( age ) range of ` ` self ` ` . : param minfehs , maxfeh : ( optional ) Desired allowed range . Default is feh range of ` ` self ` ` . : return : : class : ` np . ndarray ` arrays of randomly selected mass , log10 ( age ) , and feh values within allowed ranges . Used , e . g . , to initialize random walkers for : class : ` StarModel ` fits . . . todo : : Should change this to drawing from priors ! Current implementation is a bit outdated ."""
if minmass is None : minmass = self . minmass if maxmass is None : maxmass = self . maxmass if minage is None : minage = self . minage if maxage is None : maxage = self . maxage if minfeh is None : minfeh = self . minfeh if maxfeh is None : maxfeh = self . maxfeh ms = rand . uniform ( minmass , maxmass , size = n ) ages = rand . uniform ( minage , maxage , size = n ) fehs = rand . uniform ( minfeh , maxfeh , size = n ) Rs = self . radius ( ms , ages , fehs ) bad = np . isnan ( Rs ) nbad = bad . sum ( ) while nbad > 0 : ms [ bad ] = rand . uniform ( minmass , maxmass , size = nbad ) ages [ bad ] = rand . uniform ( minage , maxage , size = nbad ) fehs [ bad ] = rand . uniform ( minfeh , maxfeh , size = nbad ) Rs = self . radius ( ms , ages , fehs ) bad = np . isnan ( Rs ) nbad = bad . sum ( ) return ms , ages , fehs
def p_jr ( p ) : """asm : JR jr _ flags COMMA expr | JR jr _ flags COMMA pexpr"""
p [ 4 ] = Expr . makenode ( Container ( '-' , p . lineno ( 3 ) ) , p [ 4 ] , Expr . makenode ( Container ( MEMORY . org + 2 , p . lineno ( 1 ) ) ) ) p [ 0 ] = Asm ( p . lineno ( 1 ) , 'JR %s,N' % p [ 2 ] , p [ 4 ] )
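For reference, the displacement arithmetic this rule encodes (hypothetical addresses): JR stores a signed 8-bit offset relative to the address just past the 2-byte instruction.

org = 0x8000                      # hypothetical current assembly address
target = 0x8010
displacement = target - (org + 2)
assert -128 <= displacement <= 127
print(displacement)               # 14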
def kill ( self , container , signal = None ) : """Kill a container or send a signal to a container . Args : container ( str ) : The container to kill signal ( str or int ) : The signal to send . Defaults to ` ` SIGKILL ` ` Raises : : py : class : ` docker . errors . APIError ` If the server returns an error ."""
url = self . _url ( "/containers/{0}/kill" , container ) params = { } if signal is not None : if not isinstance ( signal , six . string_types ) : signal = int ( signal ) params [ 'signal' ] = signal res = self . _post ( url , params = params ) self . _raise_for_status ( res )
def __return_json ( url ) : """Returns JSON data which is returned by querying the API service Called by - meaning ( ) - synonym ( ) : param url : the complete formatted url which is then queried using requests : returns : json content being fed by the API"""
with try_URL ( ) : response = requests . get ( url ) if response . status_code == 200 : return response . json ( ) else : return False
def ne ( self , value ) : """Construct a not equal to ( ` ` ! = ` ` ) filter . : param value : Filter value : return : : class : ` filters . Field < filters . Field > ` object : rtype : filters . Field"""
self . op = '!=' self . negate_op = '=' self . value = self . _value ( value ) return self
def draw_polygon ( self , * pts , close_path : bool = True , stroke : Color = None , stroke_width : float = 1 , stroke_dash : typing . Sequence = None , fill : Color = None ) -> None : """Draws a polygon through the given points ."""
pass
def run ( self ) : """Executed by Sphinx . : returns : Single DisqusNode instance with config values passed as arguments . : rtype : list"""
disqus_shortname = self . get_shortname ( ) disqus_identifier = self . get_identifier ( ) return [ DisqusNode ( disqus_shortname , disqus_identifier ) ]
def ensure_state ( default_getter , exc_class , default_msg = None ) : """Create a decorator factory function ."""
def decorator ( getter = default_getter , msg = default_msg ) : def ensure_decorator ( f ) : @ wraps ( f ) def inner ( self , * args , ** kwargs ) : if not getter ( self ) : raise exc_class ( msg ) if msg else exc_class ( ) return f ( self , * args , ** kwargs ) return inner return ensure_decorator return decorator
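A hypothetical usage sketch, assuming the `ensure_state` factory above (and its `functools.wraps` import) is in scope; the `Client` class and flag are invented for illustration:

class NotConnectedError(Exception):
    pass

ensure_connected = ensure_state(lambda self: self.connected,
                                NotConnectedError,
                                "call connect() first")

class Client:
    def __init__(self):
        self.connected = False

    @ensure_connected()
    def send(self, payload):
        return "sent %r" % (payload,)

c = Client()
try:
    c.send("hi")
except NotConnectedError as exc:
    print(exc)            # call connect() first
c.connected = True
print(c.send("hi"))       # sent 'hi'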
def load_project_metrics ( ) : """Create project metrics for financial indicator Updates them if already exists"""
all_metrics = FinancialIndicator . METRICS for key in all_metrics : df = getattr ( data , key ) pronac = 'PRONAC' if key == 'planilha_captacao' : pronac = 'Pronac' pronacs = df [ pronac ] . unique ( ) . tolist ( ) create_finance_metrics ( all_metrics [ key ] , pronacs )
def remove_root_objective_bank ( self , alias = None , objective_bank_id = None ) : """Removes a root objective bank . arg : objective _ bank _ id ( osid . id . Id ) : the ` ` Id ` ` of an objective bank raise : NotFound - ` ` objective _ bank _ id ` ` is not a root raise : NullArgument - ` ` objective _ bank _ id ` ` is ` ` null ` ` raise : OperationFailed - unable to complete request raise : PermissionDenied - authorization failure * compliance : mandatory - - This method must be implemented . *"""
url_path = self . _urls . roots ( alias = alias ) current_root_ids = self . _get_request ( url_path ) [ 'ids' ] modified_list = [ ] for root_id in current_root_ids : if root_id != str ( objective_bank_id ) : modified_list . append ( root_id ) new_root_ids = { 'ids' : modified_list } return self . _put_request ( url_path , new_root_ids )
def backup_restore ( cls , block_id , impl , working_dir ) : """Restore from a backup , given the virtualchain implementation module and block number . NOT THREAD SAFE . DO NOT CALL WHILE INDEXING . Return True on success . Raise an exception on error , i . e . if a backup file is missing ."""
backup_dir = config . get_backups_directory ( impl , working_dir ) backup_paths = cls . get_backup_paths ( block_id , impl , working_dir ) for p in backup_paths : assert os . path . exists ( p ) , "No such backup file: {}" . format ( p ) for p in cls . get_state_paths ( impl , working_dir ) : pbase = os . path . basename ( p ) backup_path = os . path . join ( backup_dir , pbase + ( ".bak.{}" . format ( block_id ) ) ) log . debug ( "Restoring '{}' to '{}'" . format ( backup_path , p ) ) shutil . copy ( backup_path , p ) return True
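Illustrative only: the backup file naming the restore loop above expects (paths are hypothetical):

import os

def backup_path_for(state_path, backup_dir, block_id):
    # <backup_dir>/<state file name>.bak.<block_id>
    return os.path.join(backup_dir,
                        os.path.basename(state_path) + ".bak.{}".format(block_id))

print(backup_path_for("/var/lib/chain/state.db", "/var/lib/chain/backups", 42))
# /var/lib/chain/backups/state.db.bak.42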
def _split_file ( self , data = '' ) : """Splits SAR output or SAR output file ( in ASCII format ) in order to extract info we need for it , in the format we want . : param data : Input data instead of file : type data : str . : return : ` ` List ` ` - style of SAR file sections separated by the type of info they contain ( SAR file sections ) without parsing what is exactly what at this point"""
# Filename passed checks through _ _ init _ _ if ( ( self . __filename and os . access ( self . __filename , os . R_OK ) ) or data != '' ) : fhandle = None if data == '' : try : fhandle = os . open ( self . __filename , os . O_RDONLY ) except OSError : print ( ( "Couldn't open file %s" % self . __filename ) ) fhandle = None if fhandle or data != '' : datalength = 0 # Dealing with mmap difference on Windows and Linux if platform . system ( ) == 'Windows' : dataprot = mmap . ACCESS_READ else : dataprot = mmap . PROT_READ if data != '' : fhandle = - 1 datalength = len ( data ) if platform . system ( ) == 'Windows' : dataprot = mmap . ACCESS_READ | mmap . ACCESS_WRITE else : dataprot = mmap . PROT_READ | mmap . PROT_WRITE try : if platform . system ( ) == 'Windows' : sarmap = mmap . mmap ( fhandle , length = datalength , access = dataprot ) else : sarmap = mmap . mmap ( fhandle , length = datalength , prot = dataprot ) if data != '' : sarmap . write ( data ) sarmap . flush ( ) sarmap . seek ( 0 , os . SEEK_SET ) except ( TypeError , IndexError ) : if data == '' : os . close ( fhandle ) traceback . print_exc ( ) # sys . exit ( - 1) return False # Here we ' ll store chunks of SAR file , unparsed searchunks = [ ] oldchunkpos = 0 dlpos = sarmap . find ( '\n\n' , 0 ) size = 0 if data == '' : # We can do mmap . size ( ) only on read - only mmaps size = sarmap . size ( ) else : # Otherwise , if data was passed to us , # we measure its length len ( data ) # oldchunkpos = dlpos while dlpos > - 1 : # mmap . find ( ) returns - 1 on failure . tempchunk = sarmap . read ( dlpos - oldchunkpos ) searchunks . append ( tempchunk . strip ( ) ) # We remember position , add 2 for 2 DD ' s # ( newspaces in production ) . We have to remember # relative value oldchunkpos += ( dlpos - oldchunkpos ) + 2 # We position to new place , to be behind \ n \ n # we ' ve looked for . try : sarmap . seek ( 2 , os . SEEK_CUR ) except ValueError : print ( ( 'Out of bounds (%s)!\n' % ( sarmap . tell ( ) ) ) ) # Now we repeat find . dlpos = sarmap . find ( "\n\n" ) # If it wasn ' t the end of file , we want last piece of it if oldchunkpos < size : tempchunk = sarmap [ oldchunkpos : ] searchunks . append ( tempchunk . strip ( ) ) sarmap . close ( ) if fhandle != - 1 : os . close ( fhandle ) if searchunks : return searchunks else : return False return False
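Setting the mmap plumbing aside, the core of the split is just cutting on blank lines; a minimal sketch:

sample = "header line\n\ncpu section\nline 2\n\nmemory section\n"
chunks = [c.strip() for c in sample.split("\n\n") if c.strip()]
print(chunks)   # ['header line', 'cpu section\nline 2', 'memory section']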
def predict ( self , * args , ** kwargs ) : """Predict given DataFrame using the given model . Actual prediction steps will not be executed till an operational step is called . After execution , three columns will be appended to the table : | Field name | Type | Comments | | prediction _ result | string | field indicating the predicted label , absent if | | | | the model is a regression model | | prediction _ score | double | field indicating the score value if the model is | | | | a classification model , or the predicted value if | | | | the model is a regression model . | | prediction _ detail | string | field in JSON format indicating the score for | | | | every class . | : type df : DataFrame : rtype : DataFrame : Example : > > > model = PmmlModel ( odps . get _ offline _ model ( ' model _ name ' ) ) > > > data = DataFrame ( odps . get _ table ( ' table _ name ' ) ) > > > # prediction below will not be executed till predicted . persist is called > > > predicted = model . predict ( data ) > > > predicted . persist ( ' predicted ' )"""
return super ( PmmlModel , self ) . predict ( * args , ** kwargs )
def get_stp_mst_detail_output_cist_port_oper_bpdu_filter ( self , ** kwargs ) : """Auto Generated Code"""
config = ET . Element ( "config" ) get_stp_mst_detail = ET . Element ( "get_stp_mst_detail" ) config = get_stp_mst_detail output = ET . SubElement ( get_stp_mst_detail , "output" ) cist = ET . SubElement ( output , "cist" ) port = ET . SubElement ( cist , "port" ) oper_bpdu_filter = ET . SubElement ( port , "oper-bpdu-filter" ) oper_bpdu_filter . text = kwargs . pop ( 'oper_bpdu_filter' ) callback = kwargs . pop ( 'callback' , self . _callback ) return callback ( config )
def redis ( self ) : """Return instance of Redis ."""
if self . _redis is None : self . _redis = redis . StrictRedis ( host = self . args . redis_host , port = self . args . redis_port ) return self . _redis