def info(docgraph):
    """print node and edge statistics of a document graph"""
    print networkx.info(docgraph), '\n'
    node_statistics(docgraph)
    print edge_statistics(docgraph)
def save_weights(weights_list, filename):
    """Save the model weights to the given filename using numpy's ".npz" format.

    Parameters
    ----------
    weights_list : list of array
    filename : string
        Should end in ".npz".
    """
    numpy.savez(filename,
                **dict((("array_%d" % i), w) for (i, w) in enumerate(weights_list)))
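A minimal round-trip sketch for this helper; the filename and array shapes are made up, and reloading with numpy.load simply mirrors the "array_%d" keys written above.

    import numpy

    weights = [numpy.zeros((3, 4)), numpy.ones(5)]   # hypothetical weight arrays
    save_weights(weights, "model.npz")
    archive = numpy.load("model.npz")
    restored = [archive["array_%d" % i] for i in range(len(weights))]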
def create_all_recommendations(self, cores, ip_views=False):
    """Calculate the recommendations for all records."""
    global _store
    _store = self.store
    _create_all_recommendations(cores, ip_views, self.config)
def upvote_num(self):
    """Get the number of upvotes received.

    :return: number of upvotes received
    :rtype: int
    """
    if self.url is None:
        return 0
    else:
        number = int(self.soup.find(
            'span', class_='zm-profile-header-user-agree').strong.text)
        return number
def check_exclude_rec(self):  # pylint: disable=access-member-before-definition
    """Check if this timeperiod is tagged.

    :return: False if tagged, True otherwise
    :rtype: bool
    """
    if self.rec_tag:
        msg = "[timeentry::%s] is in a loop in exclude parameter" % (self.get_name())
        self.add_error(msg)
        return False
    self.rec_tag = True
    for timeperiod in self.exclude:
        timeperiod.check_exclude_rec()
    return True
def _check_for_dictionary_key(self, logical_id, dictionary, keys):
    """Checks a dictionary to make sure it has a specific key. If it does not,
    an InvalidResourceException is thrown.

    :param string logical_id: logical id of this resource
    :param dict dictionary: the dictionary to check
    :param list keys: list of keys that should exist in the dictionary
    """
    for key in keys:
        if key not in dictionary:
            raise InvalidResourceException(
                logical_id,
                'Resource is missing the required [{}] property.'.format(key))
def load_file(path):
    """Open a file on your local drive, using its extension to guess its type.

    This routine only works on ``.bsp`` ephemeris files right now, but will
    gain support for additional file types in the future. ::

        from skyfield.api import load_file
        planets = load_file('~/Downloads/de421.bsp')
    """
    path = os.path.expanduser(path)
    base, ext = os.path.splitext(path)
    if ext == '.bsp':
        return SpiceKernel(path)
    raise ValueError('unrecognized file extension: {}'.format(path))
def strategy ( self , * names , ** kwargs ) : """StrategyDict wrapping method for adding a new strategy . Parameters * names : Positional arguments with all names ( strings ) that could be used to call the strategy to be added , to be used both as key items and as attribute names . keep _ name : Boolean keyword - only parameter for choosing whether the ` ` _ _ name _ _ ` ` attribute of the decorated / wrapped function should be changed or kept . Defaults to False ( i . e . , changes the name by default ) . Returns A decorator / wrapper function to be used once on the new strategy to be added . Example Let ' s create a StrategyDict that knows its name : > > > txt _ proc = StrategyDict ( " txt _ proc " ) Add a first strategy ` ` swapcase ` ` , using this method as a decorator factory : > > > @ txt _ proc . strategy ( " swapcase " ) . . . def txt _ proc ( txt ) : . . . return txt . swapcase ( ) Let ' s do it again , but wrapping the strategy functions inline . First two strategies have multiple names , the last keeps the function name , which would otherwise be replaced by the first given name : > > > txt _ proc . strategy ( " lower " , " low " ) ( lambda txt : txt . lower ( ) ) { ( . . . ) : < function . . . at 0x . . . > , ( . . . ) : < function . . . at 0x . . . > } > > > txt _ proc . strategy ( " upper " , " up " ) ( lambda txt : txt . upper ( ) ) > > > txt _ proc . strategy ( " keep " , keep _ name = True ) ( lambda txt : txt ) We can now iterate through the strategies to call them or see their function names > > > sorted ( st ( " Just a Test " ) for st in txt _ proc ) [ ' JUST A TEST ' , ' Just a Test ' , ' jUST A tEST ' , ' just a test ' ] > > > sorted ( st . _ _ name _ _ for st in txt _ proc ) # Just the first name [ ' < lambda > ' , ' lower ' , ' swapcase ' , ' upper ' ] Calling a single strategy : > > > txt _ proc . low ( " TeStInG " ) ' testing ' > > > txt _ proc [ " upper " ] ( " TeStInG " ) ' TESTING ' > > > txt _ proc ( " TeStInG " ) # Default is the first : swapcase ' tEsTiNg ' > > > txt _ proc . default ( " TeStInG " ) ' tEsTiNg ' > > > txt _ proc . default = txt _ proc . up # Manually changing the default > > > txt _ proc ( " TeStInG " ) ' TESTING ' Hint Default strategy is the one stored as the ` ` default ` ` attribute , you can change or remove it at any time . When removing all keys that are assigned to the default strategy , the default attribute will be removed from the StrategyDict instance as well . The first strategy added afterwards is the one that will become the new default , unless the attribute is created or changed manually ."""
def decorator ( func ) : keep_name = kwargs . pop ( "keep_name" , False ) if kwargs : key = next ( iter ( kwargs ) ) raise TypeError ( "Unknown keyword argument '{}'" . format ( key ) ) if not keep_name : func . __name__ = str ( names [ 0 ] ) self [ names ] = func return self return decorator
def IsBinary(self, filename):
    """Returns true if the guessed mimetype isn't in the text group."""
    mimetype = mimetypes.guess_type(filename)[0]
    if not mimetype:
        return False  # e.g. README, "real" binaries usually have an extension
    # special case for text files which don't start with text/
    if mimetype in TEXT_MIMETYPES:
        return False
    return not mimetype.startswith("text/")
def can_update_asset_contents(self, asset_id=None):
    """Tests if this user can update ``AssetContent``.

    A return of true does not guarantee successful authorization. A return of
    false indicates that it is known updating an ``AssetContent`` will result
    in a ``PermissionDenied``. This is intended as a hint to an application
    that may opt not to offer update operations to an unauthorized user.

    :param asset_id: the ``Id`` of an ``Asset``
    :type asset_id: ``osid.id.Id``
    :return: ``false`` if ``AssetContent`` modification is not authorized, ``true`` otherwise
    :rtype: ``boolean``
    :raise: ``NullArgument`` -- ``asset_id`` is ``null``

    *compliance: mandatory -- This method must be implemented.*
    """
    url_path = construct_url('authorization', bank_id=self._catalog_idstr)
    return self._get_request(url_path)['assetHints']['canUpdate']
def delete(self, block_type, block_num):
    """Deletes a block.

    :param block_type: Type of block
    :param block_num: Block number
    """
    logger.info("deleting block")
    blocktype = snap7.snap7types.block_types[block_type]
    result = self.library.Cli_Delete(self.pointer, blocktype, block_num)
    return result
def show_terms_if_not_agreed(context, field=TERMS_HTTP_PATH_FIELD):
    """Displays a modal on a current page if a user has not yet agreed to the
    given terms. If terms are not specified, the default slug is used.

    A small snippet is included into your template if a user who requested the
    view has not yet agreed the terms. The snippet takes care of displaying a
    respective modal.
    """
    request = context['request']
    url = urlparse(request.META[field])
    not_agreed_terms = TermsAndConditions.get_active_terms_not_agreed_to(request.user)
    if not_agreed_terms and is_path_protected(url.path):
        return {'not_agreed_terms': not_agreed_terms, 'returnTo': url.path}
    else:
        return {}
def _load_equipment_data(self):
    """Load equipment data for transformers, cables etc.

    Returns
    -------
    :obj:`dict` of :pandas:`pandas.DataFrame<dataframe>`
    """
    package_path = edisgo.__path__[0]
    equipment_dir = self.config['system_dirs']['equipment_dir']
    data = {}
    equipment = {'mv': ['trafos', 'lines', 'cables'],
                 'lv': ['trafos', 'cables']}
    for voltage_level, eq_list in equipment.items():
        for i in eq_list:
            equipment_parameters = self.config['equipment'][
                'equipment_{}_parameters_{}'.format(voltage_level, i)]
            data['{}_{}'.format(voltage_level, i)] = pd.read_csv(
                os.path.join(package_path, equipment_dir, equipment_parameters),
                comment='#', index_col='name', delimiter=',', decimal='.')
    return data
def datagramReceived(self, datagram, address):
    """After receiving a datagram, generate the deferreds and add myself to it."""
    def write(result):
        print "Writing %r" % result
        self.transport.write(result, address)

    d = self.d()
    # d.addCallbacks(write, log.err)
    d.addCallback(write)  # errors are silently ignored!
    d.callback(datagram)
def query_all(**kwargs):
    '''Query all the posts.'''
    kind = kwargs.get('kind', '1')
    limit = kwargs.get('limit', 10)
    return TabPost.select().where(
        (TabPost.kind == kind) & (TabPost.valid == 1)
    ).order_by(TabPost.time_update.desc()).limit(limit)
def _init_v(self, t, w0, dt):
    """Leapfrog updates the velocities offset a half-step from the position
    updates. If we're given initial conditions aligned in time, e.g. the
    positions and velocities at the same 0th step, then we have to initially
    scoot the velocities forward by a half step to prime the integrator.

    Parameters
    ----------
    dt : numeric
        The first timestep.
    """
    # here is where we scoot the velocity at t=t1 to v(t+1/2)
    F0 = self.F(t.copy(), w0.copy(), *self._func_args)
    a0 = F0[self.ndim:]
    v_1_2 = w0[self.ndim:] + a0 * dt / 2.
    return v_1_2
def request(self, path, method='GET', headers=None, **kwargs):
    """Perform a HTTP request.

    Given a relative Bugzilla URL path, an optional request method, and
    arguments suitable for requests.Request(), perform a HTTP request.
    """
    headers = {} if headers is None else headers.copy()
    headers["User-Agent"] = "Bugsy"
    kwargs['headers'] = headers
    url = '%s/%s' % (self.bugzilla_url, path)
    return self._handle_errors(self.session.request(method, url, **kwargs))
def _execute(self, native, command, data=None, returning=True, mapper=dict):
    """Executes the inputted command into the current connection cursor.

    :param command | <str>
           data    | <dict> || None

    :return [{<str> key: <variant>, ..}, ..], <int> count
    """
    if data is None:
        data = {}
    with native.cursor() as cursor:
        log.debug('***********************')
        log.debug(command % data)
        log.debug('***********************')
        try:
            rowcount = 0
            for cmd in command.split(';'):
                cmd = cmd.strip()
                if cmd:
                    cursor.execute(cmd.strip(';') + ';', data)
                    rowcount += cursor.rowcount
        # look for a disconnection error
        except pymysql.InterfaceError:
            raise orb.errors.ConnectionLost()
        # look for integrity errors
        except (pymysql.IntegrityError, pymysql.OperationalError) as err:
            native.rollback()
            # look for a duplicate error
            if err[0] == 1062:
                raise orb.errors.DuplicateEntryFound(err[1])
            # look for a reference error
            reference_error = re.search(
                'Key .* is still referenced from table ".*"', nstr(err))
            if reference_error:
                msg = 'Cannot remove this record, it is still being referenced.'
                raise orb.errors.CannotDelete(msg)
            # unknown error
            log.debug(traceback.print_exc())
            raise orb.errors.QueryFailed(command, data, nstr(err))
        # connection has closed underneath the hood
        except pymysql.Error as err:
            native.rollback()
            log.error(traceback.print_exc())
            raise orb.errors.QueryFailed(command, data, nstr(err))
        try:
            raw = cursor.fetchall()
            results = [mapper(record) for record in raw]
        except pymysql.ProgrammingError:
            results = []
        return results, rowcount
def clone(self, **kw):
    """Return a new instance with specified attributes changed.

    The new instance has the same attribute values as the current object,
    except for the changes passed in as keyword arguments.
    """
    newpolicy = self.__class__.__new__(self.__class__)
    for attr, value in self.__dict__.items():
        object.__setattr__(newpolicy, attr, value)
    for attr, value in kw.items():
        if not hasattr(self, attr):
            raise TypeError(
                "{!r} is an invalid keyword argument for {}".format(
                    attr, self.__class__.__name__))
        object.__setattr__(newpolicy, attr, value)
    return newpolicy
def notify(self, notices):
    """Send notifications to the users via the provided methods.

    Args:
        notices (:obj:`dict` of `str`: `dict`): List of the notifications to send

    Returns:
        `None`
    """
    issues_html = get_template('unattached_ebs_volume.html')
    issues_text = get_template('unattached_ebs_volume.txt')
    for recipient, issues in list(notices.items()):
        if issues:
            message_html = issues_html.render(issues=issues)
            message_text = issues_text.render(issues=issues)
            send_notification(
                subsystem=self.name,
                recipients=[recipient],
                subject=self.subject,
                body_html=message_html,
                body_text=message_text
            )
def flex_api(self):
    """Access the FlexApi Twilio Domain.

    :returns: FlexApi Twilio Domain
    :rtype: twilio.rest.flex_api.FlexApi
    """
    if self._flex_api is None:
        from twilio.rest.flex_api import FlexApi
        self._flex_api = FlexApi(self)
    return self._flex_api
def HandleGetBlocksMessageReceived(self, payload):
    """Process a GetBlocksPayload payload.

    Args:
        payload (neo.Network.Payloads.GetBlocksPayload):
    """
    if not self.leader.ServiceEnabled:
        return
    inventory = IOHelper.AsSerializableWithType(
        payload, 'neo.Network.Payloads.GetBlocksPayload.GetBlocksPayload')
    if not inventory:
        return
    blockchain = BC.Default()
    hash = inventory.HashStart[0]
    if not blockchain.GetHeader(hash):
        return
    hashes = []
    hcount = 0
    while hash != inventory.HashStop and hcount < 500:
        hash = blockchain.GetNextBlockHash(hash)
        if hash is None:
            break
        hashes.append(hash)
        hcount += 1
    if hcount > 0:
        self.SendSerializedMessage(
            Message('inv', InvPayload(type=InventoryType.Block, hashes=hashes)))
def main(argv=None):
    """Main program entry point for parsing command line arguments"""
    parser = argparse.ArgumentParser(description="Benchmark driver")
    parser.add_argument("--max-records", type=int, default=100 * 1000)
    parser.add_argument("--engine", type=str, choices=("vcfpy", "pyvcf"), default="vcfpy")
    parser.add_argument("--input-vcf", type=str, required=True,
                        help="Path to VCF file to read")
    parser.add_argument("--output-vcf", type=str, required=False,
                        help="Path to VCF file to write if given")
    args = parser.parse_args(argv)
    if args.engine == "vcfpy":
        VCFPyRunner(args).run()
    else:
        PyVCFRunner(args).run()
def get_changes(self, fixer=str.lower, task_handle=taskhandle.NullTaskHandle()):
    """Fix module names

    `fixer` is a function that takes and returns a `str`. Given the name of a
    module, it should return the fixed name.
    """
    stack = changestack.ChangeStack(self.project, 'Fixing module names')
    jobset = task_handle.create_jobset('Fixing module names',
                                       self._count_fixes(fixer) + 1)
    try:
        while True:
            for resource in self._tobe_fixed(fixer):
                jobset.started_job(resource.path)
                renamer = rename.Rename(self.project, resource)
                changes = renamer.get_changes(fixer(self._name(resource)))
                stack.push(changes)
                jobset.finished_job()
                break
            else:
                break
    finally:
        jobset.started_job('Reverting to original state')
        stack.pop_all()
        jobset.finished_job()
    return stack.merged()
def get_object_data(self, ref):
    """As get_object_header, but returns object data as well.

    :return: (hexsha, type_string, size_as_int, data_string)
    :note: not threadsafe
    """
    hexsha, typename, size, stream = self.stream_object_data(ref)
    data = stream.read(size)
    del stream
    return (hexsha, typename, size, data)
def get_comments_by_ids(self, comment_ids):
    """Gets a ``CommentList`` corresponding to the given ``IdList``.

    arg:    comment_ids (osid.id.IdList): the list of ``Ids`` to retrieve
    return: (osid.commenting.CommentList) - the returned ``Comment list``
    raise:  NotFound - an ``Id was`` not found
    raise:  NullArgument - ``comment_ids`` is ``null``
    raise:  OperationFailed - unable to complete request
    raise:  PermissionDenied - authorization failure

    *compliance: mandatory -- This method must be implemented.*
    """
    # Implemented from template for
    # osid.resource.ResourceLookupSession.get_resources_by_ids
    # NOTE: This implementation currently ignores plenary view
    collection = JSONClientValidated('commenting',
                                     collection='Comment',
                                     runtime=self._runtime)
    object_id_list = []
    for i in comment_ids:
        object_id_list.append(ObjectId(self._get_id(i, 'commenting').get_identifier()))
    result = collection.find(
        dict({'_id': {'$in': object_id_list}},
             **self._view_filter()))
    result = list(result)
    sorted_result = []
    for object_id in object_id_list:
        for object_map in result:
            if object_map['_id'] == object_id:
                sorted_result.append(object_map)
                break
    return objects.CommentList(sorted_result, runtime=self._runtime, proxy=self._proxy)
def cat_trials(x3d):
    """Concatenate trials along time axis.

    Parameters
    ----------
    x3d : array, shape (t, m, n)
        Segmented input data with t trials, m signals, and n samples.

    Returns
    -------
    x2d : array, shape (m, t * n)
        Trials are concatenated along the second axis.

    See also
    --------
    cut_segments : Cut segments from continuous data.

    Examples
    --------
    >>> x = np.random.randn(6, 4, 150)
    >>> y = cat_trials(x)
    >>> y.shape
    (4, 900)
    """
    x3d = atleast_3d(x3d)
    t = x3d.shape[0]
    return np.concatenate(np.split(x3d, t, 0), axis=2).squeeze(0)
def basic_animation(frames=100, interval=30):
    """Plot a basic sine wave with oscillating amplitude"""
    fig = plt.figure()
    ax = plt.axes(xlim=(0, 10), ylim=(-2, 2))
    line, = ax.plot([], [], lw=2)
    x = np.linspace(0, 10, 1000)

    def init():
        line.set_data([], [])
        return line,

    def animate(i):
        y = np.cos(i * 0.02 * np.pi) * np.sin(x - i * 0.02 * np.pi)
        line.set_data(x, y)
        return line,

    return animation.FuncAnimation(fig, animate, init_func=init,
                                   frames=frames, interval=interval)
def result_report_class_wise_average ( self ) : """Report class - wise averages Returns str result report in string format"""
results = self . results_class_wise_average_metrics ( ) output = self . ui . section_header ( 'Class-wise average metrics (macro-average)' , indent = 2 ) + '\n' if 'f_measure' in results and results [ 'f_measure' ] : if results [ 'f_measure' ] [ 'f_measure' ] is not None : f_measure = results [ 'f_measure' ] [ 'f_measure' ] * 100 else : f_measure = None if results [ 'f_measure' ] [ 'precision' ] is not None : precision = results [ 'f_measure' ] [ 'precision' ] * 100 else : precision = None if results [ 'f_measure' ] [ 'recall' ] is not None : recall = results [ 'f_measure' ] [ 'recall' ] * 100 else : recall = None output += self . ui . line ( 'F-measure' , indent = 2 ) + '\n' output += self . ui . data ( field = 'F-measure (F1)' , value = f_measure , unit = '%' , indent = 4 ) + '\n' output += self . ui . data ( field = 'Precision' , value = precision , unit = '%' , indent = 4 ) + '\n' output += self . ui . data ( field = 'Recall' , value = recall , unit = '%' , indent = 4 ) + '\n' if 'eer' in results and results [ 'eer' ] : if results [ 'eer' ] [ 'eer' ] is not None : eer = results [ 'eer' ] [ 'eer' ] * 100 else : eer = None output += self . ui . line ( 'Equal error rate' , indent = 2 ) + '\n' output += self . ui . data ( field = 'Equal error rate (EER)' , value = eer , unit = '%' , indent = 4 ) + '\n' return output
def set_exp(self, claim='exp', from_time=None, lifetime=None):
    """Updates the expiration time of a token."""
    if from_time is None:
        from_time = self.current_time
    if lifetime is None:
        lifetime = self.lifetime
    self.payload[claim] = datetime_to_epoch(from_time + lifetime)
def _proxy(self):
    """Generate an instance context for the instance, the context is capable of
    performing various actions. All instance actions are proxied to the context.

    :returns: EngagementContextContext for this EngagementContextInstance
    :rtype: twilio.rest.studio.v1.flow.engagement.engagement_context.EngagementContextContext
    """
    if self._context is None:
        self._context = EngagementContextContext(
            self._version,
            flow_sid=self._solution['flow_sid'],
            engagement_sid=self._solution['engagement_sid'],
        )
    return self._context
def savefig(viz, name, gallery=GALLERY):
    """Saves the figure to the gallery directory"""
    if not path.exists(gallery):
        os.makedirs(gallery)
    # Must save as png
    if len(name.split(".")) > 1:
        raise ValueError("name should not specify extension")
    outpath = path.join(gallery, name + ".png")
    viz.poof(outpath=outpath)
    print("created {}".format(outpath))
def _get_grad_method(self, data):
    r"""Get the gradient

    This method calculates the gradient step from the input data.

    Parameters
    ----------
    data : np.ndarray
        Input data array

    Notes
    -----
    Implements the following equation:

    .. math::
        \nabla F(x) = \mathbf{H}^T (\mathbf{H}\mathbf{x} - \mathbf{y})
    """
    self.grad = self.trans_op(self.op(data) - self.obs_data)
def write_config_files(self, host, hyperparameters, input_data_config):
    """Write the config files for the training containers.

    This method writes the hyperparameters, resources and input data
    configuration files.

    Args:
        host (str): Host to write the configuration for
        hyperparameters (dict): Hyperparameters for training.
        input_data_config (dict): Training input channels to be used for training.

    Returns: None
    """
    config_path = os.path.join(self.container_root, host, 'input', 'config')
    resource_config = {'current_host': host, 'hosts': self.hosts}
    json_input_data_config = {}
    for c in input_data_config:
        channel_name = c['ChannelName']
        json_input_data_config[channel_name] = {'TrainingInputMode': 'File'}
        if 'ContentType' in c:
            json_input_data_config[channel_name]['ContentType'] = c['ContentType']
    _write_json_file(os.path.join(config_path, 'hyperparameters.json'), hyperparameters)
    _write_json_file(os.path.join(config_path, 'resourceconfig.json'), resource_config)
    _write_json_file(os.path.join(config_path, 'inputdataconfig.json'), json_input_data_config)
def annotate_rule_violation(self, rule: ValidationRule) -> None:
    """Takes note of a rule validation failure by collecting its error message.

    :param rule: Rule that failed validation.
    :type rule: ValidationRule
    :return: None
    """
    if self.errors.get(rule.label) is None:
        self.errors[rule.label] = []
    self.errors[rule.label].append(rule.get_error_message())
def prepare_cached_fields(self, flist):
    """Prepare the cached fields of the fields_desc dict"""
    cls_name = self.__class__
    # Fields cache initialization
    if flist:
        Packet.class_default_fields[cls_name] = dict()
        Packet.class_default_fields_ref[cls_name] = list()
        Packet.class_fieldtype[cls_name] = dict()
        Packet.class_packetfields[cls_name] = list()
    # Fields initialization
    for f in flist:
        if isinstance(f, MultipleTypeField):
            del Packet.class_default_fields[cls_name]
            del Packet.class_default_fields_ref[cls_name]
            del Packet.class_fieldtype[cls_name]
            del Packet.class_packetfields[cls_name]
            self.class_dont_cache[cls_name] = True
            self.do_init_fields(self.fields_desc)
            break
        tmp_copy = copy.deepcopy(f.default)
        Packet.class_default_fields[cls_name][f.name] = tmp_copy
        Packet.class_fieldtype[cls_name][f.name] = f
        if f.holds_packets:
            Packet.class_packetfields[cls_name].append(f)
        # Remember references
        if isinstance(f.default, (list, dict, set, RandField, Packet)):
            Packet.class_default_fields_ref[cls_name].append(f.name)
def _MetaPythonBase():
    """Return a metaclass which implements __getitem__,
    allowing e.g. P[...] instead of P()[...]
    """
    class MagicGetItem(type):
        def __new__(mcs, name, bases, dict):
            klass = type.__new__(mcs, name, bases, dict)
            mcs.__getitem__ = lambda _, k: klass()[k]
            return klass
    return MagicGetItem
def _register_data_plane_account_arguments ( self , command_name ) : """Add parameters required to create a storage client"""
from azure . cli . core . commands . parameters import get_resource_name_completion_list from . _validators import validate_client_parameters command = self . command_loader . command_table . get ( command_name , None ) if not command : return group_name = 'Storage Account' command . add_argument ( 'account_name' , '--account-name' , required = False , default = None , arg_group = group_name , completer = get_resource_name_completion_list ( 'Microsoft.Storage/storageAccounts' ) , help = 'Storage account name. Related environment variable: AZURE_STORAGE_ACCOUNT. Must be ' 'used in conjunction with either storage account key or a SAS token. If neither are ' 'present, the command will try to query the storage account key using the ' 'authenticated Azure account. If a large number of storage commands are executed the ' 'API quota may be hit' ) command . add_argument ( 'account_key' , '--account-key' , required = False , default = None , arg_group = group_name , help = 'Storage account key. Must be used in conjunction with storage account name. ' 'Environment variable: AZURE_STORAGE_KEY' ) command . add_argument ( 'connection_string' , '--connection-string' , required = False , default = None , validator = validate_client_parameters , arg_group = group_name , help = 'Storage account connection string. Environment variable: ' 'AZURE_STORAGE_CONNECTION_STRING' ) command . add_argument ( 'sas_token' , '--sas-token' , required = False , default = None , arg_group = group_name , help = 'A Shared Access Signature (SAS). Must be used in conjunction with storage account ' 'name. Environment variable: AZURE_STORAGE_SAS_TOKEN' )
def get_parameter_dd ( self , parameter ) : """This method returns parameters as nested dicts in case of decision diagram parameter ."""
dag = defaultdict ( list ) dag_elem = parameter . find ( 'DAG' ) node = dag_elem . find ( 'Node' ) root = node . get ( 'var' ) def get_param ( node ) : edges = defaultdict ( list ) for edge in node . findall ( 'Edge' ) : if edge . find ( 'Terminal' ) is not None : edges [ edge . get ( 'val' ) ] = edge . find ( 'Terminal' ) . text elif edge . find ( 'Node' ) is not None : node_cpd = defaultdict ( list ) node_cpd [ edge . find ( 'Node' ) . get ( 'var' ) ] = get_param ( edge . find ( 'Node' ) ) edges [ edge . get ( 'val' ) ] = node_cpd elif edge . find ( 'SubDAG' ) is not None : subdag_attribute = defaultdict ( list ) subdag_attribute [ 'type' ] = edge . find ( 'SubDAG' ) . get ( 'type' ) if subdag_attribute [ 'type' ] == 'template' : subdag_attribute [ 'idref' ] = edge . find ( 'SubDAG' ) . get ( 'idref' ) if edge . find ( 'SubDAG' ) . get ( 'var' ) : subdag_attribute [ 'var' ] = edge . find ( 'SubDAG' ) . get ( 'var' ) if edge . find ( 'SubDAG' ) . get ( 'val' ) : subdag_attribute [ 'val' ] = edge . find ( 'SubDAG' ) . get ( 'val' ) edges [ edge . get ( 'val' ) ] = subdag_attribute return edges if parameter . find ( 'SubDAGTemplate' ) is not None : SubDAGTemplate = parameter . find ( 'SubDAGTemplate' ) subdag_root = SubDAGTemplate . find ( 'Node' ) subdag_node = subdag_root . get ( 'var' ) subdag_dict = defaultdict ( list ) subdag_dict [ subdag_node ] = get_param ( subdag_root ) dag [ 'SubDAGTemplate' ] = subdag_dict dag [ 'id' ] = SubDAGTemplate . get ( 'id' ) dag [ root ] = get_param ( node ) return dag
def _read_requirements(filename, extra_packages):
    """Returns a list of package requirements read from the file."""
    requirements_file = open(filename).read()
    hard_requirements = []
    for line in requirements_file.splitlines():
        if _is_requirement(line):
            if line.find(';') > -1:
                dep, condition = tuple(line.split(';'))
                extra_packages[condition.strip()].append(dep.strip())
            else:
                hard_requirements.append(line.strip())
    return hard_requirements, extra_packages
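A small illustration of how a line with an environment marker is split. The requirement text is made up, and extra_packages is assumed to behave like a collections.defaultdict(list), which the unguarded append above suggests.

    from collections import defaultdict

    extras = defaultdict(list)
    line = "pywin32>=1.0; sys_platform == 'win32'"   # hypothetical requirement line
    dep, condition = tuple(line.split(';'))
    extras[condition.strip()].append(dep.strip())
    # extras == {"sys_platform == 'win32'": ['pywin32>=1.0']}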
def color_text(text, color):
    r"""
    SeeAlso:
        highlight_text

    lexer_shortnames = sorted(ut.flatten(ut.take_column(pygments.lexers.LEXERS.values(), 2)))
    """
    import utool as ut
    if color is None or not ENABLE_COLORS:
        return text
    elif color == 'python':
        return highlight_text(text, color)
    elif color == 'sql':
        return highlight_text(text, 'sql')
    try:
        import pygments
        import pygments.console
        # if color == 'guess':
        #     import linguist  # NOQA
        #     pygments.lexers.guess_lexer(text)
        #     return highlight_text(text, color)
        ansi_text = pygments.console.colorize(color, text)
        if ut.WIN32:
            import colorama
            ansi_reset = (colorama.Style.RESET_ALL)
        else:
            ansi_reset = pygments.console.colorize('reset', '')
        ansi_text = ansi_text + ansi_reset
        return ansi_text
    except ImportError:
        return text
def apps_location_show(self, id, **kwargs):
    "https://developer.zendesk.com/rest_api/docs/core/app_locations#show-location"
    api_path = "/api/v2/apps/locations/{id}.json"
    api_path = api_path.format(id=id)
    return self.call(api_path, **kwargs)
def item_selection_changed(self):
    """Item selection has changed"""
    is_selection = len(self.selectedItems()) > 0
    self.expand_selection_action.setEnabled(is_selection)
    self.collapse_selection_action.setEnabled(is_selection)
def DecompressMessageList(cls, packed_message_list):
    """Decompress the message data from packed_message_list.

    Args:
        packed_message_list: A PackedMessageList rdfvalue with some data in it.

    Returns:
        a MessageList rdfvalue.

    Raises:
        DecodingError: If decompression fails.
    """
    compression = packed_message_list.compression
    if compression == rdf_flows.PackedMessageList.CompressionType.UNCOMPRESSED:
        data = packed_message_list.message_list
    elif (compression ==
          rdf_flows.PackedMessageList.CompressionType.ZCOMPRESSION):
        try:
            data = zlib.decompress(packed_message_list.message_list)
        except zlib.error as e:
            raise DecodingError("Failed to decompress: %s" % e)
    else:
        raise DecodingError("Compression scheme not supported")
    try:
        result = rdf_flows.MessageList.FromSerializedString(data)
    except rdfvalue.DecodeError:
        raise DecodingError("RDFValue parsing failed.")
    return result
def scalar_summary(tag, scalar):
    """Outputs a `Summary` protocol buffer containing a single scalar value.

    The generated Summary has a Tensor.proto containing the input Tensor.
    Adapted from the TensorFlow function `scalar()` at
    https://github.com/tensorflow/tensorflow/blob/r1.6/tensorflow/python/summary/summary.py

    Parameters
    ----------
    tag : str
        A name for the generated summary. Will also serve as the series name
        in TensorBoard.
    scalar : int, MXNet `NDArray`, or `numpy.ndarray`
        A scalar value or an ndarray of shape (1,).

    Returns
    -------
    A `Summary` protobuf of the `scalar` value.

    Raises
    ------
    ValueError: If the scalar has the wrong shape or type.
    """
    tag = _clean_tag(tag)
    scalar = _make_numpy_array(scalar)
    assert (scalar.squeeze().ndim == 0), 'scalar should be 0D'
    scalar = float(scalar)
    return Summary(value=[Summary.Value(tag=tag, simple_value=scalar)])
def reset(self):
    """Reset the environment.

    Returns:
        Tensor of the current observation.
    """
    observ_dtype = self._parse_dtype(self._env.observation_space)
    observ = tf.py_func(self._env.reset, [], observ_dtype, name='reset')
    observ = tf.check_numerics(observ, 'observ')
    with tf.control_dependencies([
            self._observ.assign(observ),
            self._reward.assign(0),
            self._done.assign(False)]):
        return tf.identity(observ)
def _make_order(field_path, direction):
    """Helper for :meth:`order_by`."""
    return query_pb2.StructuredQuery.Order(
        field=query_pb2.StructuredQuery.FieldReference(field_path=field_path),
        direction=_enum_from_direction(direction),
    )
def has_path_sum(root, sum):
    """
    :type root: TreeNode
    :type sum: int
    :rtype: bool
    """
    if root is None:
        return False
    if root.left is None and root.right is None and root.val == sum:
        return True
    sum -= root.val
    return has_path_sum(root.left, sum) or has_path_sum(root.right, sum)
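A quick sanity-check sketch; TreeNode here is a minimal stand-in with val/left/right, since the real class is not shown above.

    class TreeNode(object):
        def __init__(self, val, left=None, right=None):
            self.val, self.left, self.right = val, left, right

    #      5
    #     / \
    #    4   8
    root = TreeNode(5, TreeNode(4), TreeNode(8))
    has_path_sum(root, 9)   # True  (root-to-leaf path 5 -> 4)
    has_path_sum(root, 5)   # False (5 by itself is not a leaf path)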
def _analyze_case(model_dir, bench_dir, config):
    """Generates statistics from the timing summaries"""
    model_timings = set(glob.glob(os.path.join(model_dir, "*" + config["timing_ext"])))
    if bench_dir is not None:
        bench_timings = set(glob.glob(os.path.join(bench_dir, "*" + config["timing_ext"])))
    else:
        bench_timings = set()
    if not len(model_timings):
        return dict()
    model_stats = generate_timing_stats(model_timings, config['timing_vars'])
    bench_stats = generate_timing_stats(bench_timings, config['timing_vars'])
    return dict(model=model_stats, bench=bench_stats)
def helical_turbulent_fd_Czop(Re, Di, Dc):
    r'''Calculates Darcy friction factor for a fluid flowing inside a curved
    pipe such as a helical coil under turbulent conditions, using the method
    of Czop [1]_, also shown in [2]_.

    .. math::
        f_{curv} = 0.096 De^{-0.1517}

    Parameters
    ----------
    Re : float
        Reynolds number with `D=Di`, [-]
    Di : float
        Inner diameter of the coil, [m]
    Dc : float
        Diameter of the helix/coil measured from the center of the tube on one
        side to the center of the tube on the other side, [m]

    Returns
    -------
    fd : float
        Darcy friction factor for a curved pipe [-]

    Notes
    -----
    Valid for :math:`2\times10^4 < Re < 1.5\times10^{5}`. Does not use a
    straight pipe correlation, and so will not converge on the straight pipe
    result at very low curvature.

    Examples
    --------
    >>> helical_turbulent_fd_Czop(1E4, 0.01, .2)
    0.02979575250574106

    References
    ----------
    .. [1] Czop, V., D. Barbier, and S. Dong. "Pressure Drop, Void Fraction and
       Shear Stress Measurements in an Adiabatic Two-Phase Flow in a Coiled
       Tube." Nuclear Engineering and Design 149, no. 1 (September 1, 1994):
       323-33. doi:10.1016/0029-5493(94)90298-4.
    .. [2] El-Genk, Mohamed S., and Timothy M. Schriener. "A Review and
       Correlations for Convection Heat Transfer and Pressure Losses in
       Toroidal and Helically Coiled Tubes." Heat Transfer Engineering 0,
       no. 0 (June 7, 2016): 1-28. doi:10.1080/01457632.2016.1194693.
    '''
    De = Dean(Re=Re, Di=Di, D=Dc)
    return 0.096*De**-0.1517
def hash_nt_password_hash(password_hash):
    """HashNtPasswordHash"""
    md4_context = md4.new()
    md4_context.update(password_hash)
    return md4_context.digest()
def _region_from_key_id(key_id, default_region=None):
    """Determine the target region from a key ID, falling back to a default
    region if provided.

    :param str key_id: AWS KMS key ID
    :param str default_region: Region to use if no region found in key_id
    :returns: region name
    :rtype: str
    :raises UnknownRegionError: if no region found in key_id and no
        default_region provided
    """
    try:
        region_name = key_id.split(":", 4)[3]
    except IndexError:
        if default_region is None:
            raise UnknownRegionError(
                "No default region found and no region determinable from "
                "key id: {}".format(key_id))
        region_name = default_region
    return region_name
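To see why the split uses index 3: a full KMS key ARN carries the region in the fourth colon-separated field. The ARN below is a made-up example.

    key_id = "arn:aws:kms:us-west-2:111122223333:key/example-key-id"
    key_id.split(":", 4)[3]        # 'us-west-2'
    # A bare key id such as "example-key-id" contains no ':', so indexing [3]
    # raises IndexError and the fallback to default_region (or the
    # UnknownRegionError) kicks in.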
def output_markdown(markdown_cont, output_file):
    """Writes to an output file if `output_file` is a valid path."""
    if output_file:
        with open(output_file, 'w') as out:
            out.write(markdown_cont)
def new_rater(self):
    """Action: add a new rater."""
    if self.annot is None:  # remove if buttons are disabled
        self.parent.statusBar().showMessage('No score file loaded')
        return
    newuser = NewUserDialog(self.parent.value('scoring_window'))
    answer = newuser.exec_()
    if answer == QDialog.Rejected:
        return
    rater_name = newuser.rater_name.text()
    if rater_name != '':
        self.annot.add_rater(rater_name, newuser.epoch_length.value())
        self.display_notes()
        self.parent.create_menubar()
def render_data_uri(self, **kwargs):
    """Output a base 64 encoded data uri"""
    # Force protocol as data uri have none
    kwargs.setdefault('force_uri_protocol', 'https')
    return "data:image/svg+xml;charset=utf-8;base64,%s" % (
        base64.b64encode(self.render(**kwargs)).decode('utf-8').replace('\n', ''))
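A standalone sketch of the same encoding step, using a hand-written SVG byte string instead of self.render(); only the data-uri prefix and the base64 step mirror the method above.

    import base64

    svg = b'<svg xmlns="http://www.w3.org/2000/svg"></svg>'   # hypothetical rendered output
    uri = "data:image/svg+xml;charset=utf-8;base64,%s" % (
        base64.b64encode(svg).decode('utf-8').replace('\n', ''))
    # uri can be dropped straight into an <img src="..."> attribute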
def to_dict(self):
    '''Return a :class:`dict` of errors with keys as the type of error and
    values as a list of errors.

    :returns dict: a dictionary of errors
    '''
    errors = copy.deepcopy(self._errors)
    for key, val in iteritems(self._errors):
        if not len(val):
            errors.pop(key)
    return errors
def overrideable_partial(func, *args, **default_kwargs):
    """Like partial, but given kwargs can be overridden at call time."""
    import functools

    @functools.wraps(func)
    def partial_wrapper(*given_args, **given_kwargs):
        kwargs = default_kwargs.copy()
        kwargs.update(given_kwargs)
        return func(*(args + given_args), **kwargs)
    return partial_wrapper
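A short usage sketch; greet is a throwaway function invented for illustration.

    def greet(name, greeting="hello"):
        return "%s, %s" % (greeting, name)

    hi = overrideable_partial(greet, greeting="hi")
    hi("alice")                    # 'hi, alice'   (default kwarg applied)
    hi("alice", greeting="hey")    # 'hey, alice'  (overridden at call time)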
def emit ( event , * args , ** kwargs ) : """Emit a SocketIO event . This function emits a SocketIO event to one or more connected clients . A JSON blob can be attached to the event as payload . This is a function that can only be called from a SocketIO event handler , as in obtains some information from the current client context . Example : : @ socketio . on ( ' my event ' ) def handle _ my _ custom _ event ( json ) : emit ( ' my response ' , { ' data ' : 42 } ) : param event : The name of the user event to emit . : param args : A dictionary with the JSON data to send as payload . : param namespace : The namespace under which the message is to be sent . Defaults to the namespace used by the originating event . A ` ` ' / ' ` ` can be used to explicitly specify the global namespace . : param callback : Callback function to invoke with the client ' s acknowledgement . : param broadcast : ` ` True ` ` to send the message to all clients , or ` ` False ` ` to only reply to the sender of the originating event . : param room : Send the message to all the users in the given room . If this argument is set , then broadcast is implied to be ` ` True ` ` . : param include _ self : ` ` True ` ` to include the sender when broadcasting or addressing a room , or ` ` False ` ` to send to everyone but the sender . : param ignore _ queue : Only used when a message queue is configured . If set to ` ` True ` ` , the event is emitted to the clients directly , without going through the queue . This is more efficient , but only works when a single server process is used , or when there is a single addresee . It is recommended to always leave this parameter with its default value of ` ` False ` ` ."""
if 'namespace' in kwargs : namespace = kwargs [ 'namespace' ] else : namespace = flask . request . namespace callback = kwargs . get ( 'callback' ) broadcast = kwargs . get ( 'broadcast' ) room = kwargs . get ( 'room' ) if room is None and not broadcast : room = flask . request . sid include_self = kwargs . get ( 'include_self' , True ) ignore_queue = kwargs . get ( 'ignore_queue' , False ) socketio = flask . current_app . extensions [ 'socketio' ] return socketio . emit ( event , * args , namespace = namespace , room = room , include_self = include_self , callback = callback , ignore_queue = ignore_queue )
def get_seqstarts(bamfile, N):
    """Go through the SQ headers and pull out all sequences with size greater
    than the resolution settings, i.e. contains at least a few cells
    """
    import pysam
    bamfile = pysam.AlignmentFile(bamfile, "rb")
    seqsize = {}
    for kv in bamfile.header["SQ"]:
        if kv["LN"] < 10 * N:
            continue
        seqsize[kv["SN"]] = kv["LN"] / N + 1
    allseqs = natsorted(seqsize.keys())
    allseqsizes = np.array([seqsize[x] for x in allseqs])
    seqstarts = np.cumsum(allseqsizes)
    seqstarts = np.roll(seqstarts, 1)
    total_bins = seqstarts[0]
    seqstarts[0] = 0
    seqstarts = dict(zip(allseqs, seqstarts))
    return seqstarts, seqsize, total_bins
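The cumsum/roll trick above is easy to miss; here is a numpy-only illustration with made-up bin counts.

    import numpy as np

    sizes = np.array([5, 3, 4])        # hypothetical bins per retained sequence
    starts = np.cumsum(sizes)          # [ 5,  8, 12]
    starts = np.roll(starts, 1)        # [12,  5,  8] -- the grand total lands in slot 0
    total_bins = starts[0]             # 12
    starts[0] = 0                      # [ 0,  5,  8] -> start offset of each sequence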
def _get_rank_limits(comm, arrlen):
    """Determine the chunk of the grid that has to be computed per process.
    The grid has been 'flattened' and has arrlen length. The chunk assigned to
    each process depends on its rank in the MPI communicator.

    Parameters
    ----------
    comm : MPI communicator object
        Describes topology of network: number of processes, rank
    arrlen : int
        Number of points in grid search.

    Returns
    -------
    begin : int
        Index, with respect to 'flattened' grid, where the chunk for this
        process starts.
    end : int
        Index, with respect to 'flattened' grid, where the chunk for this
        process ends.
    """
    rank = comm.Get_rank()  # Id of this process
    size = comm.Get_size()  # Total number of processes in communicator
    end = 0
    # The scan should be done with ints, not floats
    ranklen = int(arrlen / size)
    if rank < arrlen % size:
        ranklen += 1
    # Compute upper limit based on the sizes covered by the processes
    # with less rank
    end = comm.scan(sendobj=ranklen, op=MPI.SUM)
    begin = end - ranklen
    return (begin, end)
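To make the chunking arithmetic concrete without MPI, the same computation unrolled over all ranks looks like this; the running end mimics what comm.scan with MPI.SUM returns on each process, and arrlen/size are arbitrary example values.

    arrlen, size = 10, 4
    chunks = []
    end = 0
    for rank in range(size):
        ranklen = arrlen // size + (1 if rank < arrlen % size else 0)
        end += ranklen                 # prefix sum, as comm.scan(..., op=MPI.SUM) would give
        chunks.append((end - ranklen, end))
    # chunks == [(0, 3), (3, 6), (6, 8), (8, 10)]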
def dataframe ( self , filtered_dims = { } , unstack = False , df_class = None , add_code = False ) : """Yield rows in a reduced format , with one dimension as an index , one measure column per secondary dimension , and all other dimensions filtered . : param measure : The column names of one or more measures : param p _ dim : The primary dimension . This will be the index of the dataframe . : param s _ dim : a secondary dimension . The returned frame will be unstacked on this dimension : param unstack : : param filtered _ dims : A dict of dimension columns names that are filtered , mapped to the dimension value to select . : param add _ code : When substitution a label for a column , also add the code value . : return :"""
measure = self . table . column ( measure ) p_dim = self . table . column ( p_dim ) assert measure assert p_dim if s_dim : s_dim = self . table . column ( s_dim ) from six import text_type def maybe_quote ( v ) : from six import string_types if isinstance ( v , string_types ) : return '"{}"' . format ( v ) else : return v all_dims = [ p_dim . name ] + filtered_dims . keys ( ) if s_dim : all_dims . append ( s_dim . name ) if filtered_dims : all_dims += filtered_dims . keys ( ) all_dims = [ text_type ( c ) for c in all_dims ] # " primary _ dimensions " means something different here , all of the dimensions in the # dataset that do not have children . primary_dims = [ text_type ( c . name ) for c in self . primary_dimensions ] if set ( all_dims ) != set ( primary_dims ) : raise ValueError ( "The primary, secondary and filtered dimensions must cover all dimensions" + " {} != {}" . format ( sorted ( all_dims ) , sorted ( primary_dims ) ) ) columns = [ ] p_dim_label = None s_dim_label = None if p_dim . label : # For geographic datasets , also need the gvid if p_dim . type_is_gvid : columns . append ( p_dim . name ) p_dim = p_dim_label = p_dim . label columns . append ( p_dim_label . name ) else : columns . append ( p_dim . name ) if s_dim : if s_dim . label : s_dim = s_dim_label = s_dim . label columns . append ( s_dim_label . name ) else : columns . append ( s_dim . name ) columns . append ( measure . name ) # Create the predicate to filter out the filtered dimensions if filtered_dims : code = ' and ' . join ( "row.{} == {}" . format ( k , maybe_quote ( v ) ) for k , v in filtered_dims . items ( ) ) predicate = eval ( 'lambda row: {}' . format ( code ) ) else : predicate = lambda row : True df = self . analysis . dataframe ( predicate , columns = columns , df_class = df_class ) if unstack : # Need to set the s _ dim in the index to get a hierarchical index , required for unstacking . # The final df will have only the p _ dim as an index . if s_dim : df = df . set_index ( [ p_dim . name , s_dim . name ] ) df = df . unstack ( ) df . columns = df . columns . get_level_values ( 1 ) # [ ' ' . join ( col ) . strip ( ) for col in df . columns . values ] else : # Can ' t actually unstack without a second dimension . df = df . set_index ( p_dim . name ) df . reset_index ( ) return df
def map(cls, obj, mode='data', backend=None):
    """Applies compositor operations to any HoloViews element or container
    using the map method.
    """
    from .overlay import CompositeOverlay
    element_compositors = [c for c in cls.definitions if len(c._pattern_spec) == 1]
    overlay_compositors = [c for c in cls.definitions if len(c._pattern_spec) > 1]
    if overlay_compositors:
        obj = obj.map(lambda obj: cls.collapse_element(obj, mode=mode, backend=backend),
                      [CompositeOverlay])
    element_patterns = [c.pattern for c in element_compositors]
    if element_compositors and obj.traverse(lambda x: x, element_patterns):
        obj = obj.map(lambda obj: cls.collapse_element(obj, mode=mode, backend=backend),
                      element_patterns)
    return obj
async def volume(gc: GroupControl, volume):
    """Adjust volume [-100, 100]"""
    click.echo("Setting volume to %s" % volume)
    click.echo(await gc.set_group_volume(volume))
def make_varname(tree):
    """<left>tree</left>"""
    if tree.tag == 'identifier':
        return tree.attrib['name']
    if tree.tag in ('string', 'boolean'):
        return tree.text
    if tree.tag == 'number':
        return tree.attrib['value']
    if tree.tag in ('property', 'object'):
        return make_varname(_xpath_one(tree, '*'))
    if tree.tag.endswith('accessor'):
        kind = tree.tag[:-len('accessor')]
        obj = make_varname(_xpath_one(tree, 'object'))
        prop = make_varname(_xpath_one(tree, 'property'))
        if kind == 'dot':
            fmt = '%s.%s'
        elif kind == 'bracket':
            fmt = '%s[%s]'
        else:
            raise ValueError("Unknown accessor: %s" % tree.tag)
        return fmt % (obj, prop)
    raise ValueError("Unknown tag: %s" % tree.tag)
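A rough illustration of the accessor handling. It assumes the source XML uses the tags the function checks for and that _xpath_one (a helper not shown above) returns the single matching child; both are guesses made from the code itself.

    import xml.etree.ElementTree as ET

    xml = ('<dotaccessor>'
           '<object><identifier name="window"/></object>'
           '<property><identifier name="location"/></property>'
           '</dotaccessor>')
    # With _xpath_one(tree, path) behaving like tree.findall(path)[0],
    # make_varname(ET.fromstring(xml)) would yield "window.location";
    # a bracketaccessor tree would instead yield "window[location]".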
def get_strings(soup, tag):
    """Get all the string children from an html tag."""
    tags = soup.find_all(tag)
    strings = [s.string for s in tags if s.string]
    return strings
def plotwrapper ( f ) : """This decorator allows for PyMC arguments of various types to be passed to the plotting functions . It identifies the type of object and locates its trace ( s ) , then passes the data to the wrapped plotting function ."""
def wrapper ( pymc_obj , * args , ** kwargs ) : start = 0 if 'start' in kwargs : start = kwargs . pop ( 'start' ) # Figure out what type of object it is try : # First try Model type for variable in pymc_obj . _variables_to_tally : # Plot object if variable . _plot is not False : data = pymc_obj . trace ( variable . __name__ ) [ start : ] if size ( data [ - 1 ] ) >= 10 and variable . _plot != True : continue elif variable . dtype is dtype ( 'object' ) : continue name = variable . __name__ if args : name = '%s_%s' % ( args [ 0 ] , variable . __name__ ) f ( data , name , * args , ** kwargs ) return except AttributeError : pass try : # Then try Trace type data = pymc_obj ( ) [ : ] name = pymc_obj . name f ( data , name , * args , ** kwargs ) return except ( AttributeError , TypeError ) : pass try : # Then try Node type if pymc_obj . _plot is not False : data = pymc_obj . trace ( ) [ start : ] # This is deprecated . DH name = pymc_obj . __name__ f ( data , name , * args , ** kwargs ) return except AttributeError : pass if isinstance ( pymc_obj , dict ) : # Then try dictionary for i in pymc_obj : data = pymc_obj [ i ] [ start : ] if args : i = '%s_%s' % ( args [ 0 ] , i ) elif 'name' in kwargs : i = '%s_%s' % ( kwargs . pop ( 'name' ) , i ) f ( data , i , * args , ** kwargs ) return # If others fail , assume that raw data is passed f ( pymc_obj , * args , ** kwargs ) wrapper . __doc__ = f . __doc__ wrapper . __name__ = f . __name__ return wrapper
def _create_binary_mathfunction(name, doc=""):
    """Create a binary mathfunction by name"""
    def _(col1, col2):
        sc = SparkContext._active_spark_context
        # For legacy reasons, the arguments here can be implicitly converted
        # into floats, if they are not columns or strings.
        if isinstance(col1, Column):
            arg1 = col1._jc
        elif isinstance(col1, basestring):
            arg1 = _create_column_from_name(col1)
        else:
            arg1 = float(col1)
        if isinstance(col2, Column):
            arg2 = col2._jc
        elif isinstance(col2, basestring):
            arg2 = _create_column_from_name(col2)
        else:
            arg2 = float(col2)
        jc = getattr(sc._jvm.functions, name)(arg1, arg2)
        return Column(jc)
    _.__name__ = name
    _.__doc__ = doc
    return _
def init():
    """Initialize foreground and background attributes."""
    global _default_foreground, _default_background, _default_style
    try:
        attrs = GetConsoleScreenBufferInfo().wAttributes
    except (ArgumentError, WindowsError):
        _default_foreground = GREY
        _default_background = BLACK
        _default_style = NORMAL
    else:
        _default_foreground = attrs & 7
        _default_background = (attrs >> 4) & 7
        _default_style = attrs & BRIGHT
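As a quick illustration of the bit arithmetic above: the low three bits of the console attribute word select the foreground colour and bits 4-6 select the background. The attribute value below is made up.

    attrs = 0x17                   # hypothetical wAttributes value
    foreground = attrs & 7         # 7
    background = (attrs >> 4) & 7  # 1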
def get_app_main_kwargs(self, kwargs, keep=False):
    """Extract the keyword arguments for the :meth:`app_main` method

    Parameters
    ----------
    kwargs : dict
        A mapping containing keyword arguments for the :meth:`app_main` method
    keep : bool
        If True, the keywords are kept in the `kwargs`. Otherwise, they are
        removed

    Returns
    -------
    dict
        The keyword arguments for the :meth:`app_main` method

    Notes
    -----
    The returned keyword arguments are deleted from `kwargs`
    """
    if not keep:
        return {key: kwargs.pop(key) for key in list(kwargs)
                if key in inspect.getargspec(self.app_main)[0]}
    else:
        return {key: kwargs[key] for key in list(kwargs)
                if key in inspect.getargspec(self.app_main)[0]}
def close_sids(self, rec, trt, mag):
    """
    :param rec: a record with fields minlon, minlat, maxlon, maxlat
    :param trt: tectonic region type string
    :param mag: magnitude
    :returns: the site indices within the bounding box enlarged by the
        integration distance for the given TRT and magnitude
    """
    if self.sitecol is None:
        return []
    elif not self.integration_distance:  # do not filter
        return self.sitecol.sids
    if hasattr(rec, 'dtype'):
        bbox = rec['minlon'], rec['minlat'], rec['maxlon'], rec['maxlat']
    else:
        bbox = rec  # assume it is a 4-tuple
    maxdist = self.integration_distance(trt, mag)
    a1 = min(maxdist * KM_TO_DEGREES, 90)
    a2 = min(angular_distance(maxdist, bbox[1], bbox[3]), 180)
    bb = bbox[0] - a2, bbox[1] - a1, bbox[2] + a2, bbox[3] + a1
    if hasattr(self, 'index'):  # RtreeFilter
        return within(bb, self.index)
    return self.sitecol.within_bbox(bb)
def read_from_file(self, filename):
    """Read from an existing json file.

    :param filename: The file to be read from.
    :type filename: basestring, str

    :returns: Success status. -1 for unsuccessful, 0 for success
    :rtype: int
    """
    if not exists(filename):
        return -1
    with open(filename) as fd:
        needs_json = fd.read()
        try:
            minimum_needs = json.loads(needs_json)
        except (TypeError, ValueError):
            minimum_needs = None
    if not minimum_needs:
        return -1
    return self.update_minimum_needs(minimum_needs)
def response(credentials, password, request):
    """Compile digest auth response

    If the qop directive's value is "auth" or "auth-int", then compute the
    response as follows:
        RESPONSE = MD5(HA1:nonce:nonceCount:clientNonce:qop:HA2)
    Else if the qop directive is unspecified, then compute the response as
    follows:
        RESPONSE = MD5(HA1:nonce:HA2)

    Arguments:
    - `credentials`: credentials dict
    - `password`: request user password
    - `request`: request dict
    """
    response = None
    algorithm = credentials.get('algorithm')
    HA1_value = HA1(credentials.get('realm'), credentials.get('username'),
                    password, algorithm)
    HA2_value = HA2(credentials, request, algorithm)
    if credentials.get('qop') is None:
        response = H(b":".join([HA1_value.encode('utf-8'),
                                credentials.get('nonce', '').encode('utf-8'),
                                HA2_value.encode('utf-8')]), algorithm)
    elif credentials.get('qop') == 'auth' or credentials.get('qop') == 'auth-int':
        for k in 'nonce', 'nc', 'cnonce', 'qop':
            if k not in credentials:
                raise ValueError("%s required for response H" % k)
        response = H(b":".join([HA1_value.encode('utf-8'),
                                credentials.get('nonce').encode('utf-8'),
                                credentials.get('nc').encode('utf-8'),
                                credentials.get('cnonce').encode('utf-8'),
                                credentials.get('qop').encode('utf-8'),
                                HA2_value.encode('utf-8')]), algorithm)
    else:
        raise ValueError("qop value are wrong")
    return response
def png(contents, kvs):
    """Creates a png if needed."""
    outfile = os.path.join(IMAGEDIR, sha(contents + str(kvs['staffsize'])))
    src = outfile + '.png'
    if not os.path.isfile(src):
        try:
            os.mkdir(IMAGEDIR)
            stderr.write('Created directory ' + IMAGEDIR + '\n')
        except OSError:
            pass
        ly2png(contents, outfile, kvs)
        stderr.write('Created image ' + src + '\n')
    return src
def checkPermissions(permissions=[], obj=None):
    """Checks if a user has permissions for a given object.

    Args:
        permissions: The permissions the current user must be compliant with
        obj: The object for which the permissions apply

    Returns:
        1 if the user complies with all the permissions for the given object.
        Otherwise, it returns empty.
    """
    if not obj:
        return False
    sm = getSecurityManager()
    for perm in permissions:
        if not sm.checkPermission(perm, obj):
            return ''
    return True
def engage ( args , password ) : """Construct payloads and POST to Red October"""
if args [ 'create' ] : payload = { 'Name' : args [ '--user' ] , 'Password' : password } goodquit_json ( api_call ( 'create' , args , payload ) ) elif args [ 'delegate' ] : payload = { 'Name' : args [ '--user' ] , 'Password' : password , 'Time' : args [ '--time' ] , 'Uses' : args [ '--uses' ] } goodquit_json ( api_call ( 'delegate' , args , payload ) ) elif args [ 'encrypt' ] : payload = { 'Name' : args [ '--user' ] , 'Password' : password , 'Minimum' : args [ '--min' ] , 'Owners' : args [ '--owners' ] . split ( ',' ) , 'Data' : ( args [ '--str' ] if args [ '--file' ] is None else read_file ( args [ '--file' ] ) ) } goodquit_json ( api_call ( 'encrypt' , args , payload ) ) elif args [ 'decrypt' ] : payload = { 'Name' : args [ '--user' ] , 'Password' : password , 'Data' : ( args [ '--str' ] if args [ '--file' ] is None else read_file ( args [ '--file' ] ) ) } goodquit_json ( api_call ( 'decrypt' , args , payload ) ) elif args [ 'summary' ] : payload = { 'Name' : args [ '--user' ] , 'Password' : password } goodquit_json ( api_call ( 'summary' , args , payload ) ) elif args [ 'change-password' ] : args [ 'newpass' ] = getpass . getpass ( 'New Password: ' ) payload = { 'Name' : args [ '--user' ] , 'Password' : password , 'NewPassword' : args [ 'newpass' ] } goodquit_json ( api_call ( 'password' , args , payload ) ) elif args [ 'modify' ] : payload = { 'Name' : args [ '--user' ] , 'Password' : password , 'Command' : args [ '--action' ] , 'ToModify' : args [ '--target' ] } goodquit_json ( api_call ( 'modify' , args , payload ) )
def _get_labels(self, frequency, subject_id=None, visit_id=None):
    """Returns the labels for the XNAT subject and sessions given the
    frequency and provided IDs.
    """
    if frequency == 'per_session':
        subj_label = '{}_{}'.format(self.project_id, subject_id)
        sess_label = '{}_{}_{}'.format(self.project_id, subject_id, visit_id)
    elif frequency == 'per_subject':
        subj_label = '{}_{}'.format(self.project_id, subject_id)
        sess_label = '{}_{}_{}'.format(self.project_id, subject_id, self.SUMMARY_NAME)
    elif frequency == 'per_visit':
        subj_label = '{}_{}'.format(self.project_id, self.SUMMARY_NAME)
        sess_label = '{}_{}_{}'.format(self.project_id, self.SUMMARY_NAME, visit_id)
    elif frequency == 'per_study':
        subj_label = '{}_{}'.format(self.project_id, self.SUMMARY_NAME)
        sess_label = '{}_{}_{}'.format(self.project_id, self.SUMMARY_NAME, self.SUMMARY_NAME)
    else:
        assert False
    return (subj_label, sess_label)
def plot ( network , margin = 0.05 , ax = None , geomap = True , projection = None , bus_colors = 'b' , line_colors = 'g' , bus_sizes = 10 , line_widths = 2 , title = "" , line_cmap = None , bus_cmap = None , boundaries = None , geometry = False , branch_components = [ 'Line' , 'Link' ] , jitter = None , basemap = None ) : """Plot the network buses and lines using matplotlib and Basemap . Parameters margin : float Margin at the sides as proportion of distance between max / min x , y ax : matplotlib ax , defaults to plt . gca ( ) Axis to which to plot the network geomap : bool / str , default True Switch to use Basemap or Cartopy ( depends on what is installed ) . If a string is passed , it will be used as a resolution argument . For Basemap users ' c ' ( crude ) , ' l ' ( low ) , ' i ' ( intermediate ) , ' h ' ( high ) , ' f ' ( full ) are valid resolution options . For Cartopy users ' 10m ' , ' 50m ' , ' 110m ' are valid resolution options . projection : cartopy . crs . Projection , defaults to None Define the projection of your geomap , only valid if cartopy is installed . If None ( default ) is passed the projection for cartopy is set to cartopy . crs . PlateCarree bus _ colors : dict / pandas . Series Colors for the buses , defaults to " b " bus _ sizes : dict / pandas . Series Sizes of bus points , defaults to 10 line _ colors : dict / pandas . Series Colors for the lines , defaults to " g " for Lines and " cyan " for Links . Colors for branches other than Lines can be specified using a pandas Series with a MultiIndex . line _ widths : dict / pandas . Series Widths of lines , defaults to 2 . Widths for branches other than Lines can be specified using a pandas Series with a MultiIndex . title : string Graph title line _ cmap : plt . cm . ColorMap / str | dict If line _ colors are floats , this color map will assign the colors . Use a dict to specify colormaps for more than one branch type . bus _ cmap : plt . cm . ColorMap / str If bus _ colors are floats , this color map will assign the colors boundaries : list of four floats Boundaries of the plot in format [ x1 , x2 , y1 , y2] branch _ components : list of str Branch components to be plotted , defaults to Line and Link . jitter : None | float Amount of random noise to add to bus positions to distinguish overlapping buses Returns bus _ collection , branch _ collection1 , . . . : tuple of Collections Collections for buses and branches ."""
defaults_for_branches = { 'Link' : dict ( color = "cyan" , width = 2 ) , 'Line' : dict ( color = "b" , width = 2 ) , 'Transformer' : dict ( color = 'green' , width = 2 ) } if not plt_present : logger . error ( "Matplotlib is not present, so plotting won't work." ) return if basemap is not None : logger . warning ( "argument `basemap` is deprecated, " "use `geomap` instead." ) geomap = basemap if cartopy_present and geomap : if projection is None : projection = get_projection_from_crs ( network . srid ) if ax is None : ax = plt . gca ( projection = projection ) else : assert isinstance ( ax , cartopy . mpl . geoaxes . GeoAxesSubplot ) , ( 'The passed axis is not a GeoAxesSubplot. You can ' 'create one with: \nimport cartopy.crs as ccrs \n' 'fig, ax = plt.subplots(' 'subplot_kw={"projection":ccrs.PlateCarree()})' ) elif ax is None : ax = plt . gca ( ) x , y = network . buses [ "x" ] , network . buses [ "y" ] if jitter is not None : x = x + np . random . uniform ( low = - jitter , high = jitter , size = len ( x ) ) y = y + np . random . uniform ( low = - jitter , high = jitter , size = len ( y ) ) if geomap : transform = draw_map ( network , x , y , ax , boundaries , margin , geomap ) else : transform = ax . transData if isinstance ( bus_sizes , pd . Series ) and isinstance ( bus_sizes . index , pd . MultiIndex ) : # We are drawing pies to show all the different shares assert len ( bus_sizes . index . levels [ 0 ] . difference ( network . buses . index ) ) == 0 , "The first MultiIndex level of bus_sizes must contain buses" assert ( isinstance ( bus_colors , dict ) and set ( bus_colors ) . issuperset ( bus_sizes . index . levels [ 1 ] ) ) , "bus_colors must be a dictionary defining a color for each element " "in the second MultiIndex level of bus_sizes" bus_sizes = bus_sizes . sort_index ( level = 0 , sort_remaining = False ) * projected_area_factor ( ax , network . srid ) ** 2 patches = [ ] for b_i in bus_sizes . index . levels [ 0 ] : s = bus_sizes . loc [ b_i ] radius = s . sum ( ) ** 0.5 if radius == 0.0 : ratios = s else : ratios = s / s . sum ( ) start = 0.25 for i , ratio in ratios . iteritems ( ) : patches . append ( Wedge ( ( x . at [ b_i ] , y . at [ b_i ] ) , radius , 360 * start , 360 * ( start + ratio ) , facecolor = bus_colors [ i ] ) ) start += ratio bus_collection = PatchCollection ( patches , match_original = True , transform = transform ) ax . add_collection ( bus_collection ) else : c = pd . Series ( bus_colors , index = network . buses . index ) s = pd . Series ( bus_sizes , index = network . buses . index , dtype = "float" ) . fillna ( 10 ) bus_collection = ax . scatter ( x , y , c = c , s = s , cmap = bus_cmap , edgecolor = 'face' , transform = transform ) def as_branch_series ( ser ) : if isinstance ( ser , dict ) and set ( ser ) . issubset ( branch_components ) : return pd . Series ( ser ) elif isinstance ( ser , pd . Series ) : if isinstance ( ser . index , pd . MultiIndex ) : return ser index = ser . index ser = ser . values else : index = network . lines . index return pd . Series ( ser , index = pd . MultiIndex ( levels = ( [ "Line" ] , index ) , codes = ( np . zeros ( len ( index ) ) , np . arange ( len ( index ) ) ) ) ) line_colors = as_branch_series ( line_colors ) line_widths = as_branch_series ( line_widths ) if not isinstance ( line_cmap , dict ) : line_cmap = { 'Line' : line_cmap } branch_collections = [ ] for c in network . iterate_components ( branch_components ) : l_defaults = defaults_for_branches [ c . name ] l_widths = line_widths . get ( c . 
name , l_defaults [ 'width' ] ) l_nums = None l_colors = line_colors . get ( c . name , l_defaults [ 'color' ] ) if isinstance ( l_colors , pd . Series ) : if issubclass ( l_colors . dtype . type , np . number ) : l_nums = l_colors l_colors = None else : l_colors . fillna ( l_defaults [ 'color' ] , inplace = True ) if not geometry : segments = ( np . asarray ( ( ( c . df . bus0 . map ( x ) , c . df . bus0 . map ( y ) ) , ( c . df . bus1 . map ( x ) , c . df . bus1 . map ( y ) ) ) ) . transpose ( 2 , 0 , 1 ) ) else : from shapely . wkt import loads from shapely . geometry import LineString linestrings = c . df . geometry . map ( loads ) assert all ( isinstance ( ls , LineString ) for ls in linestrings ) , ( "The WKT-encoded geometry in the 'geometry' column must be " "composed of LineStrings" ) segments = np . asarray ( list ( linestrings . map ( np . asarray ) ) ) l_collection = LineCollection ( segments , linewidths = l_widths , antialiaseds = ( 1 , ) , colors = l_colors , transOffset = ax . transData , transform = transform ) if l_nums is not None : l_collection . set_array ( np . asarray ( l_nums ) ) l_collection . set_cmap ( line_cmap . get ( c . name , None ) ) l_collection . autoscale ( ) ax . add_collection ( l_collection ) l_collection . set_zorder ( 1 ) branch_collections . append ( l_collection ) bus_collection . set_zorder ( 2 ) ax . update_datalim ( compute_bbox_with_margins ( margin , x , y ) ) ax . autoscale_view ( ) if geomap : if cartopy_present : ax . outline_patch . set_visible ( False ) ax . axis ( 'off' ) ax . set_title ( title ) return ( bus_collection , ) + tuple ( branch_collections )
def padded_cross_entropy_loss ( logits , labels , smoothing , vocab_size ) : """Calculate cross entropy loss while ignoring padding . Args : logits : Tensor of size [ batch _ size , length _ logits , vocab _ size ] labels : Tensor of size [ batch _ size , length _ labels ] smoothing : Label smoothing constant , used to determine the on and off values vocab _ size : int size of the vocabulary Returns : Returns a float32 tensor with shape [ batch _ size , max ( length _ logits , length _ labels ) ]"""
with tf . name_scope ( "loss" , [ logits , labels ] ) : logits , labels = _pad_tensors_to_same_length ( logits , labels ) # Calculate smoothing cross entropy with tf . name_scope ( "smoothing_cross_entropy" , [ logits , labels ] ) : confidence = 1.0 - smoothing low_confidence = ( 1.0 - confidence ) / tf . to_float ( vocab_size - 1 ) soft_targets = tf . one_hot ( tf . cast ( labels , tf . int32 ) , depth = vocab_size , on_value = confidence , off_value = low_confidence ) xentropy = tf . nn . softmax_cross_entropy_with_logits_v2 ( logits = logits , labels = soft_targets ) # Calculate the best ( lowest ) possible value of cross entropy , and # subtract from the cross entropy loss . normalizing_constant = - ( confidence * tf . log ( confidence ) + tf . to_float ( vocab_size - 1 ) * low_confidence * tf . log ( low_confidence + 1e-20 ) ) xentropy -= normalizing_constant weights = tf . to_float ( tf . not_equal ( labels , 0 ) ) return xentropy * weights , weights
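As a quick numerical check of the smoothing scheme above (a NumPy sketch, not the TensorFlow graph itself): with vocabulary size V the target class gets 1 - smoothing and every other class gets smoothing / (V - 1), so each soft target row still sums to 1, and the subtracted normalizing constant is the entropy of that row.

import numpy as np

vocab_size, smoothing = 5, 0.1
confidence = 1.0 - smoothing
low_confidence = (1.0 - confidence) / (vocab_size - 1)

label = 2  # hypothetical target token id
soft_target = np.full(vocab_size, low_confidence)
soft_target[label] = confidence
print(soft_target)  # [0.025 0.025 0.9 0.025 0.025], sums to 1 up to rounding

# Lowest achievable cross entropy against this soft target
# (the "normalizing_constant" subtracted in the graph above).
normalizing_constant = -(confidence * np.log(confidence)
                         + (vocab_size - 1) * low_confidence * np.log(low_confidence + 1e-20))
print(normalizing_constant)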
def _mean_dict ( dict_list ) : """Compute the mean value across a list of dictionaries"""
return { k : np . array ( [ d [ k ] for d in dict_list ] ) . mean ( ) for k in dict_list [ 0 ] . keys ( ) }
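A quick usage check of this helper (values chosen so the means are exact):

dict_list = [{'loss': 1.0, 'acc': 0.5}, {'loss': 3.0, 'acc': 1.5}]
print(_mean_dict(dict_list))  # per-key means: loss 2.0, acc 1.0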
def _encodeRelativeValidityPeriod ( validityPeriod ) : """Encodes the specified relative validity period timedelta into an integer for use in an SMS PDU ( based on the table in section 9.2.3.12 of GSM 03.40) : param validityPeriod : The validity period to encode : type validityPeriod : datetime . timedelta : rtype : int"""
# Python 2.6 does not have timedelta . total _ seconds ( ) , so compute it manually # seconds = validityPeriod . total _ seconds ( ) seconds = validityPeriod . seconds + ( validityPeriod . days * 24 * 3600 ) if seconds <= 43200 : # 12 hours tpVp = int ( seconds / 300 ) - 1 # divide by 5 minutes , subtract 1 elif seconds <= 86400 : # 24 hours tpVp = int ( ( seconds - 43200 ) / 1800 ) + 143 # subtract 12 hours , divide by 30 minutes . add 143 elif validityPeriod . days <= 30 : # 30 days tpVp = validityPeriod . days + 166 # amount of days + 166 elif validityPeriod . days <= 441 : # max value of tpVp is 255 tpVp = int ( validityPeriod . days / 7 ) + 192 # amount of weeks + 192 else : raise ValueError ( 'Validity period too long; tpVp limited to 1 octet (max value: 255)' ) return tpVp
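A few sample encodings implied by the banding above (usage sketch; the expected values follow directly from the branches of the function and match the GSM 03.40 table):

from datetime import timedelta

# 1 hour   -> 3600 / 300 - 1               = 11
# 24 hours -> (86400 - 43200) / 1800 + 143 = 167
# 7 days   -> 7 + 166                      = 173
# 5 weeks  -> 35 / 7 + 192                 = 197
for period in (timedelta(hours=1), timedelta(hours=24),
               timedelta(days=7), timedelta(weeks=5)):
    print(period, _encodeRelativeValidityPeriod(period))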
def AddService ( self , new_service ) : """Add a new service to the list of ones we know about . Args : new _ service ( WindowsService ) : the service to add ."""
for service in self . _services : if new_service == service : # If this service is the same as one we already know about , we # just want to add where it came from . service . sources . append ( new_service . sources [ 0 ] ) return # We only add a new object to our list if we don ' t have # an identical one already . self . _services . append ( new_service )
def close ( self , code : int = None , reason : str = None ) -> None : """Closes the websocket connection . ` ` code ` ` and ` ` reason ` ` are documented under ` WebSocketHandler . close ` . . . versionadded : : 3.2 . . versionchanged : : 4.0 Added the ` ` code ` ` and ` ` reason ` ` arguments ."""
if self . protocol is not None : self . protocol . close ( code , reason ) self . protocol = None
def interpolate_delays ( augmented_stop_times , dist_threshold , delay_threshold = 3600 , delay_cols = None ) : """Given an augmented stop times DataFrame as output by the function : func : ` build _ augmented _ stop _ times ` , a distance threshold ( float ) in the same units as the ` ` ' shape _ dist _ traveled ' ` ` column of ` ` augmented _ stop _ times ` ` , if that column is present , and a delay threshold ( integer number of seconds ) , alter the delay values of the augmented stop times as follows . Drop all delays with absolute value more than ` ` delay _ threshold ` ` seconds . For each trip and for each delay type ( arrival delay or departure delay ) do the following . If the trip has all null values for the delay type , then leave the values as is . Otherwise : - If the first delay is more than ` ` dist _ threshold ` ` distance units from the first stop , then set the first stop delay to zero ( charitably ) ; otherwise set the first stop delay to the first delay . - If the last delay is more than ` ` dist _ threshold ` ` distance units from the last stop , then set the last stop delay to zero ( charitably ) ; otherwise set the last stop delay to the last delay . - Linearly interpolate the remaining stop delays by distance . Return the resulting DataFrame . If a list of delay column names is given in ` ` delay _ cols ` ` , then alter those columns instead of the ` ` arrival _ delay ` ` and ` ` departure _ delay ` ` columns . This is useful if the given stop times have extra delay columns ."""
f = augmented_stop_times . copy ( ) if delay_cols is None or not set ( delay_cols ) <= set ( f . columns ) : delay_cols = [ 'arrival_delay' , 'departure_delay' ] # Return f if nothing to do if 'shape_dist_traveled' not in f . columns or not f [ 'shape_dist_traveled' ] . notnull ( ) . any ( ) or all ( [ f [ col ] . count ( ) == f [ col ] . shape [ 0 ] for col in delay_cols ] ) : return f # Nullify fishy delays for col in delay_cols : f . loc [ abs ( f [ col ] ) > delay_threshold , col ] = np . nan # Fill null delays def fill ( group ) : # Only columns that have at least one nonnull value . fill_cols = [ ] for col in delay_cols : if group [ col ] . count ( ) >= 1 : fill_cols . append ( col ) for col in fill_cols : # Set first and last delays for i in [ 0 , - 1 ] : j = group [ col ] . dropna ( ) . index [ i ] dist_diff = ( abs ( group [ 'shape_dist_traveled' ] . iat [ i ] - group [ 'shape_dist_traveled' ] . ix [ j ] ) ) if dist_diff > dist_threshold : group [ col ] . iat [ i ] = 0 else : group [ col ] . iat [ i ] = group [ col ] . ix [ j ] # Interpolate remaining delays ind = np . where ( group [ col ] . notnull ( ) ) [ 0 ] group [ col ] = np . interp ( group [ 'shape_dist_traveled' ] , group . iloc [ ind ] [ 'shape_dist_traveled' ] , group . iloc [ ind ] [ col ] ) return group f = f . groupby ( 'trip_id' ) . apply ( fill ) # Round f [ delay_cols ] = f [ delay_cols ] . round ( 0 ) return f
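A minimal usage sketch with a hypothetical single-trip stop times frame (column names are the defaults the function expects; note the implementation relies on the legacy pandas .ix indexer, so an older pandas is assumed):

import numpy as np
import pandas as pd

stop_times = pd.DataFrame({
    'trip_id': ['t1'] * 4,
    'shape_dist_traveled': [0.0, 1.0, 2.0, 3.0],
    'arrival_delay': [np.nan, 60.0, np.nan, np.nan],
    'departure_delay': [np.nan, 60.0, np.nan, np.nan],
})

# First and last stops are within dist_threshold of the known delay, so they
# inherit its value; interior stops are interpolated linearly by distance.
filled = interpolate_delays(stop_times, dist_threshold=5, delay_threshold=3600)
print(filled[['arrival_delay', 'departure_delay']])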
def reset ( self ) : """Reset the value to the default"""
if self . resetable : for i in range ( len ( self ) ) : self [ i ] = self . default
def get_platform_settings ( ) : """Returns the content of ` settings . PLATFORMS ` with a twist . The platforms setting was created to stay compatible with the old way of declaring the FB configuration , in order not to break production bots . This function converts the legacy configuration into the new format if required . As a result , it should be the only way used to access the platform configuration ."""
s = settings . PLATFORMS if hasattr ( settings , 'FACEBOOK' ) and settings . FACEBOOK : s . append ( { 'class' : 'bernard.platforms.facebook.platform.Facebook' , 'settings' : settings . FACEBOOK , } ) return s
def _compute_f0_factor ( self , rrup ) : """Compute and return factor f0 - see equation ( 5 ) , 6th term , p . 2191."""
# f0 = max ( log10 ( R0 / rrup ) , 0) f0 = np . log10 ( self . COEFFS_IMT_INDEPENDENT [ 'R0' ] / rrup ) f0 [ f0 < 0 ] = 0.0 return f0
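Equivalently, a quick NumPy check of the clipping (a sketch; the R0 value here is a placeholder for COEFFS_IMT_INDEPENDENT['R0']):

import numpy as np

R0 = 10.0  # placeholder constant, for illustration only
rrup = np.array([1.0, 10.0, 100.0])
f0 = np.clip(np.log10(R0 / rrup), 0.0, None)
print(f0)  # [1. 0. 0.] -- log10(R0 / rrup) floored at zero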
def auto_close ( self , state = None ) : """Get or set automatic TCP close mode ( after each request ) : param state : auto _ close state or None to get the current value : type state : bool or None : returns : auto _ close state or None if setting fails : rtype : bool or None"""
if state is None : return self . __auto_close self . __auto_close = bool ( state ) return self . __auto_close
def get_queryset ( self ) : """Returns a custom : class : ` QuerySet ` which provides the CTE functionality for all queries concerning : class : ` CTENode ` objects . This method overrides the default : meth : ` get _ queryset ` method of the : class : ` Manager ` class . : returns : a custom : class : ` QuerySet ` which provides the CTE functionality for all queries concerning : class : ` CTENode ` objects ."""
# The CTEQuerySet uses _ cte _ node _ * attributes from the Model , so ensure # they exist . self . _ensure_parameters ( ) return CTEQuerySet ( self . model , using = self . _db )
def _set_get_mpls_ldp_session_brief ( self , v , load = False ) : """Setter method for get _ mpls _ ldp _ session _ brief , mapped from YANG variable / brocade _ mpls _ rpc / get _ mpls _ ldp _ session _ brief ( rpc ) If this variable is read - only ( config : false ) in the source YANG file , then _ set _ get _ mpls _ ldp _ session _ brief is considered as a private method . Backends looking to populate this variable should do so via calling thisObj . _ set _ get _ mpls _ ldp _ session _ brief ( ) directly ."""
if hasattr ( v , "_utype" ) : v = v . _utype ( v ) try : t = YANGDynClass ( v , base = get_mpls_ldp_session_brief . get_mpls_ldp_session_brief , is_leaf = True , yang_name = "get-mpls-ldp-session-brief" , rest_name = "get-mpls-ldp-session-brief" , parent = self , path_helper = self . _path_helper , extmethods = self . _extmethods , register_paths = False , extensions = { u'tailf-common' : { u'hidden' : u'full' , u'actionpoint' : u'showMplsLdpSession' } } , namespace = 'urn:brocade.com:mgmt:brocade-mpls' , defining_module = 'brocade-mpls' , yang_type = 'rpc' , is_config = True ) except ( TypeError , ValueError ) : raise ValueError ( { 'error-string' : """get_mpls_ldp_session_brief must be of a type compatible with rpc""" , 'defined-type' : "rpc" , 'generated-type' : """YANGDynClass(base=get_mpls_ldp_session_brief.get_mpls_ldp_session_brief, is_leaf=True, yang_name="get-mpls-ldp-session-brief", rest_name="get-mpls-ldp-session-brief", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'hidden': u'full', u'actionpoint': u'showMplsLdpSession'}}, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='rpc', is_config=True)""" , } ) self . __get_mpls_ldp_session_brief = t if hasattr ( self , '_set' ) : self . _set ( )
def sumai ( array ) : """Return the sum of the elements of an integer array . http : / / naif . jpl . nasa . gov / pub / naif / toolkit _ docs / C / cspice / sumai _ c . html : param array : Input Array . : type array : Array of ints : return : The sum of the array . : rtype : int"""
n = ctypes . c_int ( len ( array ) ) array = stypes . toIntVector ( array ) return libspice . sumai_c ( array , n )
def parse_MML ( self , mml ) : '''Parse the MML structure .'''
hashes_c = [ ] mentions_c = [ ] soup = BeautifulSoup ( mml , "lxml" ) hashes = soup . find_all ( 'hash' , { "tag" : True } ) for hashe in hashes : hashes_c . append ( hashe [ 'tag' ] ) mentions = soup . find_all ( 'mention' , { "uid" : True } ) for mention in mentions : mentions_c . append ( mention [ 'uid' ] ) msg_string = soup . messageml . text . strip ( ) self . logger . debug ( '%s : %s : %s' % ( hashes_c , mentions_c , msg_string ) ) return hashes_c , mentions_c , msg_string
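A small usage sketch with a hypothetical MessageML snippet (the hash and mention tag names match what the parser above searches for; the call assumes an instance of the surrounding class, here named client):

mml = ('<messageML>Deploy update '
       '<hash tag="release"/> <mention uid="12345"/></messageML>')
hashes, mentions, text = client.parse_MML(mml)
# hashes   -> ['release']
# mentions -> ['12345']
# text     -> 'Deploy update'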
def get_gateway_addr ( ) : """Use netifaces to get the gateway address , if we can ' t import it then fall back to a hack to obtain the current gateway automatically , since Python has no interface to sysctl ( ) . This may or may not be the gateway we should be contacting . It does not guarantee correct results . This function requires the presence of netstat on the path on POSIX and NT ."""
try : import netifaces return netifaces . gateways ( ) [ "default" ] [ netifaces . AF_INET ] [ 0 ] except ImportError : shell_command = 'netstat -rn' if os . name == "posix" : pattern = re . compile ( '(?:default|0\.0\.0\.0|::/0)\s+([\w\.:]+)\s+.*UG' ) elif os . name == "nt" : if platform . version ( ) . startswith ( "6.1" ) : pattern = re . compile ( ".*?0.0.0.0[ ]+0.0.0.0[ ]+(.*?)[ ]+?.*?\n" ) else : pattern = re . compile ( ".*?Default Gateway:[ ]+(.*?)\n" ) system_out = os . popen ( shell_command , 'r' ) . read ( ) if not system_out : raise NATPMPNetworkError ( NATPMP_GATEWAY_CANNOT_FIND , error_str ( NATPMP_GATEWAY_CANNOT_FIND ) ) match = pattern . search ( system_out ) if not match : raise NATPMPNetworkError ( NATPMP_GATEWAY_CANNOT_FIND , error_str ( NATPMP_GATEWAY_CANNOT_FIND ) ) addr = match . groups ( ) [ 0 ] . strip ( ) return addr
def get_turbine_types ( print_out = True ) : r"""Get the names of all possible wind turbine types for which the power coefficient curve or power curve is provided in the OpenEnergy Data Base ( oedb ) . Parameters print _ out : boolean Directly prints a tabular containing the turbine types in column ' turbine _ type ' , the manufacturer in column ' manufacturer ' and information about whether a power ( coefficient ) curve exists ( True ) or not ( False ) in columns ' has _ power _ curve ' and ' has _ cp _ curve ' . Default : True . Returns curves _ df : pd . DataFrame Contains turbine types in column ' turbine _ type ' , the manufacturer in column ' manufacturer ' and information about whether a power ( coefficient ) curve exists ( True ) or not ( False ) in columns ' has _ power _ curve ' and ' has _ cp _ curve ' . Examples > > > from windpowerlib import wind _ turbine > > > df = wind _ turbine . get _ turbine _ types ( print _ out = False ) > > > print ( df [ df [ " turbine _ type " ] . str . contains ( " E - 126 " ) ] . iloc [ 0 ] ) manufacturer Enercon turbine _ type E - 126/4200 has _ power _ curve True has _ cp _ curve True Name : 5 , dtype : object > > > print ( df [ df [ " manufacturer " ] . str . contains ( " Enercon " ) ] . iloc [ 0 ] ) manufacturer Enercon turbine _ type E - 101/3050 has _ power _ curve True has _ cp _ curve True Name : 1 , dtype : object"""
df = load_turbine_data_from_oedb ( ) cp_curves_df = df . iloc [ df . loc [ df [ 'has_cp_curve' ] ] . index ] [ [ 'manufacturer' , 'turbine_type' , 'has_cp_curve' ] ] p_curves_df = df . iloc [ df . loc [ df [ 'has_power_curve' ] ] . index ] [ [ 'manufacturer' , 'turbine_type' , 'has_power_curve' ] ] curves_df = pd . merge ( p_curves_df , cp_curves_df , how = 'outer' , sort = True ) . fillna ( False ) if print_out : pd . set_option ( 'display.max_rows' , len ( curves_df ) ) print ( curves_df ) pd . reset_option ( 'display.max_rows' ) return curves_df
def symbol_bollinger ( symbol = 'GOOG' , start = datetime . datetime ( 2008 , 1 , 1 ) , end = datetime . datetime ( 2009 , 12 , 31 ) , price_type = 'close' , cleaner = clean_dataframe , window = 20 , sigma = 1. ) : """Calculate the Bollinger indicator value > > > symbol _ bollinger ( " goog " , ' 2008-1-1 ' , ' 2008-2-1 ' ) [ - 1 ] # doctest : + ELLIPSIS , + NORMALIZE _ WHITESPACE -1.8782 . . ."""
symbols = normalize_symbols ( symbol ) prices = price_dataframe ( symbols , start = start , end = end , price_type = price_type , cleaner = cleaner ) return series_bollinger ( prices [ symbols [ 0 ] ] , window = window , sigma = sigma , plot = False )
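series_bollinger is assumed here to compute the familiar z-score form of the indicator; a minimal pandas sketch of that computation (an assumption about its internals, not the library's actual code):

import pandas as pd

def bollinger_value(prices, window=20, sigma=1.0):
    # Distance of each price from its rolling mean, measured in multiples of
    # sigma rolling standard deviations; the docstring example prints the
    # value for the last trading day.
    rolling = prices.rolling(window)
    return (prices - rolling.mean()) / (sigma * rolling.std())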
def chain ( args ) : """% prog chain bedfile Chain BED segments together ."""
p = OptionParser ( chain . __doc__ ) p . add_option ( "--dist" , default = 100000 , help = "Chaining distance" ) p . set_outfile ( ) opts , args = p . parse_args ( args ) if len ( args ) != 1 : sys . exit ( not p . print_help ( ) ) bedfile , = args cmd = "sort -k4,4 -k1,1 -k2,2n -k3,3n {0} -o {0}" . format ( bedfile ) sh ( cmd ) bed = Bed ( bedfile , sorted = False ) newbed = Bed ( ) for accn , bb in groupby ( bed , key = lambda x : x . accn ) : bb = list ( bb ) g = Grouper ( ) for a in bb : g . join ( a ) for a , b in pairwise ( bb ) : if a . seqid == b . seqid and b . start - a . end < opts . dist : g . join ( a , b ) data = [ ] for p in g : seqid = p [ 0 ] . seqid start = min ( x . start for x in p ) end = max ( x . end for x in p ) score = sum ( x . span for x in p ) data . append ( ( seqid , start - 1 , end , accn , score ) ) d = max ( data , key = lambda x : x [ - 1 ] ) newbed . append ( BedLine ( "\t" . join ( str ( x ) for x in d ) ) ) newbed . print_to_file ( opts . outfile , sorted = True )
def _resolve_process_count ( self ) : """Calculate the number of process resources . : return : Nothing , stores the result in self . _ process _ count"""
length = len ( [ d for d in self . _dut_requirements if d . get ( "type" ) == "process" ] ) self . _process_count = length
def get_model_config_value ( self , obj , name ) : """Get config value for given model"""
config = models_config . get_config ( obj ) return getattr ( config , name )
def _reset_syslog_config_params ( host , username , password , cmd , resets , valid_resets , protocol = None , port = None , esxi_host = None , credstore = None ) : '''Helper function for reset _ syslog _ config that resets the config and populates the return dictionary .'''
ret_dict = { } all_success = True if not isinstance ( resets , list ) : resets = [ resets ] for reset_param in resets : if reset_param in valid_resets : ret = salt . utils . vmware . esxcli ( host , username , password , cmd + reset_param , protocol = protocol , port = port , esxi_host = esxi_host , credstore = credstore ) ret_dict [ reset_param ] = { } ret_dict [ reset_param ] [ 'success' ] = ret [ 'retcode' ] == 0 if ret [ 'retcode' ] != 0 : all_success = False ret_dict [ reset_param ] [ 'message' ] = ret [ 'stdout' ] else : all_success = False ret_dict [ reset_param ] = { } ret_dict [ reset_param ] [ 'success' ] = False ret_dict [ reset_param ] [ 'message' ] = 'Invalid syslog ' 'configuration parameter' ret_dict [ 'success' ] = all_success return ret_dict
def time_average_vel ( self , depth ) : """Calculate the time - average velocity . Parameters depth : float Depth over which the average velocity is computed . Returns avg _ vel : float Time averaged velocity ."""
depths = [ l . depth for l in self ] # Final layer is infinite and is treated separately travel_times = [ 0 ] + [ l . travel_time for l in self [ : - 1 ] ] # If needed , add the final layer to the required depth if depths [ - 1 ] < depth : depths . append ( depth ) travel_times . append ( ( depth - self [ - 1 ] . depth ) / self [ - 1 ] . shear_vel ) total_travel_times = np . cumsum ( travel_times ) # Interpolate the travel time to the depth of interest avg_shear_vel = depth / np . interp ( depth , depths , total_travel_times ) return avg_shear_vel
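A worked example of the same computation with plain NumPy (hypothetical profile: 10 m at 200 m/s over 10 m at 400 m/s, averaged down to 20 m):

import numpy as np

thicknesses = np.array([10.0, 10.0])    # m
shear_vels = np.array([200.0, 400.0])   # m/s

depth = 20.0
travel_time = np.sum(thicknesses / shear_vels)  # 0.05 s + 0.025 s = 0.075 s
avg_vel = depth / travel_time
print(avg_vel)  # ~266.7 m/s -- a travel-time average, not the 300 m/s arithmetic mean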
def process ( self , quoted = False ) : '''Parse a URL .'''
self . p = urlparse ( self . raw ) self . scheme = self . p . scheme self . netloc = self . p . netloc self . opath = self . p . path if not quoted else quote ( self . p . path ) self . path = [ x for x in self . opath . split ( '/' ) if x ] self . params = self . p . params self . query = parse_qs ( self . p . query , keep_blank_values = True ) self . fragment = self . p . fragment
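For reference, the pieces the parser above extracts correspond to the standard library's urlparse fields; a standalone sketch with a made-up URL:

from urllib.parse import urlparse, parse_qs

p = urlparse('https://example.com/api/v1/items;rev=2?tag=a&tag=b#top')
print(p.scheme, p.netloc)                          # https example.com
print([x for x in p.path.split('/') if x])         # ['api', 'v1', 'items']
print(p.params)                                    # rev=2
print(parse_qs(p.query, keep_blank_values=True))   # {'tag': ['a', 'b']}
print(p.fragment)                                  # top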