def in_subnet(cidr, addr=None):
    '''Returns True if host or (any of) addrs is within specified subnet, otherwise False'''
    try:
        cidr = ipaddress.ip_network(cidr)
    except ValueError:
        log.error('Invalid CIDR \'%s\'', cidr)
        return False
    if addr is None:
        addr = ip_addrs()
        addr.extend(ip_addrs6())
    elif not isinstance(addr, (list, tuple)):
        addr = (addr,)
    return any(ipaddress.ip_address(item) in cidr for item in addr)
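# For illustration, a standard-library sketch of the membership test that
# in_subnet delegates to (ipaddress only; the module's log/ip_addrs helpers
# above are not needed here):
import ipaddress

net = ipaddress.ip_network('10.0.0.0/8')
print(ipaddress.ip_address('10.1.2.3') in net)     # True
print(ipaddress.ip_address('192.168.0.1') in net)  # False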
def make_file_url(self, share_name, directory_name, file_name,
                  protocol=None, sas_token=None):
    '''Creates the url to access a file.

    :param str share_name: Name of share.
    :param str directory_name: The path to the directory.
    :param str file_name: Name of file.
    :param str protocol: Protocol to use: 'http' or 'https'. If not
        specified, uses the protocol specified when FileService was
        initialized.
    :param str sas_token: Shared access signature token created with
        generate_shared_access_signature.
    :return: file access URL.
    :rtype: str
    '''
    if directory_name is None:
        url = '{}://{}/{}/{}'.format(
            protocol or self.protocol,
            self.primary_endpoint,
            share_name,
            file_name,
        )
    else:
        url = '{}://{}/{}/{}/{}'.format(
            protocol or self.protocol,
            self.primary_endpoint,
            share_name,
            directory_name,
            file_name,
        )
    if sas_token:
        url += '?' + sas_token
    return url
def uploadfile(baseurl, filename, format_, token, nonce, cert, method=requests.post):
    """Uploads file (given by `filename`) to server at `baseurl`.

    `token` and `nonce` are string values that get passed as POST parameters.
    """
    filehash = sha1sum(filename)
    files = {'filedata': open(filename, 'rb')}
    payload = {
        'sha1': filehash,
        'filename': os.path.basename(filename),
        'token': token,
        'nonce': nonce,
    }
    return method("%s/sign/%s" % (baseurl, format_),
                  files=files, data=payload, verify=cert)
def focusedWindow(cls):
    """Returns a Region corresponding to whatever window is in the foreground"""
    x, y, w, h = PlatformManager.getWindowRect(PlatformManager.getForegroundWindow())
    return Region(x, y, w, h)
def _encode_uuid(name, value, dummy, opts):
    """Encode uuid.UUID."""
    uuid_representation = opts.uuid_representation
    # Python Legacy Common Case
    if uuid_representation == OLD_UUID_SUBTYPE:
        return b"\x05" + name + b'\x10\x00\x00\x00\x03' + value.bytes
    # Java Legacy
    elif uuid_representation == JAVA_LEGACY:
        from_uuid = value.bytes
        data = from_uuid[0:8][::-1] + from_uuid[8:16][::-1]
        return b"\x05" + name + b'\x10\x00\x00\x00\x03' + data
    # C# legacy
    elif uuid_representation == CSHARP_LEGACY:
        # Microsoft GUID representation.
        return b"\x05" + name + b'\x10\x00\x00\x00\x03' + value.bytes_le
    # New
    else:
        return b"\x05" + name + b'\x10\x00\x00\x00\x04' + value.bytes
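# A standard-library sketch of the byte orders the branches above emit:
# the Java-legacy representation reverses each 8-byte half, while the
# C#-legacy representation uses the little-endian field layout that the
# uuid module exposes as bytes_le.
import uuid

u = uuid.UUID('12345678-1234-5678-1234-567812345678')
java_legacy = u.bytes[0:8][::-1] + u.bytes[8:16][::-1]
print(u.bytes.hex())      # 12345678123456781234567812345678
print(java_legacy.hex())  # 78563412785634127856341278563412
print(u.bytes_le.hex())   # 78563412341278561234567812345678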
def depthwise_convolution(inp, kernel, pad=None, stride=None, dilation=None,
                          multiplier=1, w_init=None, b_init=None, base_axis=1,
                          fix_parameters=False, rng=None, with_bias=True):
    """N-D Depthwise Convolution with a bias term.

    Reference:

        - F. Chollet: Chollet, Francois. "Xception: Deep Learning with
          Depthwise Separable Convolutions.
          https://arxiv.org/abs/1610.02357

    Args:
        inp (~nnabla.Variable): N-D array.
        kernel (:obj:`tuple` of :obj:`int`): Convolution kernel size. For
            example, to apply convolution on an image with a 3 (height) by 5
            (width) two-dimensional kernel, specify (3, 5).
        pad (:obj:`tuple` of :obj:`int`): Padding sizes for dimensions.
        stride (:obj:`tuple` of :obj:`int`): Stride sizes for dimensions.
        dilation (:obj:`tuple` of :obj:`int`): Dilation sizes for dimensions.
        multiplier (:obj:`int`): Number of output feature maps per input
            feature map.
        w_init (:obj:`nnabla.initializer.BaseInitializer` or
            :obj:`numpy.ndarray`): Initializer for weight. By default, it is
            initialized with :obj:`nnabla.initializer.UniformInitializer`
            within the range determined by
            :obj:`nnabla.initializer.calc_uniform_lim_glorot`.
        b_init (:obj:`nnabla.initializer.BaseInitializer` or
            :obj:`numpy.ndarray`): Initializer for bias. By default, it is
            initialized with zeros if `with_bias` is `True`.
        base_axis (int): Dimensions up to `base_axis` are treated as the
            sample dimensions.
        fix_parameters (bool): When set to `True`, the weights and biases
            will not be updated.
        rng (numpy.random.RandomState): Random generator for Initializer.
        with_bias (bool): Specify whether to include the bias term.

    Returns:
        :class:`~nnabla.Variable`: N-D array. See
        :obj:`~nnabla.functions.depthwise_convolution` for the output shape.
    """
    if w_init is None:
        w_init = UniformInitializer(
            calc_uniform_lim_glorot(inp.shape[base_axis] * multiplier,
                                    inp.shape[base_axis], tuple(kernel)),
            rng=rng)
    if with_bias and b_init is None:
        b_init = ConstantInitializer()
    w = get_parameter_or_create(
        "W", (inp.shape[base_axis] * multiplier,) + tuple(kernel),
        w_init, True, not fix_parameters)
    b = None
    if with_bias:
        b = get_parameter_or_create(
            "b", (inp.shape[base_axis] * multiplier,),
            b_init, True, not fix_parameters)
    return F.depthwise_convolution(inp, w, b, base_axis, pad, stride,
                                   dilation, multiplier)
def update(self, response_headers):
    """Update the state of the rate limiter based on the response headers.

    This method should only be called following a HTTP request to reddit.

    Response headers that do not contain x-ratelimit fields will be treated
    as a single request. This behavior is to error on the safe-side as such
    responses should trigger exceptions that indicate invalid behavior.
    """
    if "x-ratelimit-remaining" not in response_headers:
        if self.remaining is not None:
            self.remaining -= 1
            self.used += 1
        return
    now = time.time()
    prev_remaining = self.remaining
    seconds_to_reset = int(response_headers["x-ratelimit-reset"])
    self.remaining = float(response_headers["x-ratelimit-remaining"])
    self.used = int(response_headers["x-ratelimit-used"])
    self.reset_timestamp = now + seconds_to_reset
    if self.remaining <= 0:
        self.next_request_timestamp = self.reset_timestamp
        return
    if prev_remaining is not None and prev_remaining > self.remaining:
        estimated_clients = prev_remaining - self.remaining
    else:
        estimated_clients = 1.0
    self.next_request_timestamp = min(
        self.reset_timestamp,
        now + (estimated_clients * seconds_to_reset / self.remaining),
    )
def sort_items(self, items, args=False):
    """Sort the `self`'s contents, as contained in the list `items`,
    as specified in `self`'s meta-data."""
    if self.settings['sort'].lower() == 'src':
        return

    def alpha(i):
        return i.name

    def permission(i):
        if args:
            if i.intent == 'in':
                return 'b'
            if i.intent == 'inout':
                return 'c'
            if i.intent == 'out':
                return 'd'
            if i.intent == '':
                return 'e'
        perm = getattr(i, 'permission', '')
        if perm == 'public':
            return 'b'
        if perm == 'protected':
            return 'c'
        if perm == 'private':
            return 'd'
        return 'a'

    def permission_alpha(i):
        return permission(i) + '-' + i.name

    def itype(i):
        if i.obj == 'variable':
            retstr = i.vartype
            if retstr == 'class':
                retstr = 'type'
            if i.kind:
                retstr = retstr + '-' + str(i.kind)
            if i.strlen:
                retstr = retstr + '-' + str(i.strlen)
            if i.proto:
                retstr = retstr + '-' + i.proto[0]
            return retstr
        elif i.obj == 'proc':
            if i.proctype != 'Function':
                return i.proctype.lower()
            else:
                return i.proctype.lower() + '-' + itype(i.retvar)
        else:
            return i.obj

    def itype_alpha(i):
        return itype(i) + '-' + i.name

    if self.settings['sort'].lower() == 'alpha':
        items.sort(key=alpha)
    elif self.settings['sort'].lower() == 'permission':
        items.sort(key=permission)
    elif self.settings['sort'].lower() == 'permission-alpha':
        items.sort(key=permission_alpha)
    elif self.settings['sort'].lower() == 'type':
        items.sort(key=itype)
    elif self.settings['sort'].lower() == 'type-alpha':
        items.sort(key=itype_alpha)
def _surfdens(self, R, z, phi=0., t=0.):
    """NAME:
        _surfdens
    PURPOSE:
        evaluate the surface density for this potential
    INPUT:
        R - Galactocentric cylindrical radius
        z - vertical height
        phi - azimuth
        t - time
    OUTPUT:
        the surface density
    HISTORY:
        2018-08-19 - Written - Bovy (UofT)
    """
    r = numpy.sqrt(R**2. + z**2.)
    x = r / self.a
    Rpa = numpy.sqrt(R**2. + self.a**2.)
    Rma = numpy.sqrt(R**2. - self.a**2. + 0j)
    if Rma == 0:
        za = z / self.a
        return self.a**2. / 2. * (
            (2. - 2. * numpy.sqrt(za**2. + 1)
             + numpy.sqrt(2.) * za * numpy.arctan(za / numpy.sqrt(2.))) / z
            + numpy.sqrt(2 * za**2. + 2.)
            * numpy.arctanh(za / numpy.sqrt(2. * (za**2. + 1)))
            / numpy.sqrt(self.a**2. + z**2.))
    else:
        return self.a**2. * (numpy.arctan(z / x / Rma) / Rma
                             + numpy.arctanh(z / x / Rpa) / Rpa
                             - numpy.arctan(z / Rma) / Rma
                             + numpy.arctan(z / Rpa) / Rpa).real
def hex_repr(d):
    """
    >>> hex_repr({"A": 0x1, "B": 0xabc})
    'A=$01 B=$0abc'
    """
    txt = []
    for k, v in sorted(d.items()):
        if isinstance(v, int):
            txt.append("%s=%s" % (k, nice_hex(v)))
        else:
            txt.append("%s=%s" % (k, v))
    return " ".join(txt)
def resource_spec(opts):
    """Return the spec (URL, package spec, file, etc) for a named resource."""
    resources = _load(opts.resources, opts.output_dir)
    if opts.resource_name not in resources:
        sys.stderr.write('Invalid resource name: {}\n'.format(opts.resource_name))
        return 1
    print(resources[opts.resource_name].spec)
def setup_container_system_config(basedir, mountdir=None):
    """Create a minimal system configuration for use in a container.

    @param basedir: The directory where the configuration files should be
        placed as bytes.
    @param mountdir: If present, bind mounts to the configuration files will
        be added below this path (given as bytes).
    """
    etc = os.path.join(basedir, b"etc")
    if not os.path.exists(etc):
        os.mkdir(etc)
    for file, content in CONTAINER_ETC_FILE_OVERRIDE.items():
        # Create "basedir/etc/file"
        util.write_file(content, etc, file)
        if mountdir:
            # Create bind mount to "mountdir/etc/file"
            make_bind_mount(os.path.join(etc, file),
                            os.path.join(mountdir, b"etc", file),
                            private=True)
    os.symlink(b"/proc/self/mounts", os.path.join(etc, b"mtab"))
def check_smart_storage_config_ids(self):
    """Check that SmartStorageConfig controllers are present in hardware.

    :raises: IloError, on an error from iLO.
    """
    if self.smart_storage_config_identities is None:
        msg = ('The Redfish controller failed to get the '
               'SmartStorageConfig controller configurations.')
        LOG.debug(msg)
        raise exception.IloError(msg)
def add_argument(self, parser, bootstrap=False):
    """Add boolean item as an argument to the given parser.

    An exclusive group is created on the parser, which will add a
    boolean-style command line argument to the parser.

    Examples:
        A non-nested boolean value with the name 'debug' will result in a
        command-line argument like the following:

        '--debug/--no-debug'

    Args:
        parser (argparse.ArgumentParser): The parser to add this item to.
        bootstrap (bool): Flag to indicate whether you only want to mark
            this item as required or not.
    """
    tmp_default = self.default
    exclusive_grp = parser.add_mutually_exclusive_group()
    self.default = True
    args = self._get_argparse_names(parser.prefix_chars)
    kwargs = self._get_argparse_kwargs(bootstrap)
    exclusive_grp.add_argument(*args, **kwargs)
    self.default = False
    args = self._get_argparse_names(parser.prefix_chars)
    kwargs = self._get_argparse_kwargs(bootstrap)
    exclusive_grp.add_argument(*args, **kwargs)
    self.default = tmp_default
def get_setting(name, default):
    """A little helper for fetching global settings with a common prefix."""
    parent_name = "CMSPLUGIN_NEWS_{0}".format(name)
    return getattr(django_settings, parent_name, default)
def gabc(elem, doc):
    """Handle gabc file inclusion and gabc code block."""
    if type(elem) == Code and "gabc" in elem.classes:
        if doc.format == "latex":
            if elem.identifier == "":
                label = ""
            else:
                label = '\\label{' + elem.identifier + '}'
            return latex(
                "\n\\smallskip\n{%\n"
                + latexsnippet('\\gregorioscore{' + elem.text + '}',
                               elem.attributes)
                + "%\n}" + label)
        else:
            infile = elem.text + ('.gabc' if '.gabc' not in elem.text else '')
            # Use a name other than `doc` for the file handle to avoid
            # shadowing the document argument.
            with open(infile, 'r') as gabc_file:
                code = gabc_file.read().split('%%\n')[1]
            return Image(png(elem.text,
                             latexsnippet('\\gregorioscore', elem.attributes)))
    elif type(elem) == CodeBlock and "gabc" in elem.classes:
        if doc.format == "latex":
            if elem.identifier == "":
                label = ""
            else:
                label = '\\label{' + elem.identifier + '}'
            return latexblock(
                "\n\\smallskip\n{%\n"
                + latexsnippet('\\gabcsnippet{' + elem.text + '}',
                               elem.attributes)
                + "%\n}" + label)
        else:
            return Para(Image(url=png(elem.text,
                                      latexsnippet('\\gabcsnippet',
                                                   elem.attributes))))
def ball_and_sticks(self, ball_radius=0.05, stick_radius=0.02,
                    colorlist=None, opacity=1.0):
    """Display the system using a ball and stick representation."""
    # Add the spheres
    if colorlist is None:
        colorlist = [get_atom_color(t) for t in self.topology['atom_types']]
    sizes = [ball_radius] * len(self.topology['atom_types'])
    spheres = self.add_representation('spheres', {
        'coordinates': self.coordinates.astype('float32'),
        'colors': colorlist,
        'radii': sizes,
        'opacity': opacity})

    def update(self=self, spheres=spheres):
        self.update_representation(spheres, {
            'coordinates': self.coordinates.astype('float32')})

    self.update_callbacks.append(update)

    # Add the cylinders
    if 'bonds' in self.topology and self.topology['bonds'] is not None:
        start_idx, end_idx = zip(*self.topology['bonds'])
        # Added this so bonds don't go through atoms when opacity < 1.0
        new_start_coords = []
        new_end_coords = []
        for bond_ind, bond in enumerate(self.topology['bonds']):
            trim_amt = ((ball_radius**2 - stick_radius**2)**0.5
                        if ball_radius > stick_radius else 0)
            start_coord = self.coordinates[bond[0]]
            end_coord = self.coordinates[bond[1]]
            vec = (end_coord - start_coord) / np.linalg.norm(end_coord - start_coord)
            new_start_coords.append(start_coord + vec * trim_amt)
            new_end_coords.append(end_coord - vec * trim_amt)
        cylinders = self.add_representation('cylinders', {
            'startCoords': np.array(new_start_coords, dtype='float32'),
            'endCoords': np.array(new_end_coords, dtype='float32'),
            'colors': [0xcccccc] * len(new_start_coords),
            'radii': [stick_radius] * len(new_start_coords),
            'opacity': opacity})

        # Update closure
        def update(self=self, rep=cylinders, start_idx=start_idx, end_idx=end_idx):
            self.update_representation(rep, {
                'startCoords': self.coordinates[list(start_idx)],
                'endCoords': self.coordinates[list(end_idx)]})

        self.update_callbacks.append(update)

    self.autozoom(self.coordinates)
def parse_PISCES_output(pisces_output, path=False):
    """Takes the output list of a PISCES cull and returns in a usable dictionary.

    Notes
    -----
    Designed for outputs of protein sequence redundancy culls conducted using
    the PISCES server.
    http://dunbrack.fccc.edu/PISCES.php
    G. Wang and R. L. Dunbrack, Jr. PISCES: a protein sequence culling
    server. Bioinformatics, 19:1589-1591, 2003.

    Parameters
    ----------
    pisces_output : str or path
        Output list of non-redundant protein chains from PISCES, or path to
        text file.
    path : bool
        True if path given rather than string.

    Returns
    -------
    pisces_dict : dict
        Data output by PISCES in dictionary form.
    """
    pisces_dict = {}
    if path:
        pisces_path = Path(pisces_output)
        pisces_content = pisces_path.read_text().splitlines()[1:]
    else:
        pisces_content = pisces_output.splitlines()[1:]
    for line in pisces_content:
        pdb = line.split()[0][:4].lower()
        chain = line.split()[0][-1]
        pdb_dict = {'length': line.split()[1],
                    'method': line.split()[2],
                    'resolution': line.split()[3],
                    'R-factor': line.split()[4],
                    'R-free': line.split()[5]}
        if pdb in pisces_dict:
            pisces_dict[pdb]['chains'].append(chain)
        else:
            pdb_dict['chains'] = [chain]
            pisces_dict[pdb] = pdb_dict
    return pisces_dict
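# A sketch of the per-line parse above on a hypothetical PISCES-style record
# (the real column layout should be confirmed against actual server output):
line = '1ABCA  129  XRAY  1.90  0.18  0.21'
fields = line.split()
pdb, chain = fields[0][:4].lower(), fields[0][-1]
print(pdb, chain, fields[1])  # 1abc A 129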
def align_lines(line_list, character='=', replchar=None, pos=0):
    r"""Left justifies text on the left side of character.

    align_lines

    TODO: clean up and move to ubelt?

    Args:
        line_list (list of strs):
        character (str):
        pos (int or list or None): does one alignment for all chars beyond
            this column position. If pos is None, then all chars are aligned.

    Returns:
        list: new_lines

    CommandLine:
        python -m utool.util_str --test-align_lines:0
        python -m utool.util_str --test-align_lines:1
        python -m utool.util_str --test-align_lines:2
        python -m utool.util_str --test-align_lines:3

    Example0:
        >>> # ENABLE_DOCTEST
        >>> from utool.util_str import *  # NOQA
        >>> line_list = 'a = b\none = two\nthree = fish'.split('\n')
        >>> character = '='
        >>> new_lines = align_lines(line_list, character)
        >>> result = ('\n'.join(new_lines))
        >>> print(result)
        a     = b
        one   = two
        three = fish

    Example1:
        >>> # ENABLE_DOCTEST
        >>> from utool.util_str import *  # NOQA
        >>> line_list = 'foofish:\n    a = b\n    one = two\n    three = fish'.split('\n')
        >>> character = '='
        >>> new_lines = align_lines(line_list, character)
        >>> result = ('\n'.join(new_lines))
        >>> print(result)
        foofish:
            a     = b
            one   = two
            three = fish

    Example2:
        >>> # ENABLE_DOCTEST
        >>> from utool.util_str import *  # NOQA
        >>> import utool as ut
        >>> character = ':'
        >>> text = ut.codeblock('''
            {
                'max': '1970/01/01 02:30:13',
                'mean': '1970/01/01 01:10:15',
                'min': '1970/01/01 00:01:41',
                'range': '2:28:32',
                'std': '1:13:57',
            }''').split('\n')
        >>> new_lines = align_lines(text, ':', ':')
        >>> result = '\n'.join(new_lines)
        >>> print(result)
        {
            'max'  : '1970/01/01 02:30:13',
            'mean' : '1970/01/01 01:10:15',
            'min'  : '1970/01/01 00:01:41',
            'range': '2:28:32',
            'std'  : '1:13:57',
        }

    Example3:
        >>> # ENABLE_DOCEST
        >>> from utool.util_str import *  # NOQA
        >>> line_list = 'foofish:\n    a = b = c\n    one = two = three\nthree = 4 = fish'.split('\n')
        >>> character = '='
        >>> # align the second occurrence of a character
        >>> new_lines = align_lines(line_list, character, pos=None)
        >>> print(('\n'.join(line_list)))
        foofish:
            a = b = c
            one = two = three
        three = 4 = fish
        >>> result = ('\n'.join(new_lines))
        >>> print(result)
        foofish:
            a   = b   = c
            one = two = three
        three   = 4   = fish

    Ignore:
        # use this as test case
        \begin{tabular}{lrrll}
        \toprule
        {} & Names & Annots & Annots size & Training Edges \\
        \midrule
        training & 390 & 1164 & 2.98\pm2.83 & 9360 \\
        testing & 363 & 1119 & 3.08\pm2.82 & - \\
        \bottomrule
        \end{tabular}
    """
    # FIXME: continue to fix ansi
    if pos is None:
        # Align all occurrences
        num_pos = max([line.count(character) for line in line_list])
        pos = list(range(num_pos))
    # Allow multiple alignments
    if isinstance(pos, list):
        pos_list = pos
        # recursive calls
        new_lines = line_list
        for pos in pos_list:
            new_lines = align_lines(new_lines, character=character,
                                    replchar=replchar, pos=pos)
        return new_lines
    # base case
    if replchar is None:
        replchar = character
    # the pos-th character to align
    lpos = pos
    rpos = lpos + 1
    tup_list = [line.split(character) for line in line_list]
    handle_ansi = True
    if handle_ansi:
        # Remove ansi from length calculation
        # References: http://stackoverflow.com/questions/14693701/remove-ansi
        ansi_escape = re.compile(r'\x1b[^m]*m')
    # Find how much padding is needed
    maxlen = 0
    for tup in tup_list:
        if len(tup) >= rpos + 1:
            if handle_ansi:
                tup = [ansi_escape.sub('', x) for x in tup]
            left_lenlist = list(map(len, tup[0:rpos]))
            left_len = sum(left_lenlist) + lpos * len(replchar)
            maxlen = max(maxlen, left_len)
    # Pad each line to align the pos-th occurrence of the chosen character
    new_lines = []
    for tup in tup_list:
        if len(tup) >= rpos + 1:
            lhs = character.join(tup[0:rpos])
            rhs = character.join(tup[rpos:])
            # pad the new line with requested justification
            newline = lhs.ljust(maxlen) + replchar + rhs
            new_lines.append(newline)
        else:
            new_lines.append(replchar.join(tup))
    return new_lines
def SampleMemoryUsage(self, parser_name):
    """Takes a sample of the memory usage for profiling.

    Args:
        parser_name (str): name of the parser.
    """
    if self._memory_profiler:
        used_memory = self._process_information.GetUsedMemory() or 0
        self._memory_profiler.Sample(parser_name, used_memory)
def can_create_bin_with_record_types(self, bin_record_types):
    """Tests if this user can create a single ``Bin`` using the desired
    record types.

    While ``ResourceManager.getBinRecordTypes()`` can be used to examine
    which records are supported, this method tests which record(s) are
    required for creating a specific ``Bin``. Providing an empty array
    tests if a ``Bin`` can be created with no records.

    arg:    bin_record_types (osid.type.Type[]): array of bin record types
    return: (boolean) - ``true`` if ``Bin`` creation using the specified
            ``Types`` is supported, ``false`` otherwise
    raise:  NullArgument - ``bin_record_types`` is ``null``
    *compliance: mandatory -- This method must be implemented.*
    """
    # Implemented from template for
    # osid.resource.BinAdminSession.can_create_bin_with_record_types
    # NOTE: It is expected that real authentication hints will be
    # handled in a service adapter above the pay grade of this impl.
    if self._catalog_session is not None:
        return self._catalog_session.can_create_catalog_with_record_types(
            catalog_record_types=bin_record_types)
    return True
def from_csv(cls, path: PathOrStr, folder: PathOrStr = None,
             label_delim: str = None, csv_labels: PathOrStr = 'labels.csv',
             valid_pct: float = 0.2, fn_col: int = 0, label_col: int = 1,
             suffix: str = '', delimiter: str = None,
             header: Optional[Union[int, str]] = 'infer',
             **kwargs: Any) -> 'ImageDataBunch':
    "Create from a csv file in `path/csv_labels`."
    path = Path(path)
    df = pd.read_csv(path / csv_labels, header=header, delimiter=delimiter)
    return cls.from_df(path, df, folder=folder, label_delim=label_delim,
                       valid_pct=valid_pct, fn_col=fn_col,
                       label_col=label_col, suffix=suffix, **kwargs)
def rollback(self, number=0):
    """Will rollback the configuration to a previous state.

    :param number: How many steps back in the configuration history must
        look back.
    :raise pyPluribus.exceptions.RollbackError: In case the configuration
        cannot be rolled back.
    """
    if number < 0:
        raise pyPluribus.exceptions.RollbackError(
            "Please provide a positive number to rollback to!")
    available_configs = len(self._config_history)
    max_rollbacks = available_configs - 2
    if max_rollbacks < 0:
        raise pyPluribus.exceptions.RollbackError(
            "Cannot rollback: not enough configuration history available!")
    if max_rollbacks > 0 and number > max_rollbacks:
        raise pyPluribus.exceptions.RollbackError(
            "Cannot rollback more than {cfgs} configurations!".format(
                cfgs=max_rollbacks))
    # will load the initial config worst case (user never committed,
    # but wants to discard)
    config_location = 1
    if max_rollbacks > 0:
        # in case of previous commit(s) will be able to load a specific
        # configuration, stored in location len() - rollback_nb - 1;
        # covers also the case of discard uncommitted changes (rollback 0)
        config_location = available_configs - number - 1
    desired_config = self._config_history[config_location]
    try:
        self._upload_config_content(desired_config, rollbacked=True)
    except pyPluribus.exceptions.ConfigLoadError as loaderr:
        raise pyPluribus.exceptions.RollbackError(
            "Cannot rollback: {err}".format(err=loaderr))
    # delete all newer configurations than the config rolled back
    del self._config_history[(config_location + 1):]
    self._last_working_config = desired_config
    self._committed = True
    self._config_changed = False
    return True
def get_results():
    """Parse all search result pages."""
    base = "http://www.smackjeeves.com/search.php?submit=Search+for+Webcomics&search_mode=webcomics&comic_title=&special=all&last_update=3&style_all=on&genre_all=on&format_all=on&sort_by=2&start=%d"
    session = requests.Session()
    # store info in a dictionary {name -> url, number of comics, adult flag, bounce flag}
    res = {}
    # a search for an empty string returned 286 result pages
    result_pages = 286
    print("Parsing", result_pages, "search result pages...", file=sys.stderr)
    for i in range(0, result_pages):
        print(i + 1, file=sys.stderr, end=" ")
        handle_url(base % (i * 12), session, res)
    save_result(res, json_file)
def from_dict(data, ctx):
    """Instantiate a new Transaction from a dict (generally from loading a
    JSON response). The data used to instantiate the Transaction is a
    shallow copy of the dict passed in, with any complex child types
    instantiated appropriately.
    """
    # Dispatch on the transaction type string; unknown types fall through
    # to the generic Transaction below.
    transaction_types = {
        "MARKET_ORDER": MarketOrderTransaction,
        "ORDER_FILL": OrderFillTransaction,
        "ORDER_CANCEL": OrderCancelTransaction,
        "MARKET_ORDER_REJECT": MarketOrderRejectTransaction,
        "TRADE_CLIENT_EXTENSIONS_MODIFY": TradeClientExtensionsModifyTransaction,
        "TRADE_CLIENT_EXTENSIONS_MODIFY_REJECT": TradeClientExtensionsModifyRejectTransaction,
        "TAKE_PROFIT_ORDER": TakeProfitOrderTransaction,
        "STOP_LOSS_ORDER": StopLossOrderTransaction,
        "TRAILING_STOP_LOSS_ORDER": TrailingStopLossOrderTransaction,
        "ORDER_CANCEL_REJECT": OrderCancelRejectTransaction,
        "TAKE_PROFIT_ORDER_REJECT": TakeProfitOrderRejectTransaction,
        "STOP_LOSS_ORDER_REJECT": StopLossOrderRejectTransaction,
        "TRAILING_STOP_LOSS_ORDER_REJECT": TrailingStopLossOrderRejectTransaction,
        "CLIENT_CONFIGURE": ClientConfigureTransaction,
        "CLIENT_CONFIGURE_REJECT": ClientConfigureRejectTransaction,
        "CREATE": CreateTransaction,
        "CLOSE": CloseTransaction,
        "REOPEN": ReopenTransaction,
        "TRANSFER_FUNDS": TransferFundsTransaction,
        "TRANSFER_FUNDS_REJECT": TransferFundsRejectTransaction,
        "FIXED_PRICE_ORDER": FixedPriceOrderTransaction,
        "LIMIT_ORDER": LimitOrderTransaction,
        "LIMIT_ORDER_REJECT": LimitOrderRejectTransaction,
        "STOP_ORDER": StopOrderTransaction,
        "STOP_ORDER_REJECT": StopOrderRejectTransaction,
        "MARKET_IF_TOUCHED_ORDER": MarketIfTouchedOrderTransaction,
        "MARKET_IF_TOUCHED_ORDER_REJECT": MarketIfTouchedOrderRejectTransaction,
        "ORDER_CLIENT_EXTENSIONS_MODIFY": OrderClientExtensionsModifyTransaction,
        "ORDER_CLIENT_EXTENSIONS_MODIFY_REJECT": OrderClientExtensionsModifyRejectTransaction,
        "MARGIN_CALL_ENTER": MarginCallEnterTransaction,
        "MARGIN_CALL_EXTEND": MarginCallExtendTransaction,
        "MARGIN_CALL_EXIT": MarginCallExitTransaction,
        "DELAYED_TRADE_CLOSURE": DelayedTradeClosureTransaction,
        "DAILY_FINANCING": DailyFinancingTransaction,
        "RESET_RESETTABLE_PL": ResetResettablePLTransaction,
    }
    transaction_cls = transaction_types.get(data.get("type"))
    if transaction_cls is not None:
        return transaction_cls.from_dict(data, ctx)
    data = data.copy()
    return Transaction(**data)
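# The lookup above is a type-keyed dispatch. A minimal sketch of the same
# pattern with toy handlers (not the real OANDA transaction classes):
def make_market(data):
    return ('market', data)

def make_limit(data):
    return ('limit', data)

_DISPATCH = {'MARKET_ORDER': make_market, 'LIMIT_ORDER': make_limit}

def dispatch(data):
    handler = _DISPATCH.get(data.get('type'))
    return handler(data) if handler else ('generic', data)

print(dispatch({'type': 'LIMIT_ORDER'}))  # ('limit', {'type': 'LIMIT_ORDER'})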
def doubleClick(x=None, y=None, interval=0.0, button='left', duration=0.0,
                tween=linear, pause=None, _pause=True):
    """Performs a double click.

    This is a wrapper function for click('left', x, y, 2, interval).

    The x and y parameters detail where the mouse event happens. If None,
    the current mouse position is used. If a float value, it is rounded
    down. If outside the boundaries of the screen, the event happens at the
    edge of the screen.

    Args:
        x (int, float, None, tuple, optional): The x position on the screen
            where the click happens. None by default. If tuple, this is
            used for x and y. If x is a str, it's considered a filename of
            an image to find on the screen with locateOnScreen() and click
            the center of.
        y (int, float, None, optional): The y position on the screen where
            the click happens. None by default.
        interval (float, optional): The number of seconds in between each
            click, if the number of clicks is greater than 1. 0.0 by
            default, for no pause in between clicks.
        button (str, int, optional): The mouse button clicked. Must be one
            of 'left', 'middle', 'right' (or 1, 2, or 3) respectively.
            'left' by default.

    Returns:
        None

    Raises:
        ValueError: If button is not one of 'left', 'middle', 'right', 1,
            2, 3, 4, 5, 6, or 7
    """
    _failSafeCheck()
    # Multiple clicks work different in OSX
    if sys.platform == 'darwin':
        x, y = _unpackXY(x, y)
        _mouseMoveDrag('move', x, y, 0, 0, duration=0, tween=None)
        x, y = platformModule._position()
        platformModule._multiClick(x, y, button, 2)
    else:
        click(x, y, 2, interval, button, _pause=False)
    _autoPause(pause, _pause)
def set_codes(self, codes):
    '''Set the country code map for the data. Codes given in a list.

    i.e. DE - Germany
         AT - Austria
         US - United States
    '''
    codemap = ''
    for cc in codes:
        cc = cc.upper()
        if cc in self.__ccodes:
            codemap += cc
        else:
            raise UnknownCountryCodeException(cc)
    self.codes = codemap
def run(self):
    """Load all artists into the database"""
    df = ArtistsInputData().load()
    # rename columns
    df.rename(columns={'artistLabel': 'name', 'genderLabel': 'gender'},
              inplace=True)
    # attribute columns that exist in the data model
    attribute_columns = ['name', 'wiki_id']
    # the extended model also stores the date of birth and gender
    if config.EXTENDED:
        attribute_columns += ['gender', 'year_of_birth']
    # store entities and attributes
    self.store(df, attribute_columns)
    self.done()
def _register_info(self, server):
    """Write a TensorBoardInfo file and arrange for its cleanup.

    Args:
        server: The result of `self._make_server()`.
    """
    server_url = urllib.parse.urlparse(server.get_url())
    info = manager.TensorBoardInfo(
        version=version.VERSION,
        start_time=int(time.time()),
        port=server_url.port,
        pid=os.getpid(),
        path_prefix=self.flags.path_prefix,
        logdir=self.flags.logdir,
        db=self.flags.db,
        cache_key=self.cache_key,
    )
    atexit.register(manager.remove_info_file)
    manager.write_info_file(info)
def cmd(send, msg, args):
    """Ping something.

    Syntax: {command} <target>
    """
    if not msg:
        send("Ping what?")
        return
    channel = args['target'] if args['target'] != 'private' else args['nick']
    # CTCP PING
    if "." not in msg and ":" not in msg:
        targets = set(msg.split())
        if len(targets) > 3:
            send("Please specify three or fewer people to ping.")
            return
        for target in targets:
            if not re.match(args['config']['core']['nickregex'], target):
                send("Invalid nick %s" % target)
            else:
                args['handler'].ping_map[target] = channel
                args['handler'].connection.ctcp(
                    "PING", target, " ".join(str(time()).split('.')))
        return
    try:
        answer = subprocess.check_output(
            [args['name'], '-W', '1', '-c', '1', msg],
            stderr=subprocess.STDOUT)
        answer = answer.decode().splitlines()
        send(answer[0])
        send(answer[1])
    except subprocess.CalledProcessError as e:
        if e.returncode == 2:
            send("ping: unknown host " + msg)
        elif e.returncode == 1:
            send(e.output.decode().splitlines()[-2])
def load(self, response):
    """Parse the GET response for the collection.

    This operates as a lazy-loader, meaning that the data are only
    downloaded from the server if they are not already loaded. Collection
    items are loaded sequentially.

    In some rare cases, a collection can have an asynchronous request
    triggered. For those cases, we handle it here.
    """
    self._models = []
    if isinstance(response, dict):
        for key in response.keys():
            model = self.model_class(self, href='')
            model.load(response[key])
            self._models.append(model)
    else:
        for item in response:
            model = self.model_class(self, href=item.get('href'))
            model.load(item)
            self._models.append(model)
def _log(self, x):
    """Modified version of np.log that manually sets values <= 0 to -inf

    Parameters
    ----------
    x : ndarray of floats
        Input to the log function

    Returns
    -------
    log_ma : ndarray of floats
        log of x, with x <= 0 values replaced with -inf
    """
    xshape = x.shape
    _x = x.flatten()
    y = utils.masked_log(_x)
    return y.reshape(xshape)
def add_url_rule(
    self,
    path: str,
    endpoint: Optional[str] = None,
    view_func: Optional[Callable] = None,
    methods: Optional[Iterable[str]] = None,
    defaults: Optional[dict] = None,
    host: Optional[str] = None,
    subdomain: Optional[str] = None,
    *,
    provide_automatic_options: Optional[bool] = None,
    is_websocket: bool = False,
    strict_slashes: bool = True,
) -> None:
    """Add a route/url rule to the application.

    This is designed to be used on the application directly. An example
    usage,

    .. code-block:: python

        def route():
            ...

        app.add_url_rule('/', route)

    Arguments:
        path: The path to route on, should start with a ``/``.
        view_func: Callable that returns a response.
        methods: List of HTTP verbs the function routes.
        endpoint: Optional endpoint name, if not present the function name
            is used.
        defaults: A dictionary of variables to provide automatically, use
            to provide a simpler default path for a route, e.g. to allow
            for ``/book`` rather than ``/book/0``,

            .. code-block:: python

                @app.route('/book', defaults={'page': 0})
                @app.route('/book/<int:page>')
                def book(page):
                    ...

        host: The full host name for this route (should include subdomain
            if needed) - cannot be used with subdomain.
        subdomain: A subdomain for this specific route.
        provide_automatic_options: Optionally False to prevent OPTION
            handling.
        strict_slashes: Strictly match the trailing slash present in the
            path. Will redirect a leaf (no slash) to a branch (with slash).
    """
    endpoint = endpoint or _endpoint_from_view_func(view_func)
    handler = ensure_coroutine(view_func)
    if methods is None:
        methods = getattr(view_func, 'methods', ['GET'])
    methods = cast(Set[str], set(methods))
    required_methods = set(getattr(view_func, 'required_methods', set()))
    if provide_automatic_options is None:
        automatic_options = getattr(view_func, 'provide_automatic_options', None)
        if automatic_options is None:
            automatic_options = 'OPTIONS' not in methods
    else:
        automatic_options = provide_automatic_options
    if automatic_options:
        required_methods.add('OPTIONS')
    methods.update(required_methods)

    if not self.url_map.host_matching and (host is not None or subdomain is not None):
        raise RuntimeError('Cannot use host or subdomain without host matching enabled.')
    if host is not None and subdomain is not None:
        raise ValueError('Cannot set host and subdomain, please choose one or the other')
    if subdomain is not None:
        if self.config['SERVER_NAME'] is None:
            raise RuntimeError('SERVER_NAME config is required to use subdomain in a route.')
        host = f"{subdomain}.{self.config['SERVER_NAME']}"
    elif host is None and self.url_map.host_matching:
        host = self.config['SERVER_NAME']
        if host is None:
            raise RuntimeError(
                'Cannot add a route with host matching enabled without either a specified '
                'host or a config SERVER_NAME',
            )

    self.url_map.add(
        self.url_rule_class(
            path, methods, endpoint, host=host,
            provide_automatic_options=automatic_options, defaults=defaults,
            is_websocket=is_websocket, strict_slashes=strict_slashes,
        ),
    )
    if handler is not None:
        old_handler = self.view_functions.get(endpoint)
        if getattr(old_handler, '_quart_async_wrapper', False):
            old_handler = old_handler.__wrapped__  # type: ignore
        if old_handler is not None and old_handler != view_func:
            raise AssertionError(f"Handler is overwriting existing for endpoint {endpoint}")
        self.view_functions[endpoint] = handler
def request(self, arg=None):
    '''Deal with requests'''
    if not self.status:
        return '{"result": "No message"}'
    try:
        status_dict = json.loads(mpstatus_to_json(self.status))
    except Exception as e:
        print(e)
        return
    # If no key, send the entire json
    if not arg:
        return json.dumps(status_dict)
    # Get item from path
    new_dict = status_dict
    args = arg.split('/')
    for key in args:
        if key in new_dict:
            new_dict = new_dict[key]
        else:
            return '{"key": "%s", "last_dict": %s}' % (key, json.dumps(new_dict))
    return json.dumps(new_dict)
def _ondemand(f):
    """Decorator to only request information if not in cache already."""
    name = f.__name__

    def func(self, *args, **kwargs):
        if not args and not kwargs:
            if hasattr(self, '_%s' % name):
                return getattr(self, '_%s' % name)
            a = f(self, *args, **kwargs)
            setattr(self, '_%s' % name, a)
            return a
        else:
            return f(self, *args, **kwargs)

    func.__name__ = name
    return func
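# Usage sketch for the decorator above (Remote is a hypothetical class):
# a no-argument call runs the body once, then serves the cached attribute.
class Remote:
    @_ondemand
    def status(self):
        print('fetching...')
        return 'ok'

r = Remote()
r.status()  # prints 'fetching...' and returns 'ok'
r.status()  # returns 'ok' from the cached _status attribute, no fetch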
def get_render_configurations(self, request, **kwargs):
    """Render image interface"""
    data = self.process_form_data(self._get_form_defaults(), kwargs)
    variable_set = self.get_variable_set(
        self.service.variable_set.order_by('index'), data)
    base_config = ImageConfiguration(
        extent=data['bbox'],
        size=data['size'],
        image_format=data['image_format'],
        background_color=(TRANSPARENT_BACKGROUND_COLOR
                          if data.get('transparent')
                          else DEFAULT_BACKGROUND_COLOR))
    return base_config, self.apply_time_to_configurations(
        [RenderConfiguration(v) for v in variable_set], data)
def clear_cache(self, items=None, topic=EVENT_TOPIC):
    """expects event object to be in the format of a session-stop or
    session-expire event, whose results attribute is a namedtuple
    (identifiers, session_key)
    """
    try:
        for realm in self.realms:
            identifier = items.identifiers.from_source(realm.name)
            if identifier:
                realm.clear_cached_authc_info(identifier)
    except AttributeError:
        msg = ('Could not clear authc_info from cache after event. '
               'items: ' + str(items))
        logger.warn(msg)
def _invalidate_entry(self, key):
    "If exists: Invalidate old entry and return it."
    old_entry = self.access_lookup.get(key)
    if old_entry:
        old_entry.is_valid = False
    return old_entry
async def select(self, db):
    """Changes db index for all free connections.

    All previously acquired connections will be closed when released.
    """
    res = True
    async with self._cond:
        for i in range(self.freesize):
            res = res and (await self._pool[i].select(db))
        else:
            self._db = db
        return res
def _ir_calibrate(self, data, channel_name, cal_type):
    """Calibrate to brightness temperature."""
    if cal_type == 1:
        # spectral radiances
        return self._srads2bt(data, channel_name)
    elif cal_type == 2:
        # effective radiances
        return self._erads2bt(data, channel_name)
    else:
        raise NotImplementedError('Unknown calibration type')
def findAllMatches(self, needle, similarity):
    """Finds all matches above ``similarity`` using a search pyramid to
    improve efficiency

    Pyramid implementation unashamedly stolen from
    https://github.com/stb-tester/stb-tester
    """
    positions = []
    # Use findBestMatch to get the best match
    while True:
        best_match = self.findBestMatch(needle, similarity)
        if best_match is None:
            # No more matches
            break
        # Found a match. Add it to our list
        positions.append(best_match)  # (position, confidence)
        # Erase the found match from the haystack.
        # Repeat this process until no other matches are found
        x, y = best_match[0]
        w = needle.shape[1]
        h = needle.shape[0]
        roi = (x, y, w, h)
        # numpy 2D slice
        roi_slice = (slice(roi[1], roi[1] + roi[3]),
                     slice(roi[0], roi[0] + roi[2]))
        self.haystack[roi_slice] = 0
    # Whew! Let's see if there's a match after all that.
    positions.sort(key=lambda x: (x[0][1], x[0][0]))
    return positions
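# A small numpy sketch of the erase step above: zeroing the matched region
# of the haystack prevents the same location from matching twice.
import numpy as np

haystack = np.ones((6, 6))
x, y, w, h = 2, 1, 3, 2             # match position and needle size
haystack[y:y + h, x:x + w] = 0      # same effect as roi_slice above
print(int(haystack.sum()))          # 30 -- six of 36 cells erased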
def get_webapp_settings(name, site, settings):
    r'''
    .. versionadded:: 2017.7.0

    Get the value of the setting for the IIS web application.

    .. note::
        Params are case sensitive

    :param str name: The name of the IIS web application.
    :param str site: The site name contains the web application.
        Example: Default Web Site
    :param str settings: A dictionary of the setting names and their values.
        Available settings: physicalPath, applicationPool, userName, password

    Returns:
        dict: A dictionary of the provided settings and their values.

    CLI Example:

    .. code-block:: bash

        salt '*' win_iis.get_webapp_settings name='app0' site='Default Web Site' settings="['physicalPath','applicationPool']"
    '''
    ret = dict()
    pscmd = list()
    availableSettings = ('physicalPath', 'applicationPool', 'userName', 'password')

    if not settings:
        log.warning('No settings provided')
        return ret

    pscmd.append(r'$Settings = @{};')

    # Verify setting is in the predefined settings and append relevant query
    # command per setting key
    for setting in settings:
        if setting in availableSettings:
            if setting == "userName" or setting == "password":
                pscmd.append(" $Property = Get-WebConfigurationProperty -Filter \"system.applicationHost/sites/site[@name='{0}']/application[@path='/{1}']/virtualDirectory[@path='/']\"".format(site, name))
                pscmd.append(r' -Name "{0}" -ErrorAction Stop | select Value;'.format(setting))
                pscmd.append(r' $Property = $Property | Select-Object -ExpandProperty Value;')
                pscmd.append(r" $Settings['{0}'] = [String] $Property;".format(setting))
                pscmd.append(r' $Property = $Null;')
            if setting == "physicalPath" or setting == "applicationPool":
                pscmd.append(r" $Property = (get-webapplication {0}).{1};".format(name, setting))
                pscmd.append(r" $Settings['{0}'] = [String] $Property;".format(setting))
                pscmd.append(r' $Property = $Null;')
        else:
            availSetStr = ', '.join(availableSettings)
            message = ('Unexpected setting: ' + setting
                       + '. Available settings are: ' + availSetStr)
            raise SaltInvocationError(message)

    pscmd.append(' $Settings')

    # Run commands and return data as json
    cmd_ret = _srvmgr(cmd=six.text_type().join(pscmd), return_json=True)

    # Update dict var to return data
    try:
        items = salt.utils.json.loads(cmd_ret['stdout'], strict=False)
        if isinstance(items, list):
            ret.update(items[0])
        else:
            ret.update(items)
    except ValueError:
        log.error('Unable to parse return data as Json.')

    if None in six.viewvalues(ret):
        message = ('Some values are empty - please validate site and web '
                   'application names. Some commands are case sensitive')
        raise SaltInvocationError(message)

    return ret
def _set_parse_data(self):
    """set attributes derived from MediaWiki (action=parse)"""
    pdata = self._load_response('parse')['parse']
    self.data['iwlinks'] = utils.get_links(pdata.get('iwlinks'))
    self.data['pageid'] = pdata.get('pageid')
    self.data['wikitext'] = pdata.get('wikitext')

    parsetree = pdata.get('parsetree')
    self.data['parsetree'] = parsetree

    boxterm = self.params.get('boxterm')
    if boxterm:
        infobox = utils.get_infobox(parsetree, boxterm)
    else:
        infobox = utils.get_infobox(parsetree)
    self.data['infobox'] = infobox

    title = pdata.get('title')
    if title:
        self.data['title'] = title
        if not self.params.get('title'):
            self.params['title'] = title

    wikibase = pdata.get('properties').get('wikibase_item')
    if wikibase:
        self.data['wikibase'] = wikibase
        self.data['wikidata_url'] = utils.wikidata_url(wikibase)

    if self.data['infobox']:
        self._set_parse_image(self.data['infobox'])
def check_git_unchanged(filename, yes=False):
    """Check git to avoid overwriting user changes."""
    if check_staged(filename):
        s = 'There are staged changes in {}, overwrite? [y/n] '.format(filename)
        if yes or input(s) in ('y', 'yes'):
            return
        else:
            raise RuntimeError('There are staged changes in '
                               '{}, aborting.'.format(filename))
    if check_unstaged(filename):
        s = 'There are unstaged changes in {}, overwrite? [y/n] '.format(filename)
        if yes or input(s) in ('y', 'yes'):
            return
        else:
            raise RuntimeError('There are unstaged changes in '
                               '{}, aborting.'.format(filename))
def rst_to_docs_rst(infile, outfile):
    """Convert an rst file to a sphinx docs rst file."""
    # Read infile into a list of lines
    with open(infile, 'r') as fin:
        rst = fin.readlines()

    # Inspect outfile path components to determine whether outfile
    # is in the root of the examples directory or in a subdirectory
    # thereof
    ps = pathsplit(outfile)[-3:]
    if ps[-2] == 'examples':
        ps = ps[-2:]
        idx = 'index'
    else:
        idx = ''

    # Output string starts with a cross-reference anchor constructed from
    # the file name and path
    out = '.. _' + '_'.join(ps) + ':\n\n'

    # Iterate over lines from infile
    it = iter(rst)
    for line in it:
        if line[0:12] == '.. toc-start':  # Line has start of toc marker
            # Initialise current toc array and iterate over lines until
            # end of toc marker encountered
            toc = []
            for line in it:
                if line == '\n':
                    # Drop newline lines
                    continue
                elif line[0:10] == '.. toc-end':  # End of toc marker
                    # Add toctree section to output string
                    out += '.. toctree::\n   :maxdepth: 1\n\n'
                    for c in toc:
                        out += '   %s <%s>\n' % c
                    break
                else:  # Still within toc section
                    # Extract link text and target url and append to
                    # toc array
                    m = re.search(r'`(.*?)\s*<(.*?)(?:.py)?>`', line)
                    if m:
                        if idx == '':
                            toc.append((m.group(1), m.group(2)))
                        else:
                            toc.append((m.group(1),
                                        os.path.join(m.group(2), idx)))
        else:  # Not within toc section
            out += line

    with open(outfile, 'w') as fout:
        fout.write(out)
def fit_first_and_second_harmonics(phi, intensities):
    """Fit the first and second harmonic function values to a set of
    (angle, intensity) pairs.

    This function is used to compute corrections for ellipse fitting:

    .. math::

        f(phi) = y0 + a1*\\sin(phi) + b1*\\cos(phi)
                 + a2*\\sin(2*phi) + b2*\\cos(2*phi)

    Parameters
    ----------
    phi : float or `~numpy.ndarray`
        The angle(s) along the elliptical path, going towards the positive
        y axis, starting coincident with the position angle. That is, the
        angles are defined from the semimajor axis that lies in the
        positive x quadrant.
    intensities : `~numpy.ndarray`
        The intensities measured along the elliptical path, at the angles
        defined by the ``phi`` parameter.

    Returns
    -------
    y0, a1, b1, a2, b2 : float
        The fitted harmonic coefficent values.
    """
    a1 = b1 = a2 = b2 = 1.

    def optimize_func(x):
        return first_and_second_harmonic_function(
            phi, np.array([x[0], x[1], x[2], x[3], x[4]])) - intensities

    return _least_squares_fit(optimize_func,
                              [np.mean(intensities), a1, b1, a2, b2])
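# A self-contained sketch of the same fit using scipy directly; the
# harmonic function below is a stand-in for the module's own
# first_and_second_harmonic_function / _least_squares_fit helpers.
import numpy as np
from scipy.optimize import leastsq

def harmonics(phi, c):
    y0, a1, b1, a2, b2 = c
    return (y0 + a1 * np.sin(phi) + b1 * np.cos(phi)
            + a2 * np.sin(2 * phi) + b2 * np.cos(2 * phi))

phi = np.linspace(0, 2 * np.pi, 100)
intensities = 5 + 0.3 * np.sin(phi) + 0.1 * np.cos(2 * phi)
coeffs, _ = leastsq(lambda c: harmonics(phi, c) - intensities,
                    [intensities.mean(), 1., 1., 1., 1.])
print(np.round(coeffs, 3))  # approximately [5. 0.3 0. 0. 0.1]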
def progressbar(iterable=None, length=None, label=None, show_eta=True,
                show_percent=None, show_pos=False, item_show_func=None,
                fill_char='#', empty_char='-',
                bar_template='%(label)s [%(bar)s] %(info)s',
                info_sep=' ', width=36, file=None, color=None):
    """Create a progressbar that works in Jupyter/IPython notebooks and the
    terminal"""
    try:
        return IPyBackend(iterable, length, label=label, show_eta=show_eta,
                          show_percent=show_percent, show_pos=show_pos,
                          item_show_func=item_show_func, info_sep=info_sep)
    except (ImportError, RuntimeError):
        # fall back if ipython is not installed or no notebook is running
        return click.progressbar(iterable, length, label, show_eta,
                                 show_percent, show_pos, item_show_func,
                                 fill_char, empty_char, bar_template,
                                 info_sep, width, file, color)
def isseq(obj):
    '''Returns True if `obj` is a sequence-like object (but not a string or
    dict); i.e. a tuple, list, subclass thereof, or having an interface
    that supports iteration.
    '''
    return (not isstr(obj) and not isdict(obj)
            and (isinstance(obj, (list, tuple))
                 or callable(getattr(obj, '__iter__', None))))
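# Truth-table sketch for the predicate above; isstr/isdict here are minimal
# stand-ins for the module's own helpers.
def isstr(obj):
    return isinstance(obj, str)

def isdict(obj):
    return isinstance(obj, dict)

for obj in ([1, 2], (1, 2), iter(range(3)), 'abc', {'a': 1}, 42):
    print(type(obj).__name__, isseq(obj))
# list, tuple and iterators -> True; str, dict and int -> False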
def reset_sum_fluxes(self):
    """Set the sum of the fluxes calculated so far to zero.

    >>> from hydpy.models.test_v1 import *
    >>> parameterstep()
    >>> fluxes.fastaccess._q_sum = 5.
    >>> model.reset_sum_fluxes()
    >>> fluxes.fastaccess._q_sum
    0.0
    """
    fluxes = self.sequences.fluxes
    for flux in fluxes.numerics:
        if flux.NDIM == 0:
            setattr(fluxes.fastaccess, '_%s_sum' % flux.name, 0.)
        else:
            getattr(fluxes.fastaccess, '_%s_sum' % flux.name)[:] = 0.
def _append(self, menu):
    '''append this menu item to a menu'''
    from wx_loader import wx
    menu.AppendMenu(-1, self.name, self.wx_menu())
def makeMrkvHist(self):
    '''Makes a history of macroeconomic Markov states, stored in the
    attribute MrkvNow_hist. This version ensures that each state is reached
    a sufficient number of times to have a valid sample for calcDynamics to
    produce a good dynamic rule. It will sometimes cause act_T to be
    increased beyond its initially specified level.

    Parameters
    ----------
    None

    Returns
    -------
    None
    '''
    if hasattr(self, 'loops_max'):
        loops_max = self.loops_max
    else:
        # Maximum number of loops; final act_T never exceeds act_T*loops_max
        loops_max = 10
    # Choose minimum number of periods in each state for a valid Markov sequence
    state_T_min = 50
    # Scaling factor on logit choice shocks when jumping to a new state.
    # Values close to zero make the most underrepresented states very likely
    # to visit, while large values of logit_scale make any state very likely
    # to be jumped to.
    logit_scale = 0.2

    # Reset act_T to the level actually specified by the user
    if hasattr(self, 'act_T_orig'):
        act_T = self.act_T_orig
    else:  # Or store it for the first time
        self.act_T_orig = self.act_T
        act_T = self.act_T

    # Find the long run distribution of Markov states
    w, v = np.linalg.eig(np.transpose(self.MrkvArray))
    idx = (np.abs(w - 1.0)).argmin()
    x = v[:, idx].astype(float)
    LR_dstn = (x / np.sum(x))

    # Initialize the Markov history and set up transitions
    MrkvNow_hist = np.zeros(self.act_T_orig, dtype=int)
    cutoffs = np.cumsum(self.MrkvArray, axis=1)
    loops = 0
    go = True
    MrkvNow = self.MrkvNow_init
    t = 0
    StateCount = self.MrkvArray.shape[0]

    # Add histories until each state has been visited at least state_T_min times
    while go:
        draws = drawUniform(N=self.act_T_orig, seed=loops)
        for s in range(draws.size):  # Add act_T_orig more periods
            MrkvNow_hist[t] = MrkvNow
            MrkvNow = np.searchsorted(cutoffs[MrkvNow, :], draws[s])
            t += 1

        # Calculate the empirical distribution
        state_T = np.zeros(StateCount)
        for i in range(StateCount):
            state_T[i] = np.sum(MrkvNow_hist == i)

        # Check whether each state has been visited state_T_min times
        if np.all(state_T >= state_T_min):
            go = False  # If so, terminate the loop
            continue

        # Choose an underrepresented state to "jump" to
        if np.any(state_T == 0):
            # If any states have *never* been visited, randomly choose one of those
            never_visited = np.where(np.array(state_T == 0))[0]
            MrkvNow = np.random.choice(never_visited)
        else:
            # Otherwise, use logit choice probabilities to visit an
            # underrepresented state
            emp_dstn = state_T / act_T
            ratios = LR_dstn / emp_dstn
            ratios_adj = ratios - np.max(ratios)
            ratios_exp = np.exp(ratios_adj / logit_scale)
            ratios_sum = np.sum(ratios_exp)
            jump_probs = ratios_exp / ratios_sum
            cum_probs = np.cumsum(jump_probs)
            MrkvNow = np.searchsorted(cum_probs, draws[-1])

        loops += 1
        # Make the Markov state history longer by act_T_orig periods
        if loops >= loops_max:
            go = False
            print('makeMrkvHist reached maximum number of loops without '
                  'generating a valid sequence!')
        else:
            MrkvNow_new = np.zeros(self.act_T_orig, dtype=int)
            MrkvNow_hist = np.concatenate((MrkvNow_hist, MrkvNow_new))
            act_T += self.act_T_orig

    # Store the results as attributes of self
    self.MrkvNow_hist = MrkvNow_hist
    self.act_T = act_T
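# A numpy sketch of the logit jump step above: states whose long-run share
# exceeds their empirical share receive nearly all of the jump probability
# when logit_scale is small (the distributions here are hypothetical).
import numpy as np

LR_dstn = np.array([0.5, 0.3, 0.2])
emp_dstn = np.array([0.6, 0.35, 0.05])   # state 2 is underrepresented
ratios = LR_dstn / emp_dstn
ratios_exp = np.exp((ratios - ratios.max()) / 0.2)  # logit_scale = 0.2
print(np.round(ratios_exp / ratios_exp.sum(), 3))   # [0. 0. 1.]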
def get_belns_handle(client, username=None, password=None):
    """Get BEL namespace arango db handle"""
    (username, password) = get_user_creds(username, password)
    sys_db = client.db("_system", username=username, password=password)

    # Create a new database named "belns"
    try:
        if username and password:
            belns_db = sys_db.create_database(
                name=belns_db_name,
                users=[{"username": username, "password": password, "active": True}],
            )
        else:
            belns_db = sys_db.create_database(name=belns_db_name)
    except arango.exceptions.DatabaseCreateError:
        if username and password:
            belns_db = client.db(belns_db_name, username=username, password=password)
        else:
            belns_db = client.db(belns_db_name)

    try:
        belns_db.create_collection(belns_metadata_name)
    except Exception:
        pass

    try:
        equiv_nodes = belns_db.create_collection(equiv_nodes_name,
                                                 index_bucket_count=64)
        equiv_nodes.add_hash_index(fields=["name"], unique=True)
    except Exception:
        pass

    try:
        belns_db.create_collection(equiv_edges_name, edge=True,
                                   index_bucket_count=64)
    except Exception:
        pass

    try:
        ortholog_nodes = belns_db.create_collection(ortholog_nodes_name,
                                                    index_bucket_count=64)
        ortholog_nodes.add_hash_index(fields=["name"], unique=True)
    except Exception:
        pass

    try:
        belns_db.create_collection(ortholog_edges_name, edge=True,
                                   index_bucket_count=64)
    except Exception:
        pass

    return belns_db
def package_info(self):
    """Collect built libraries names and solve flatc path."""
    self.cpp_info.libs = tools.collect_libs(self)
    self.user_info.flatc = os.path.join(self.package_folder, "bin", "flatc")
def register_jinja_loaders ( self , * loaders ) : """Register one or many ` jinja2 . Loader ` instances for templates lookup . During application initialization plugins can register a loader so that their templates are available to jinja2 renderer . Order of registration matters : last registered is first looked up ( after standard Flask lookup in app template folder ) . This allows a plugin to override templates provided by others , or by base application . The application can override any template from any plugins from its template folder ( See ` Flask . Application . template _ folder ` ) . : raise : ` ValueError ` if a template has already been rendered"""
if not hasattr ( self , "_jinja_loaders" ) : raise ValueError ( "Cannot register new jinja loaders after first template rendered" ) self . _jinja_loaders . extend ( loaders )
def __get_min_reads ( current_provisioning , min_provisioned_reads , log_tag ) : """Get the minimum number of reads for current_provisioning : type current_provisioning : int : param current_provisioning : Current provisioned reads : type min_provisioned_reads : int : param min_provisioned_reads : Configured min provisioned reads : type log_tag : str : param log_tag : Prefix for the log : returns : int - - Minimum number of reads"""
# Fallback value to ensure that we always have at least 1 read reads = 1 if min_provisioned_reads : reads = int ( min_provisioned_reads ) if reads > int ( current_provisioning * 2 ) : reads = int ( current_provisioning * 2 ) logger . debug ( '{0} - ' 'Cannot reach min-provisioned-reads as max scale up ' 'is 100% of current provisioning' . format ( log_tag ) ) logger . debug ( '{0} - Setting min provisioned reads to {1}' . format ( log_tag , reads ) ) return reads
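The 100% scale-up cap is easiest to see with numbers; since the function is module-private (a double-underscore name at module level, so no class name mangling applies), it can be exercised from within its own module:

print(__get_min_reads(100, 500, 'table: t'))   # -> 200 (clamped to current * 2)
print(__get_min_reads(100, 150, 'table: t'))   # -> 150 (below the cap)
print(__get_min_reads(100, None, 'table: t'))  # -> 1   (fallback minimum)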
def _full_to_yearly_ts ( self , arr , dt ) : """Average the full timeseries within each year ."""
time_defined = self . def_time and not ( 'av' in self . dtype_in_time ) if time_defined : arr = utils . times . yearly_average ( arr , dt ) return arr
def search_satellite_imagery ( self , polygon_id , acquired_from , acquired_to , img_type = None , preset = None , min_resolution = None , max_resolution = None , acquired_by = None , min_cloud_coverage = None , max_cloud_coverage = None , min_valid_data_coverage = None , max_valid_data_coverage = None ) : """Searches on the Agro API the metadata for all available satellite images that contain the specified polygon and acquired during the specified time interval ; and optionally matching the specified set of filters : - image type ( eg . GeoTIF ) - image preset ( eg . false color , NDVI , . . . ) - min / max acquisition resolution - acquiring satellite - min / max cloud coverage on acquired scene - min / max valid data coverage on acquired scene : param polygon _ id : the ID of the reference polygon : type polygon _ id : str : param acquired _ from : lower edge of acquisition interval , UNIX timestamp : type acquired _ from : int : param acquired _ to : upper edge of acquisition interval , UNIX timestamp : type acquired _ to : int : param img _ type : the desired file format type of the images . Allowed values are given by ` pyowm . commons . enums . ImageTypeEnum ` : type img _ type : ` pyowm . commons . databoxes . ImageType ` : param preset : the desired preset of the images . Allowed values are given by ` pyowm . agroapi10 . enums . PresetEnum ` : type preset : str : param min _ resolution : minimum resolution for images , px / meters : type min _ resolution : int : param max _ resolution : maximum resolution for images , px / meters : type max _ resolution : int : param acquired _ by : short symbol of the satellite that acquired the image ( eg . " l8 " ) : type acquired _ by : str : param min _ cloud _ coverage : minimum cloud coverage percentage on acquired images : type min _ cloud _ coverage : int : param max _ cloud _ coverage : maximum cloud coverage percentage on acquired images : type max _ cloud _ coverage : int : param min _ valid _ data _ coverage : minimum valid data coverage percentage on acquired images : type min _ valid _ data _ coverage : int : param max _ valid _ data _ coverage : maximum valid data coverage percentage on acquired images : type max _ valid _ data _ coverage : int : return : a list of ` pyowm . agro10 . imagery . MetaImage ` subtypes instances"""
assert polygon_id is not None assert acquired_from is not None assert acquired_to is not None assert acquired_from <= acquired_to , 'Start timestamp of acquisition window must come before its end' if min_resolution is not None : assert min_resolution > 0 , 'Minimum resolution must be positive' if max_resolution is not None : assert max_resolution > 0 , 'Maximum resolution must be positive' if min_resolution is not None and max_resolution is not None : assert min_resolution <= max_resolution , 'Minimum resolution must not exceed maximum resolution' if min_cloud_coverage is not None : assert min_cloud_coverage >= 0 , 'Minimum cloud coverage must be non-negative' if max_cloud_coverage is not None : assert max_cloud_coverage >= 0 , 'Maximum cloud coverage must be non-negative' if min_cloud_coverage is not None and max_cloud_coverage is not None : assert min_cloud_coverage <= max_cloud_coverage , 'Minimum cloud coverage must not exceed maximum cloud coverage' if min_valid_data_coverage is not None : assert min_valid_data_coverage >= 0 , 'Minimum valid data coverage must be non-negative' if max_valid_data_coverage is not None : assert max_valid_data_coverage >= 0 , 'Maximum valid data coverage must be non-negative' if min_valid_data_coverage is not None and max_valid_data_coverage is not None : assert min_valid_data_coverage <= max_valid_data_coverage , 'Minimum valid data coverage must not exceed maximum valid data coverage' # prepare params params = dict ( appid = self . API_key , polyid = polygon_id , start = acquired_from , end = acquired_to ) if min_resolution is not None : params [ 'resolution_min' ] = min_resolution if max_resolution is not None : params [ 'resolution_max' ] = max_resolution if acquired_by is not None : params [ 'type' ] = acquired_by if min_cloud_coverage is not None : params [ 'clouds_min' ] = min_cloud_coverage if max_cloud_coverage is not None : params [ 'clouds_max' ] = max_cloud_coverage if min_valid_data_coverage is not None : params [ 'coverage_min' ] = min_valid_data_coverage if max_valid_data_coverage is not None : params [ 'coverage_max' ] = max_valid_data_coverage # call API status , data = self . http_client . get_json ( SATELLITE_IMAGERY_SEARCH_URI , params = params ) result_set = SatelliteImagerySearchResultSet ( polygon_id , data , timeutils . now ( timeformat = 'unix' ) ) # further filter by img_type and / or preset ( if specified ) if img_type is not None and preset is not None : return result_set . with_img_type_and_preset ( img_type , preset ) elif img_type is not None : return result_set . with_img_type ( img_type ) elif preset is not None : return result_set . with_preset ( preset ) else : return result_set . all ( )
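A hedged usage sketch, assuming a pyowm Agro API manager mgr exposing this method and a polygon already registered under the given ID (the ID and timestamps are placeholders); the enum classes are the ones named in the docstring above:

from pyowm.commons.enums import ImageTypeEnum
from pyowm.agroapi10.enums import PresetEnum

results = mgr.search_satellite_imagery(
    polygon_id='5abb9fb82c8897000bde3e87',   # hypothetical polygon ID
    acquired_from=1500336000,                # 2017-07-18 00:00 UTC
    acquired_to=1508976000,                  # 2017-10-26 00:00 UTC
    img_type=ImageTypeEnum.GEOTIFF,
    preset=PresetEnum.FALSE_COLOR,
    max_cloud_coverage=10)
for meta_image in results:
    print(meta_image)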
def lines_from_string ( string , as_interned = False ) : """Create a list of file lines from a given string . Args : string ( str ) : File string as _ interned ( bool ) : List of " interned " strings ( default False ) Returns : strings ( list ) : File line list"""
if as_interned : return [ sys . intern ( line ) for line in string . splitlines ( ) ] return string . splitlines ( )
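Quick usage check (pure standard library; note the implementation above relies on sys being imported at module level for the interning branch):

lines = lines_from_string("alpha\nbeta\ngamma")
print(lines)                                    # ['alpha', 'beta', 'gamma']
interned = lines_from_string("alpha\nbeta", as_interned=True)
print(interned == ['alpha', 'beta'])            # True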
def check_grid_coordinates ( self , ds ) : """5.6 When the coordinate variables for a horizontal grid are not longitude and latitude , it is required that the true latitude and longitude coordinates be supplied via the coordinates attribute . : param netCDF4 . Dataset ds : An open netCDF dataset : rtype : list : return : List of results"""
ret_val = [ ] latitudes = cfutil . get_true_latitude_variables ( ds ) longitudes = cfutil . get_true_longitude_variables ( ds ) check_features = [ '2d-regular-grid' , '2d-static-grid' , '3d-regular-grid' , '3d-static-grid' , 'mapped-grid' , 'reduced-grid' ] # This one is tricky because there's a very subtle difference between # latitude as defined in Chapter 4 and " true " latitude as defined in # chapter 5 . # For each geophysical variable that defines a grid , assert it is # associated with a true latitude or longitude coordinate . for variable in self . _find_geophysical_vars ( ds ) : # We use a set so we can do set - wise comparisons with coordinate # dimensions dimensions = set ( ds . variables [ variable ] . dimensions ) # If it's not a grid , skip it if cfutil . guess_feature_type ( ds , variable ) not in check_features : continue has_coords = TestCtx ( BaseCheck . HIGH , self . section_titles [ '5.6' ] ) # axis_map is a defaultdict ( list ) mapping the axis to a list of # coordinate names . For example : # { 'X' : [ 'lon' ] , 'Y' : [ 'lat' ] , 'Z' : [ 'lev' ] } # The mapping comes from the dimensions of the variable and the # contents of the ` coordinates ` attribute only . axis_map = cfutil . get_axis_map ( ds , variable ) msg = '{}\'s coordinate variable "{}" is not one of the variables identifying true ' + 'latitude/longitude and its dimensions are not a subset of {}\'s dimensions' alt = '{} has no coordinate associated with a variable identified as true latitude/longitude; ' + 'its coordinate variable should also share a subset of {}\'s dimensions' # Make sure we can find latitude and its dimensions are a subset _lat = None found_lat = False for lat in axis_map [ 'Y' ] : _lat = lat is_subset_dims = set ( ds . variables [ lat ] . dimensions ) . issubset ( dimensions ) if is_subset_dims and lat in latitudes : found_lat = True break if _lat : has_coords . assert_true ( found_lat , msg . format ( variable , _lat , variable ) ) else : has_coords . assert_true ( found_lat , alt . format ( variable , variable ) ) # Make sure we can find longitude and its dimensions are a subset _lon = None found_lon = False for lon in axis_map [ 'X' ] : _lon = lon is_subset_dims = set ( ds . variables [ lon ] . dimensions ) . issubset ( dimensions ) if is_subset_dims and lon in longitudes : found_lon = True break if _lon : has_coords . assert_true ( found_lon , msg . format ( variable , _lon , variable ) ) else : has_coords . assert_true ( found_lon , alt . format ( variable , variable ) ) ret_val . append ( has_coords . to_result ( ) ) return ret_val
def _determine_spec ( self , index ) : """Determine how a value for a field should be constructed : param index : The field number : return : A tuple containing the following elements : - unicode string of the field name - Asn1Value class of the field spec - Asn1Value class of the value spec - None or dict of params to pass to the field spec - None or Asn1Value class indicating the value spec was derived from an OID or a spec callback"""
name , field_spec , field_params = self . _fields [ index ] value_spec = field_spec spec_override = None if self . _spec_callbacks is not None and name in self . _spec_callbacks : callback = self . _spec_callbacks [ name ] spec_override = callback ( self ) if spec_override : # Allow a spec callback to specify both the base spec and # the override , for situations such as OctetString and parse _ as if spec_override . __class__ == tuple and len ( spec_override ) == 2 : field_spec , value_spec = spec_override if value_spec is None : value_spec = field_spec spec_override = None # When no field spec is specified , use a single return value as that elif field_spec is None : field_spec = spec_override value_spec = field_spec spec_override = None else : value_spec = spec_override elif self . _oid_nums is not None and self . _oid_nums [ 1 ] == index : oid = self . _lazy_child ( self . _oid_nums [ 0 ] ) . native if oid in self . _oid_specs : spec_override = self . _oid_specs [ oid ] value_spec = spec_override return ( name , field_spec , value_spec , field_params , spec_override )
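For illustration, a hypothetical spec callback in the shape this method handles: it may return a plain spec class, or a (field_spec, value_spec) pair for cases such as an OctetString whose contents should be re-parsed as another structure. OctetString is the asn1crypto type named in the comment above; InnerPayload and the 'wrapped' content type are made up for the sketch.

def payload_spec(outer):
    # Returning a 2-tuple sets both the base field spec and the parse-as spec
    if outer['content_type'].native == 'wrapped':
        return (OctetString, InnerPayload)
    # Returning None falls back to the declared field spec
    return None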
def count_nulls ( self , field ) : """Count the number of null values in a column"""
try : n = self . df [ field ] . isnull ( ) . sum ( ) except KeyError : self . warning ( "Can not find column" , field ) return except Exception as e : self . err ( e , "Can not count nulls" ) return self . ok ( "Found" , n , "nulls in column" , field )
def get_mac_address_table_input_request_type_get_next_request_mac_address_type ( self , ** kwargs ) : """Auto Generated Code"""
config = ET . Element ( "config" ) get_mac_address_table = ET . Element ( "get_mac_address_table" ) config = get_mac_address_table input = ET . SubElement ( get_mac_address_table , "input" ) request_type = ET . SubElement ( input , "request-type" ) get_next_request = ET . SubElement ( request_type , "get-next-request" ) mac_address_type = ET . SubElement ( get_next_request , "mac-address-type" ) mac_address_type . text = kwargs . pop ( 'mac_address_type' ) callback = kwargs . pop ( 'callback' , self . _callback ) return callback ( config )
def ChiSquared ( k , tag = None ) : """A Chi - Squared random variate Parameters k : int The degrees of freedom of the distribution ( must be an integer greater than zero )"""
assert int ( k ) == k and k >= 1 , 'Chi-Squared "k" must be an integer greater than 0' return uv ( ss . chi2 ( k ) , tag = tag )
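As a sanity check on the underlying distribution (what the uv wrapper exposes beyond this depends on the rest of the package), scipy's chi-squared has mean k and variance 2k:

import scipy.stats as ss

rv = ss.chi2(4)
print(rv.mean(), rv.var())   # 4.0 8.0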
def load_images ( image_paths : Iterable [ Union [ str , Path ] ] ) -> Iterable [ SpatialImage ] : """Load images from paths . For efficiency , returns an iterator , not a sequence , so the results cannot be accessed by indexing . For every new iteration through the images , load _ images must be called again . Parameters image _ paths : Paths to images . Yields SpatialImage Image ."""
for image_path in image_paths : if isinstance ( image_path , Path ) : string_path = str ( image_path ) else : string_path = image_path logger . debug ( 'Starting to read file %s' , string_path ) yield nib . load ( string_path )
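A short usage sketch (the file names are placeholders); because this is a generator, iterate immediately or call load_images again for a second pass:

from pathlib import Path

paths = [Path('scan1.nii.gz'), 'scan2.nii.gz']   # hypothetical image files
for image in load_images(paths):
    print(image.shape)                            # nibabel images expose .shape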
def rest_verbs ( http_method_names = None ) : """Decorator that converts a function - based view into a RestView subclass . Takes a list of allowed methods for the view as an argument ."""
http_method_names = [ 'GET' ] if ( http_method_names is None ) else http_method_names def decorator ( func ) : WrappedRestView = type ( six . PY3 and 'WrappedRestView' or b'WrappedRestView' , ( RestView , ) , { '__doc__' : func . __doc__ } ) # Note , the above allows us to set the docstring . # It is the equivalent of : # class WrappedRestView ( RestView ) : # pass # WrappedRestView . _ _ doc _ _ = func . doc < - - - Not possible to do this # api _ view applied without ( method _ names ) assert not ( isinstance ( http_method_names , types . FunctionType ) ) , '@api_view missing list of allowed HTTP methods' # api _ view applied with eg . string instead of list of strings assert isinstance ( http_method_names , ( list , tuple ) ) , '@api_view expected a list of strings, received %s' % type ( http_method_names ) . __name__ allowed_methods = set ( http_method_names ) | set ( ( 'options' , ) ) WrappedRestView . http_method_names = [ method . lower ( ) for method in allowed_methods ] def handler ( self , * args , ** kwargs ) : return func ( * args , ** kwargs ) for method in http_method_names : setattr ( WrappedRestView , method . lower ( ) , handler ) WrappedRestView . __name__ = func . __name__ WrappedRestView . renderer_classes = getattr ( func , 'renderer_classes' , RestView . renderer_classes ) WrappedRestView . parser_classes = getattr ( func , 'parser_classes' , RestView . parser_classes ) WrappedRestView . authentication_classes = getattr ( func , 'authentication_classes' , RestView . authentication_classes ) WrappedRestView . throttle_classes = getattr ( func , 'throttle_classes' , RestView . throttle_classes ) WrappedRestView . permission_classes = getattr ( func , 'permission_classes' , RestView . permission_classes ) return WrappedRestView . as_view ( ) return decorator
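A hedged usage sketch of the decorator; Response stands in for whatever renderer-compatible return type the surrounding framework expects:

@rest_verbs(['GET', 'POST'])
def echo(request, *args, **kwargs):
    """Echo the HTTP method used."""
    return Response({'method': request.method})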
def collapse ( self , id_user ) : """Collapse a comment belonging to the given user ."""
c = CmtCOLLAPSED ( id_bibrec = self . id_bibrec , id_cmtRECORDCOMMENT = self . id , id_user = id_user ) db . session . add ( c ) db . session . commit ( )
def get_adapter_path ( obj , to_cls ) : """Returns the adapter path that would be used to adapt ` obj ` to ` to _ cls ` ."""
from_cls = type ( obj ) key = ( from_cls , to_cls ) if key not in __mro__ : __mro__ [ key ] = list ( itertools . product ( inspect . getmro ( from_cls ) , inspect . getmro ( to_cls ) ) ) return __mro__ [ key ]
def map_id ( self , id , prefix , closure_list ) : """Map identifiers based on an equivalence closure list ."""
prefixc = prefix + ':' ids = [ eid for eid in closure_list if eid . startswith ( prefixc ) ] # TODO : add option to fail if no mapping , or if > 1 mapping if len ( ids ) == 0 : # default to input return id return ids [ 0 ]
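Toy behavior check, called on an instance of the enclosing class (the identifiers are made up):

# mapper.map_id('UniProtKB:P12345', 'HGNC', ['HGNC:5', 'MGI:87853'])  -> 'HGNC:5'
# mapper.map_id('UniProtKB:P12345', 'ZFIN', ['HGNC:5', 'MGI:87853'])  -> 'UniProtKB:P12345' (no match, input returned)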
def bar3d ( h2 : Histogram2D , ax : Axes3D , ** kwargs ) : """Plot of 2D histograms as 3D boxes ."""
density = kwargs . pop ( "density" , False ) data = get_data ( h2 , cumulative = False , flatten = True , density = density ) if "cmap" in kwargs : cmap = _get_cmap ( kwargs ) _ , cmap_data = _get_cmap_data ( data , kwargs ) colors = cmap ( cmap_data ) else : colors = kwargs . pop ( "color" , "blue" ) xpos , ypos = ( arr . flatten ( ) for arr in h2 . get_bin_centers ( ) ) zpos = np . zeros_like ( ypos ) dx , dy = ( arr . flatten ( ) for arr in h2 . get_bin_widths ( ) ) _add_labels ( ax , h2 , kwargs ) ax . bar3d ( xpos , ypos , zpos , dx , dy , data , color = colors , ** kwargs ) ax . set_zlabel ( "density" if density else "frequency" )
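A hedged end-to-end sketch, assuming physt's h2 factory for building the histogram and matplotlib's 3D axes (matplotlib 3.2+ registers the '3d' projection automatically):

import numpy as np
import matplotlib.pyplot as plt
from physt import h2 as make_h2   # aliased to avoid clashing with the parameter name above

xs = np.random.normal(size=1000)
ys = np.random.normal(size=1000)
hist = make_h2(xs, ys, bins=10)

fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
bar3d(hist, ax, density=True, cmap='viridis')
plt.show()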
def getrolesurl ( idrole = '' , * args , ** kwargs ) : """Request Roles URL . If idrole is set , you'll get a response adequate for a MambuRole object . If not set , you'll get a response adequate for a MambuRoles object . See the mamburoles module and pydoc for further information , and the official Mambu developer documentation for further details ."""
url = getmambuurl ( * args , ** kwargs ) + "userroles" + ( ( "/" + idrole ) if idrole else "" ) return url
def tree ( s , token = [ WORD , POS , CHUNK , PNP , REL , LEMMA ] ) : """Returns a parsed Text from the given parsed string ."""
return Text ( s , token )
def os_release ( package , base = 'essex' , reset_cache = False ) : '''Returns OpenStack release codename from a cached global . If reset _ cache then unset the cached os _ release version and return the freshly determined version . If the codename can not be determined from either an installed package or the installation source , the earliest release supported by the charm should be returned .'''
global _os_rel if reset_cache : reset_os_release ( ) if _os_rel : return _os_rel _os_rel = ( get_os_codename_package ( package , fatal = False ) or get_os_codename_install_source ( config ( 'openstack-origin' ) ) or base ) return _os_rel
def signout ( request , next_page = accounts_settings . ACCOUNTS_REDIRECT_ON_SIGNOUT , template_name = 'accounts/signout.html' , * args , ** kwargs ) : """Signs out the user and adds a success message ` ` You have been signed out . ` ` If next _ page is defined you will be redirected to the URI . If not the template in template _ name is used . : param next _ page : A string which specifies the URI to redirect to . : param template _ name : String defining the name of the template to use . Defaults to ` ` accounts / signout . html ` ` ."""
if request . user . is_authenticated ( ) and accounts_settings . ACCOUNTS_USE_MESSAGES : # pragma : no cover messages . success ( request , _ ( 'You have been signed out.' ) , fail_silently = True ) return Signout ( request , next_page , template_name , * args , ** kwargs )
def configure_integration ( path ) : """Configure and enable an integration ."""
integration = register_integration ( path ) integration_args = { } try : with open ( os . path . join ( path , ARGS_JSON ) ) as f : integration_args = json . loads ( f . read ( ) ) except Exception as exc : logger . debug ( str ( exc ) , exc_info = True ) raise click . ClickException ( "Cannot load {} integration args, please configure it first." . format ( os . path . basename ( path ) ) ) click . secho ( "[*] Adding integration {}" . format ( integration . name ) ) logger . debug ( "Adding integration %s" , integration . name , extra = { "integration" : integration . name , "args" : integration_args } ) configured_integration = ConfiguredIntegration ( name = integration . name , integration = integration , path = path ) configured_integration . data = integration_args configured_integration . integration . module = get_integration_module ( path ) . IntegrationActionsClass ( integration_args ) configured_integrations . append ( configured_integration )
def randomDecimalField ( self , model_class , field_name ) : """Check that the field defines ` max_digits ` and ` decimal_places ` , and generate a random decimal number accordingly ."""
decimal_field = model_class . _meta . get_field ( field_name ) max_digits = decimal_field . max_digits decimal_places = decimal_field . decimal_places # Random integer part ; falls back to 0-99 when max_digits is unset digits = random . choice ( range ( 100 ) ) if max_digits is not None : digits = int ( "" . join ( [ str ( x ) for x in random . sample ( range ( 0 , max_digits ) , max_digits - 1 ) ] ) ) # Random fractional part , truncated to decimal_places when set places = random . choice ( range ( 10 , 99 ) ) if decimal_places is not None : places = str ( random . choice ( range ( 9999 * 99999 ) ) ) [ : decimal_places ] # The integer part is also truncated to decimal_places characters before joining return float ( str ( digits ) [ : decimal_places ] + "." + str ( places ) )
def jaccard_sims ( feature_list ) : """Compute Jaccard similarities between all the observations in the feature list . Args : feature _ list : a list of dictionaries , each having structure as { ' md5 ' : String , ' features ' : list of Strings } Returns : list of dictionaries with structure as { ' source ' : md5 String , ' target ' : md5 String , ' sim ' : Jaccard similarity Number }"""
sim_info_list = [ ] for source_info in feature_list : md5_source = source_info [ 'md5' ] features_source = source_info [ 'features' ] for target_info in feature_list : md5_target = target_info [ 'md5' ] features_target = target_info [ 'features' ] if md5_source == md5_target : continue sim = jaccard_sim ( features_source , features_target ) if sim > .5 : sim_info_list . append ( { 'source' : md5_source , 'target' : md5_target , 'sim' : sim } ) return sim_info_list
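Toy input to show the shape of the output; jaccard_sim (defined elsewhere in this module) is assumed to return |A intersect B| / |A union B| for two feature lists, so the pair below scores 2/3 and clears the 0.5 cutoff in both directions:

toy = [
    {'md5': 'aaa', 'features': ['x', 'y', 'z']},
    {'md5': 'bbb', 'features': ['x', 'y']},
]
print(jaccard_sims(toy))
# -> [{'source': 'aaa', 'target': 'bbb', 'sim': 0.666...},
#     {'source': 'bbb', 'target': 'aaa', 'sim': 0.666...}]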
def _pcdata_nodes ( pcdata ) : """Return a list of minidom nodes with the properly escaped ` ` pcdata ` ` inside . The following special XML characters are escaped : * left angle bracket ( < ) * Right angle bracket ( > ) * Ampersand ( & ) By default , XML - based escaping is used for these characters . XML - based escaping will cause the corresponding XML entity references to be used ( for example , the ampersand character ` ` & ` ` will be represented as ` ` & amp ; ` ` , and the returned list contains one text node with the escaped pcdata string . Nesting of escaped pcdata is naturally supported with XML - based escaping . For example , if the pcdata string is ` ` a & amp ; b ` ` , the XML - escaped string will be ` ` a & amp ; amp ; b ` ` . If the ` ` cim _ xml . _ CDATA _ ESCAPING ` ` switch is set to True , CDATA - based escaping is used instead . CDATA - based escaping will cause a CDATA section to be used for the entire string , or consecutive CDATA sequences ( see discussion of nesting , below ) . The returned node list contains only CDATA section nodes . Example : The pcdata string ` ` a < b > c ` ` will become ` ` < ! [ CDATA [ a < b > ] ] > ` ` , allowing the special XML characters to be used unchanged inside of the CDATA section . Nesting of escaped pcdata is supported with CDATA - based escaping , by using the following approach : If the input pcdata string already contains CDATA sections , they are split into separate strings , splitting the CDATA end token string in the middle , and these part strings are CDATA - escaped separately . See https : / / en . wikipedia . org / wiki / CDATA # Nesting for details . Escaping of already escaped pcdata is needed in support of nested embedded instances . That requires that each level of escaping can lateron be unescaped , one at a time ."""
nodelist = [ ] if _CDATA_ESCAPING and isinstance ( pcdata , six . string_types ) and ( pcdata . find ( "<" ) >= 0 or pcdata . find ( ">" ) >= 0 or pcdata . find ( "&" ) >= 0 ) : # noqa : E129 # In order to support nesting of CDATA sections , we represent pcdata # that already contains CDATA sections by multiple new CDATA sections # whose boundaries split the end marker of the already existing CDATA # sections . pcdata_part_list = pcdata . split ( "]]>" ) # ' ] ] > ' is the complete CDATA section end marker i = 0 for pcdata_part in pcdata_part_list : i += 1 left = "" if i == 1 else "]>" # ' ] > ' is right part of CDATA section end marker right = "" if i == len ( pcdata_part_list ) else "]" # " ] " is left part of CDATA section end marker # The following initialization approach requires Python 2.3 or # higher . node = CDATASection ( ) node . data = left + pcdata_part + right nodelist . append ( node ) else : # The following automatically uses XML entity references # for escaping . node = _text ( pcdata ) nodelist . append ( node ) return nodelist
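A worked example of the split (only taken when the module's _CDATA_ESCAPING switch is True): input containing one CDATA end marker comes back as two adjoining CDATA sections that a consumer concatenates back to the original string:

nodes = _pcdata_nodes('a]]>b')
print(''.join(n.toxml() for n in nodes))
# -> <![CDATA[a]]]><![CDATA[]>b]]>
# Reading the two sections yields 'a]' + ']>b' == 'a]]>b'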
def BSR_Row_WriteScalar ( A , i , x ) : """Write a scalar at each nonzero location in row i of BSR matrix A . Parameters A : bsr _ matrix Input matrix i : int Row number x : float Scalar to overwrite nonzeros of row i in A Returns A : bsr _ matrix All nonzeros in row i of A have been overwritten with x . If x is a vector , the first length ( x ) nonzeros in row i of A have been overwritten with entries from x Examples > > > from numpy import array > > > from scipy . sparse import bsr _ matrix > > > from pyamg . util . BSR _ utils import BSR _ Row _ WriteScalar > > > indptr = array ( [ 0,2,3,6 ] ) > > > indices = array ( [ 0,2,2,0,1,2 ] ) > > > data = array ( [ 1,2,3,4,5,6 ] ) . repeat ( 4 ) . reshape ( 6,2,2) > > > B = bsr _ matrix ( ( data , indices , indptr ) , shape = ( 6,6 ) ) > > > BSR _ Row _ WriteScalar ( B , 5,22)"""
blocksize = A . blocksize [ 0 ] BlockIndx = int ( i / blocksize ) rowstart = A . indptr [ BlockIndx ] rowend = A . indptr [ BlockIndx + 1 ] localRowIndx = i % blocksize # for j in range ( rowstart , rowend ) : # indys = A . data [ j , localRowIndx , : ] . nonzero ( ) [ 0] # increment = indys . shape [ 0] # A . data [ j , localRowIndx , indys ] = x indys = A . data [ rowstart : rowend , localRowIndx , : ] . nonzero ( ) A . data [ rowstart : rowend , localRowIndx , : ] [ indys [ 0 ] , indys [ 1 ] ] = x
def set_discrimination_value ( self , discrimination ) : """stub"""
if not isinstance ( discrimination , float ) : raise InvalidArgument ( 'discrimination value must be a decimal' ) self . add_decimal_value ( discrimination , 'discrimination' )
def setup_handler ( self , io_loop ) : """: meth : ` . WIOLoopServiceHandler . setup _ handler ` implementation . When this object is in ' non - server mode ' ( client mode ) , then beacon message is sent"""
WNativeSocketHandler . setup_handler ( self , io_loop ) if self . server_mode ( ) is False : self . io_handler ( ) . transport_socket ( ) . sendto ( self . io_handler ( ) . messenger ( ) . request ( self . config ( ) ) , self . transport ( ) . target_socket ( self . config ( ) ) . pair ( ) )
def __convert ( root , tag , values , func ) : """Finds all elements with the given tag under root , converts each with func , and appends the results to values ."""
elements = root . getElementsByTagName ( tag ) for element in elements : converted = func ( element ) # Append to the list __append_list ( values , converted )
async def SetMeterStatus ( self , statues ) : '''statues : typing . Sequence [ ~ MeterStatusParam ] Returns - > typing . Sequence [ ~ ErrorResult ]'''
# map input types to rpc msg _params = dict ( ) msg = dict ( type = 'MetricsDebug' , request = 'SetMeterStatus' , version = 2 , params = _params ) _params [ 'statues' ] = statues reply = await self . rpc ( msg ) return reply
def parse_passage ( obj : dict ) -> BioCPassage : """Deserialize a dict obj to a BioCPassage object"""
passage = BioCPassage ( ) passage . offset = obj [ 'offset' ] passage . infons = obj [ 'infons' ] if 'text' in obj : passage . text = obj [ 'text' ] for sentence in obj [ 'sentences' ] : passage . add_sentence ( parse_sentence ( sentence ) ) for annotation in obj [ 'annotations' ] : passage . add_annotation ( parse_annotation ( annotation ) ) for relation in obj [ 'relations' ] : passage . add_relation ( parse_relation ( relation ) ) return passage
def remove_output_data_port ( self , data_port_id , force = False , destroy = True ) : """Overwrites the remove_output_data_port method of the State class . Prevents the user from removing an output data port from the library state . For further documentation , look at the State class . : param bool force : True if the removal should be forced : raises exceptions . NotImplementedError : if the removal is not forced"""
if force : return State . remove_output_data_port ( self , data_port_id , force , destroy ) else : raise NotImplementedError ( "Remove output data port is not implemented for library state {}" . format ( self ) )
def make_quantile_df ( data , draw_quantiles ) : """Return a dataframe with info needed to draw quantile segments"""
dens = data [ 'density' ] . cumsum ( ) / data [ 'density' ] . sum ( ) ecdf = interp1d ( dens , data [ 'y' ] , assume_sorted = True ) ys = ecdf ( draw_quantiles ) # Get the violin bounds for the requested quantiles violin_xminvs = interp1d ( data [ 'y' ] , data [ 'xminv' ] ) ( ys ) violin_xmaxvs = interp1d ( data [ 'y' ] , data [ 'xmaxv' ] ) ( ys ) data = pd . DataFrame ( { 'x' : interleave ( violin_xminvs , violin_xmaxvs ) , 'y' : np . repeat ( ys , 2 ) , 'group' : np . repeat ( np . arange ( 1 , len ( ys ) + 1 ) , 2 ) } ) return data
def fn_name ( fn ) : '''Gets a function's fully qualified name . Args : fn : The function . Returns : A tuple of the function's qualified name and whether it is a method .'''
expression = r'(\S+) (?:of|at)' # Checks if the function is a method and should have the self argument passed is_method = inspect . ismethod ( fn ) # Builds the name , either module.class.method or module.function name = '{}.{}' . format ( fn . __module__ , re . compile ( expression ) . findall ( str ( fn ) ) [ 0 ] ) return name , is_method
def night_light ( self ) : """Build the command that switches the LED to night light mode . : return : The command ."""
return self . _build_command ( self . NIGHT_BYTES [ self . _group_number - 1 ] , select = True , select_command = self . off ( ) )
def _increment_prop ( self , prop , path = None , ** kwargs ) : """Increments the property path count . args : prop : the key for the prop path : the path to the prop kwargs : current : count dictionary for the current dictionary"""
new_path = self . make_path ( prop , path ) if self . method == 'simple' : counter = kwargs [ 'current' ] else : counter = self . counts try : counter [ new_path ] += 1 except KeyError : counter [ new_path ] = 1 return counter
def disposition ( self , value ) : """The content - disposition of the attachment , specifying display style . Specifies how you would like the attachment to be displayed . - " inline " results in the attached file being displayed automatically within the message . - " attachment " results in the attached file requiring some action to display ( e . g . opening or downloading the file ) . If unspecified , " attachment " is used . Must be one of the two choices . : param disposition : The content - disposition of the attachment , specifying display style . Specifies how you would like the attachment to be displayed . - " inline " results in the attached file being displayed automatically within the message . - " attachment " results in the attached file requiring some action to display ( e . g . opening or downloading the file ) . If unspecified , " attachment " is used . Must be one of the two choices . : type disposition : Disposition , string , optional"""
if isinstance ( value , Disposition ) : self . _disposition = value else : self . _disposition = Disposition ( value )
def get_metadata ( feature_name , etextno ) : """Looks up the value of a meta - data feature for a given text . Arguments : feature _ name ( str ) : The name of the meta - data to look up . etextno ( int ) : The identifier of the Gutenberg text for which to look up the meta - data . Returns : frozenset : The values of the meta - data for the text or an empty set if the text does not have meta - data associated with the feature . Raises : UnsupportedFeature : If there is no MetadataExtractor registered that can extract meta - data for the given feature name ."""
metadata_values = MetadataExtractor . get ( feature_name ) . get_metadata ( etextno ) return frozenset ( metadata_values )
def send_messages ( self , messages ) : """Send messages . : param list messages : List of SmsMessage instances . : returns : number of messages sent successfully . : rtype : int"""
counter = 0 for message in messages : res , _ = self . _send ( message ) if res : counter += 1 return counter
def ReadList ( self , * branches , ** kwargs ) : """Same as ` phi . dsl . Expression . List ` but any string argument ` x ` is translated to ` Read ( x ) ` ."""
branches = map ( lambda x : E . Read ( x ) if isinstance ( x , str ) else x , branches ) return self . List ( * branches , ** kwargs )
def delete ( self ) : """Destructor ."""
if self . glucose : pysolvers . glucose3_del ( self . glucose ) self . glucose = None if self . prfile : self . prfile . close ( )
def get_xy_array ( x_segment , y_segment ) : """Input : x_segment , y_segment . Output : xy_array , in the format [ ( x [ 0 ] , y [ 0 ] ) , ( x [ 1 ] , y [ 1 ] ) , ... ]"""
xy_array = [ ] for num , x in enumerate ( x_segment ) : xy_array . append ( ( x , y_segment [ num ] ) ) return xy_array
def xml_marshal_complete_multipart_upload ( uploaded_parts ) : """Marshals a complete multipart upload request based on * uploaded_parts * . : param uploaded_parts : List of all uploaded parts , ordered by part number . : return : Marshalled XML data ."""
root = s3_xml . Element ( 'CompleteMultipartUpload' , { 'xmlns' : _S3_NAMESPACE } ) for uploaded_part in uploaded_parts : part_number = uploaded_part . part_number part = s3_xml . SubElement ( root , 'Part' ) part_num = s3_xml . SubElement ( part , 'PartNumber' ) part_num . text = str ( part_number ) etag = s3_xml . SubElement ( part , 'ETag' ) etag . text = '"' + uploaded_part . etag + '"' data = io . BytesIO ( ) s3_xml . ElementTree ( root ) . write ( data , encoding = None , xml_declaration = False ) return data . getvalue ( )
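Quick usage check; UploadedPart here is a stand-in for any object exposing the part_number and etag attributes the marshaller reads (the ETag values are placeholders):

from collections import namedtuple

UploadedPart = namedtuple('UploadedPart', ['part_number', 'etag'])
parts = [UploadedPart(1, '79b281060d337b9b2b84ccf390adcf74'),
         UploadedPart(2, 'a0e0eaa14f63ac7bc8a3244f09f42fc1')]
print(xml_marshal_complete_multipart_upload(parts).decode())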
def from_record ( cls , record ) : """Factory method to create a Record from a pymarc . Record object ."""
if not isinstance ( record , pymarc . Record ) : raise TypeError ( 'record must be of type pymarc.Record' ) record . __class__ = Record return record
def euler_options ( fn ) : """Decorator to link CLI options with their appropriate functions"""
euler_functions = cheat , generate , preview , skip , verify , verify_all # Reverse functions to print help page options in alphabetical order for option in reversed ( euler_functions ) : name , docstring = option . __name__ , option . __doc__ kwargs = { 'flag_value' : option , 'help' : docstring } # Apply flag ( s ) depending on whether or not name is a single word flag = '--%s' % name . replace ( '_' , '-' ) flags = [ flag ] if '_' in name else [ flag , '-%s' % name [ 0 ] ] fn = click . option ( 'option' , * flags , ** kwargs ) ( fn ) return fn
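A sketch of how the decorator is meant to be wired up, assuming the six functions in euler_functions are defined as click-compatible callables in the same module; option receives whichever flag function the user selected:

import click

@click.command()
@click.argument('problem', default=0, type=int)
@euler_options
def main(option, problem):
    # option is None when no flag was passed on the command line
    if option is not None:
        option(problem)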
def generate_basic ( self ) : """Generate the HTTP Basic authentication header value ( RFC 2617 ) ."""
from base64 import b64encode if not self . basic_auth : creds = self . username + ':' + self . password self . basic_auth = 'Basic ' self . basic_auth += b64encode ( creds . encode ( 'UTF-8' ) ) . decode ( 'UTF-8' ) return self . basic_auth
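The header format is easy to verify by hand (pure standard library): base64 of 'user:pass' is 'dXNlcjpwYXNz', so the generated value is 'Basic dXNlcjpwYXNz':

from base64 import b64encode

print('Basic ' + b64encode(b'user:pass').decode('UTF-8'))   # Basic dXNlcjpwYXNz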
def extract_version ( ) : """Extract the version from the package ."""
with open ( 'pdftools/__init__.py' , 'r' ) as f : content = f . read ( ) version_match = _version_re . search ( content ) version = str ( ast . literal_eval ( version_match . group ( 1 ) ) ) return version
def _Open ( self , path_spec , mode = 'rb' ) : """Opens the file system object defined by path specification . Args : path _ spec ( PathSpec ) : path specification of the file system . mode ( Optional [ str ] ) : file access mode . The default is ' rb ' which represents read - only binary . Raises : AccessError : if the access to open the file was denied . IOError : if the file system object could not be opened . PathSpecError : if the path specification is incorrect . ValueError : if the path specification is invalid ."""
if not path_spec . HasParent ( ) : raise errors . PathSpecError ( 'Unsupported path specification without parent.' ) file_object = resolver . Resolver . OpenFileObject ( path_spec . parent , resolver_context = self . _resolver_context ) try : zip_file = zipfile . ZipFile ( file_object , 'r' ) except : file_object . close ( ) raise self . _file_object = file_object self . _zip_file = zip_file