def factory(cls, config, db):
    """Given a configuration and database, select and return an appropriate
    instance of a subclass of GetBucketKey.  This will ensure that both
    client and server support are available for the Lua script feature of
    Redis, and if not, a lock will be used.

    :param config: A dictionary of compactor options.
    :param db: A database handle for the Redis database.

    :returns: An instance of a subclass of GetBucketKey, dependent on the
              support for the Lua script feature of Redis.
    """
    # Make sure that the client supports register_script()
    if not hasattr(db, 'register_script'):
        LOG.debug("Redis client does not support register_script()")
        return GetBucketKeyByLock(config, db)

    # OK, the client supports register_script(); what about the server?
    info = db.info()
    if version_greater('2.6', info['redis_version']):
        LOG.debug("Redis server supports register_script()")
        return GetBucketKeyByScript(config, db)

    # OK, use our fallback...
    LOG.debug("Redis server does not support register_script()")
    return GetBucketKeyByLock(config, db)
def set_bytes(self, data, addr, **kwargs):
    '''Write a byte array of arbitrary size.

    Parameters
    ----------
    data : iterable
        The data (byte array) to be written.
    addr : int
        The register address.

    Returns
    -------
    nothing
    '''
    self._intf.write(self._conf['base_addr'] + addr, data)
def _parsecsv(x):
    """Deserialize a file-like object containing CSV into a Python generator."""
    for line in x:
        # decode as utf-8, whitespace-strip and split on delimiter
        yield line.decode('utf-8').strip().split(config.DELIMITER)
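A minimal usage sketch, assuming the module-level ``config.DELIMITER`` is a comma (that value comes from the surrounding package's configuration and is not shown here):

    import io

    raw = io.BytesIO(b"1,alice\n2,bob\n")
    rows = list(_parsecsv(raw))
    # With config.DELIMITER == ',', this yields:
    # [['1', 'alice'], ['2', 'bob']]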
def guard_sample(analysis_request):
    """Returns whether 'sample' transition can be performed or not.
    Returns True only if the analysis request has the DateSampled and
    Sampler set or if the user belongs to the Samplers group.
    """
    if analysis_request.getDateSampled() and analysis_request.getSampler():
        return True
    current_user = api.get_current_user()
    return "Sampler" in current_user.getRolesInContext(analysis_request)
def from_obj(cls, container, file_obj):
    """Create from regular info object."""
    # RFC 1123: Thu, 07 Jun 2007 18:57:07 GMT
    return cls(container,
               name=file_obj.name,
               size=file_obj.size,
               content_type=file_obj.content_type,
               last_modified=dt_from_header(file_obj.last_modified),
               obj_type=cls.choose_type(file_obj.content_type))
def generate_subsets(lst):
    """Generate all possible subsets of a list.

    >>> generate_subsets([10, 20, 30, 40])
    [[], [10], [20], [30], [40], [10, 20], [10, 30], [10, 40], [20, 30], [20, 40], [30, 40], [10, 20, 30], [10, 20, 40], [10, 30, 40], [20, 30, 40], [10, 20, 30, 40]]
    >>> generate_subsets(['X', 'Y', 'Z'])
    [[], ['X'], ['Y'], ['Z'], ['X', 'Y'], ['X', 'Z'], ['Y', 'Z'], ['X', 'Y', 'Z']]
    >>> generate_subsets([1, 2, 3])
    [[], [1], [2], [3], [1, 2], [1, 3], [2, 3], [1, 2, 3]]
    """
    from itertools import combinations
    sub_list = []
    for i in range(len(lst) + 1):
        temp = [list(j) for j in combinations(lst, i)]
        if temp:
            sub_list.extend(temp)
    return sub_list
def sort_marks(marks):
    """This function sorts a list of tuples using a lambda function as the key.
    It organizes the list based on the second element of each tuple.

    Sample usages of the function are as follows:
    >>> sort_marks([('English', 88), ('Science', 90), ('Maths', 97), ('Social sciences', 82)])
    outputs: [('Social sciences', 82), ('English', 88), ('Science', 90), ('Maths', 97)]
    >>> sort_marks([('Telugu', 49), ('Hindhi', 54), ('Social', 33)])
    outputs: [('Social', 33), ('Telugu', 49), ('Hindhi', 54)]
    >>> sort_marks([('Physics', 96), ('Chemistry', 97), ('Biology', 45)])
    outputs: [('Biology', 45), ('Physics', 96), ('Chemistry', 97)]
    """
    marks.sort(key=lambda x: x[1])
    return marks
def project_shape(self, projection: Union[pyproj.Proj, crs.Projection, None] = None) -> base.BaseGeometry:
    """Projection for a decent representation of the structure.

    By default, an equivalent projection is applied. Equivalent projections
    locally respect areas, which is convenient for the area attribute.
    """
    if self.shape is None:
        return None
    if isinstance(projection, crs.Projection):
        projection = pyproj.Proj(projection.proj4_init)
    if projection is None:
        bounds = self.bounds
        projection = pyproj.Proj(
            proj="aea",  # equivalent projection
            lat_1=bounds[1],
            lat_2=bounds[3],
            lat_0=(bounds[1] + bounds[3]) / 2,
            lon_0=(bounds[0] + bounds[2]) / 2,
        )
    projected_shape = transform(
        partial(pyproj.transform, pyproj.Proj(init="EPSG:4326"), projection),
        self.shape,
    )
    if not projected_shape.is_valid:
        warnings.warn("The chosen projection is invalid for current shape")
    return projected_shape
def submit ( self , command = 'sleep 1' , blocksize = 1 , job_name = "parsl.auto" ) : """Submit command to an Azure instance . Submit returns an ID that corresponds to the task that was just submitted . Parameters command : str Command to be invoked on the remote side . blocksize : int Number of blocks requested . job _ name : str Prefix for job name . Returns None or str If at capacity ( no more can be provisioned ) , None is returned . Otherwise , an identifier for the job is returned ."""
job_name = "parsl.auto.{0}" . format ( time . time ( ) ) [ instance , * rest ] = self . deployer . deploy ( command = command , job_name = job_name , blocksize = 1 ) if not instance : logger . error ( "Failed to submit request to Azure" ) return None logger . debug ( "Started instance_id: {0}" . format ( instance . instance_id ) ) state = translate_table . get ( instance . state [ 'Name' ] , "PENDING" ) self . resources [ instance . instance_id ] = { "job_id" : instance . instance_id , "instance" : instance , "status" : state } return instance . instance_id
def get_host_for_command(self, command, args):
    """Returns the host this command should be executed against."""
    return self.get_host_for_key(self.get_key(command, args))
def _set_position(self, value):
    """Subclasses may override this method."""
    pX, pY = self.position
    x, y = value
    dX = x - pX
    dY = y - pY
    self.moveBy((dX, dY))
def _get_abbreviations(input_string, output_length=0):
    """Generates abbreviations for input_string

    :param input_string: str, name of object
    :param output_length: int, optional specific length of abbreviations, default is off
    :return: list(str), list of all combinations that include the first letter (possible abbreviations)
    """
    for i, j in itertools.combinations(range(len(input_string[1:]) + 1), 2):
        abbr = input_string[0] + input_string[1:][i:j]
        if len(abbr) >= output_length:
            yield abbr
        elif output_length == 0:
            yield abbr
    # Have to add the solitary letter as well
    if not output_length or output_length == 1:
        yield input_string[0]
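An illustrative run, worked through the combination logic by hand (the input string is made up):

    >>> list(_get_abbreviations("node"))
    ['no', 'nod', 'node', 'nd', 'nde', 'ne', 'n']
    >>> list(_get_abbreviations("node", output_length=4))
    ['node']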
def sync2(queryset, model_objs, unique_fields, update_fields=None, returning=False,
          ignore_duplicate_updates=True):
    """Performs a sync operation on a queryset, making the contents of the
    queryset match the contents of model_objs.

    Note: The definition of a sync requires that we return untouched rows from the upsert
    operation. There is no way to turn off returning untouched rows in a sync.

    Args:
        queryset (Model|QuerySet): A model or a queryset that defines the collection to sync
        model_objs (List[Model]): A list of Django models to sync. All models in this list
            will be bulk upserted and any models not in the table (or queryset) will be
            deleted if sync=True.
        unique_fields (List[str]): A list of fields that define the uniqueness of the model.
            The model must have a unique constraint on these fields
        update_fields (List[str], default=None): A list of fields to update whenever objects
            already exist. If an empty list is provided, it is equivalent to doing a bulk
            insert on the objects that don't exist. If `None`, all fields will be updated.
        returning (bool|List[str]): If True, returns all fields. If a list, only returns
            fields in the list. Return values are split in a tuple of created, updated, and
            deleted models.
        ignore_duplicate_updates (bool, default=True): Ignore updating a row in the upsert
            if all of the update fields are duplicates

    Returns:
        UpsertResult: A list of results if ``returning`` is not ``False``. created, updated,
        untouched, and deleted results can be obtained by accessing the ``created``,
        ``updated``, ``untouched``, and ``deleted`` properties of the result.
    """
    results = upsert2.upsert(queryset, model_objs, unique_fields,
                             update_fields=update_fields, returning=returning, sync=True,
                             ignore_duplicate_updates=ignore_duplicate_updates)
    post_bulk_operation.send(sender=queryset.model, model=queryset.model)
    return results
def write(self, container: Container, filepath: str, contents: str) -> str:
    """Writes the given contents to a file belonging to a container."""
    logger.debug("writing to file [%s] inside container [%s]",
                 filepath, container.id)
    filepath = self._resolve_path(container, filepath)

    # write the file contents to a temporary file on the host before
    # copying that file to the container
    (_, fn_host) = tempfile.mkstemp(suffix='.bugzoo')
    try:
        with open(fn_host, 'w') as fh:
            fh.write(contents)
        self.__mgr_ctr.copy_to(container, fn_host, filepath)
    finally:
        os.remove(fn_host)
    logger.debug("wrote to file [%s] inside container [%s]",
                 filepath, container.id)
def all_values(*values):
    """Returns the `logical conjunction`_ of all supplied values (the result
    is only :data:`True` if and only if all input values are simultaneously
    :data:`True`). One or more *values* can be specified. For example, to
    light an :class:`~gpiozero.LED` only when *both* buttons are pressed::

        from gpiozero import LED, Button
        from gpiozero.tools import all_values
        from signal import pause

        led = LED(4)
        btn1 = Button(20)
        btn2 = Button(21)
        led.source = all_values(btn1, btn2)
        pause()

    .. _logical conjunction: https://en.wikipedia.org/wiki/Logical_conjunction
    """
    values = [_normalize(v) for v in values]
    for v in zip(*values):
        yield all(v)
def process_raw_data(cls, raw_data):
    """Create a new model using raw API response."""
    properties = raw_data["properties"]

    raw_content = properties.get("accessControlList", None)
    if raw_content is not None:
        resource = Resource.from_raw_data(raw_content)
        properties["accessControlList"] = resource

    # TODO(alexcoman): Add model for ServiceInsertion
    raw_content = properties.get("serviceInsertion", None)
    if raw_content is not None:
        resource = Resource.from_raw_data(raw_content)
        properties["serviceInsertion"] = resource

    raw_content = properties.get("routeTable", None)
    if raw_content is not None:
        resource = Resource.from_raw_data(raw_content)
        properties["routeTable"] = resource

    ip_configurations = []
    for raw_config in properties.get("ipConfigurations", []):
        ip_configurations.append(Resource.from_raw_data(raw_config))
    properties["ipConfigurations"] = ip_configurations

    return super(SubNetworks, cls).process_raw_data(raw_data)
def decimate ( self , fraction = 0.5 , N = None , boundaries = False , verbose = True ) : """Downsample the number of vertices in a mesh . : param float fraction : the desired target of reduction . : param int N : the desired number of final points ( * * fraction * * is recalculated based on it ) . : param bool boundaries : ( True ) , decide whether to leave boundaries untouched or not . . . note : : Setting ` ` fraction = 0.1 ` ` leaves 10 % of the original nr of vertices . . . hint : : | skeletonize | | skeletonize . py | _"""
poly = self . polydata ( True ) if N : # N = desired number of points Np = poly . GetNumberOfPoints ( ) fraction = float ( N ) / Np if fraction >= 1 : return self decimate = vtk . vtkDecimatePro ( ) decimate . SetInputData ( poly ) decimate . SetTargetReduction ( 1 - fraction ) decimate . PreserveTopologyOff ( ) if boundaries : decimate . BoundaryVertexDeletionOff ( ) else : decimate . BoundaryVertexDeletionOn ( ) decimate . Update ( ) if verbose : print ( "Nr. of pts, input:" , poly . GetNumberOfPoints ( ) , end = "" ) print ( " output:" , decimate . GetOutput ( ) . GetNumberOfPoints ( ) ) return self . updateMesh ( decimate . GetOutput ( ) )
def recordHostname(self, basedir):
    "Record my hostname in twistd.hostname, for user convenience"
    log.msg("recording hostname in twistd.hostname")
    filename = os.path.join(basedir, "twistd.hostname")

    try:
        hostname = os.uname()[1]  # only on unix
    except AttributeError:
        # this tends to fail on non-connected hosts, e.g., laptops
        # on planes
        hostname = socket.getfqdn()

    try:
        with open(filename, "w") as f:
            f.write("{0}\n".format(hostname))
    except Exception:
        log.msg("failed - ignoring")
def POST_AUTH(self, courseid):  # pylint: disable=arguments-differ
    """POST request"""
    course, __ = self.get_course_and_check_rights(courseid)

    data = web.input(task=[])
    if "task" in data:
        # Change tasks order
        for index, taskid in enumerate(data["task"]):
            try:
                task = self.task_factory.get_task_descriptor_content(courseid, taskid)
                task["order"] = index
                self.task_factory.update_task_descriptor_content(courseid, taskid, task)
            except:
                pass
    return self.page(course)
def _create_tensor_summary ( name , true_positive_counts , false_positive_counts , true_negative_counts , false_negative_counts , precision , recall , num_thresholds = None , display_name = None , description = None , collections = None ) : """A private helper method for generating a tensor summary . We use a helper method instead of having ` op ` directly call ` raw _ data _ op ` to prevent the scope of ` raw _ data _ op ` from being embedded within ` op ` . Arguments are the same as for raw _ data _ op . Returns : A tensor summary that collects data for PR curves ."""
# TODO ( nickfelt ) : remove on - demand imports once dep situation is fixed . import tensorflow . compat . v1 as tf # Store the number of thresholds within the summary metadata because # that value is constant for all pr curve summaries with the same tag . summary_metadata = metadata . create_summary_metadata ( display_name = display_name if display_name is not None else name , description = description or '' , num_thresholds = num_thresholds ) # Store values within a tensor . We store them in the order : # true positives , false positives , true negatives , false # negatives , precision , and recall . combined_data = tf . stack ( [ tf . cast ( true_positive_counts , tf . float32 ) , tf . cast ( false_positive_counts , tf . float32 ) , tf . cast ( true_negative_counts , tf . float32 ) , tf . cast ( false_negative_counts , tf . float32 ) , tf . cast ( precision , tf . float32 ) , tf . cast ( recall , tf . float32 ) ] ) return tf . summary . tensor_summary ( name = 'pr_curves' , tensor = combined_data , collections = collections , summary_metadata = summary_metadata )
def _get_geom_type(type_bytes):
    """Get the GeoJSON geometry type label from a WKB type byte string.

    :param type_bytes:
        4-byte string in big endian byte order containing a WKB type number.
        It may also contain a "has SRID" flag in the high byte (the first
        byte, since this is big endian byte order), indicated as 0x20. If
        the SRID flag is not set, the high byte will always be null (0x00).
    :returns:
        3-tuple of GeoJSON geometry type label, the bytes representing the
        geometry type, and a separate "has SRID" flag. If the input
        `type_bytes` contains an SRID flag, it will be removed.

    >>> # Z Point, with SRID flag
    >>> _get_geom_type(b'\\x20\\x00\\x03\\xe9') == (
    ...     'Point', b'\\x00\\x00\\x03\\xe9', True)
    True

    >>> # 2D MultiLineString, without SRID flag
    >>> _get_geom_type(b'\\x00\\x00\\x00\\x05') == (
    ...     'MultiLineString', b'\\x00\\x00\\x00\\x05', False)
    True
    """
    # slice off the high byte, which may contain the SRID flag
    high_byte = type_bytes[0]
    if six.PY3:
        high_byte = bytes([high_byte])
    has_srid = high_byte == b'\x20'
    if has_srid:
        # replace the high byte with a null byte
        type_bytes = as_bin_str(b'\x00' + type_bytes[1:])
    else:
        type_bytes = as_bin_str(type_bytes)

    # look up the geometry type
    geom_type = _BINARY_TO_GEOM_TYPE.get(type_bytes)
    return geom_type, type_bytes, has_srid
def spin_data(self):
    """The data decomposed into actual spin data as {spin: data}.
    Essentially, this provides the actual Spin.up and Spin.down data
    instead of the total and diff.  Note that by definition, a
    non-spin-polarized run would have Spin.up data == Spin.down data.
    """
    if not self._spin_data:
        spin_data = dict()
        spin_data[Spin.up] = 0.5 * (self.data["total"] + self.data.get("diff", 0))
        spin_data[Spin.down] = 0.5 * (self.data["total"] - self.data.get("diff", 0))
        self._spin_data = spin_data
    return self._spin_data
def GET(self, path):
    """Raw GET to the MISP server

    :param path: URL fragment (ie /events/)
    :returns: HTTP raw content (as seen by :class:`requests.Response`)
    """
    url = self._absolute_url(path)
    resp = requests.get(url, headers=self.headers, verify=self.verify_ssl)
    if resp.status_code != 200:
        raise MispTransportError('GET %s: returned status=%d',
                                 path, resp.status_code)
    return resp.content
def _auth(self):
    """Creates a hash from the API keys and returns all required parameters.

    :returns: str -- URL encoded query parameters containing "ts", "apikey", and "hash"
    """
    ts = datetime.datetime.now().strftime("%Y-%m-%d%H:%M:%S")
    hash_string = hashlib.md5("%s%s%s" % (ts, self.private_key, self.public_key)).hexdigest()
    return "ts=%s&apikey=%s&hash=%s" % (ts, self.public_key, hash_string)
def forget(self, key):
    """Remove an item from the cache.

    :param key: The cache key
    :type key: str

    :rtype: bool
    """
    path = self._path(key)

    if os.path.exists(path):
        os.remove(path)
        return True

    return False
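A minimal usage sketch (the cache object and the key name here are hypothetical):

    if cache.forget("user:42:profile"):
        print("cached entry removed")
    else:
        print("nothing cached under that key")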
def _diff ( state_data , resource_object ) : '''helper method to compare salt state info with the PagerDuty API json structure , and determine if we need to update . returns the dict to pass to the PD API to perform the update , or empty dict if no update .'''
state_data [ 'id' ] = resource_object [ 'schedule' ] [ 'id' ] objects_differ = None # first check all the easy top - level properties : everything except the schedule _ layers . for k , v in state_data [ 'schedule' ] . items ( ) : if k == 'schedule_layers' : continue if v != resource_object [ 'schedule' ] [ k ] : objects_differ = '{0} {1} {2}' . format ( k , v , resource_object [ 'schedule' ] [ k ] ) break # check schedule _ layers if not objects_differ : for layer in state_data [ 'schedule' ] [ 'schedule_layers' ] : # find matching layer name resource_layer = None for resource_layer in resource_object [ 'schedule' ] [ 'schedule_layers' ] : found = False if layer [ 'name' ] == resource_layer [ 'name' ] : found = True break if not found : objects_differ = 'layer {0} missing' . format ( layer [ 'name' ] ) break # set the id , so that we will update this layer instead of creating a new one layer [ 'id' ] = resource_layer [ 'id' ] # compare contents of layer and resource _ layer for k , v in layer . items ( ) : if k == 'users' : continue if k == 'start' : continue if v != resource_layer [ k ] : objects_differ = 'layer {0} key {1} {2} != {3}' . format ( layer [ 'name' ] , k , v , resource_layer [ k ] ) break if objects_differ : break # compare layer [ ' users ' ] if len ( layer [ 'users' ] ) != len ( resource_layer [ 'users' ] ) : objects_differ = 'num users in layer {0} {1} != {2}' . format ( layer [ 'name' ] , len ( layer [ 'users' ] ) , len ( resource_layer [ 'users' ] ) ) break for user1 in layer [ 'users' ] : found = False user2 = None for user2 in resource_layer [ 'users' ] : # deal with PD API bug : when you submit member _ order = N , you get back member _ order = N + 1 if user1 [ 'member_order' ] == user2 [ 'member_order' ] - 1 : found = True break if not found : objects_differ = 'layer {0} no one with member_order {1}' . format ( layer [ 'name' ] , user1 [ 'member_order' ] ) break if user1 [ 'user' ] [ 'id' ] != user2 [ 'user' ] [ 'id' ] : objects_differ = 'layer {0} user at member_order {1} {2} != {3}' . format ( layer [ 'name' ] , user1 [ 'member_order' ] , user1 [ 'user' ] [ 'id' ] , user2 [ 'user' ] [ 'id' ] ) break if objects_differ : return state_data else : return { }
def scan_modules ( self ) : """Populates the snapshot with loaded modules ."""
# The module filenames may be spoofed by malware , # since this information resides in usermode space . # See : http : / / www . ragestorm . net / blogs / ? p = 163 # Ignore special process IDs . # PID 0 : System Idle Process . Also has a special meaning to the # toolhelp APIs ( current process ) . # PID 4 : System Integrity Group . See this forum post for more info : # http : / / tinyurl . com / ycza8jo # ( points to social . technet . microsoft . com ) # Only on XP and above # PID 8 : System ( ? ) only in Windows 2000 and below AFAIK . # It ' s probably the same as PID 4 in XP and above . dwProcessId = self . get_pid ( ) if dwProcessId in ( 0 , 4 , 8 ) : return # It would seem easier to clear the snapshot first . # But then all open handles would be closed . found_bases = set ( ) with win32 . CreateToolhelp32Snapshot ( win32 . TH32CS_SNAPMODULE , dwProcessId ) as hSnapshot : me = win32 . Module32First ( hSnapshot ) while me is not None : lpBaseAddress = me . modBaseAddr fileName = me . szExePath # full pathname if not fileName : fileName = me . szModule # filename only if not fileName : fileName = None else : fileName = PathOperations . native_to_win32_pathname ( fileName ) found_bases . add ( lpBaseAddress ) # # if not self . has _ module ( lpBaseAddress ) : # XXX triggers a scan if lpBaseAddress not in self . __moduleDict : aModule = Module ( lpBaseAddress , fileName = fileName , SizeOfImage = me . modBaseSize , process = self ) self . _add_module ( aModule ) else : aModule = self . get_module ( lpBaseAddress ) if not aModule . fileName : aModule . fileName = fileName if not aModule . SizeOfImage : aModule . SizeOfImage = me . modBaseSize if not aModule . process : aModule . process = self me = win32 . Module32Next ( hSnapshot ) # # for base in self . get _ module _ bases ( ) : # XXX triggers a scan for base in compat . keys ( self . __moduleDict ) : if base not in found_bases : self . _del_module ( base )
def convert_datetimes_to_timestamps(data, datetime_attrs):
    """Given a dictionary of data, and a dictionary of datetime attributes,
    return a new dictionary that converts any datetime attributes that may
    be present to their timestamped equivalent.
    """
    if not data:
        return data

    new_data = {}
    for key, value in data.items():
        if key in datetime_attrs and isinstance(value, datetime):
            new_key = datetime_attrs[key]
            new_data[new_key] = timestamp_from_dt(value)
        else:
            new_data[key] = value

    return new_data
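A small illustrative call (the attribute names are made up; ``timestamp_from_dt`` is the helper from the surrounding module):

    datetime_attrs = {"created": "created_ts"}   # hypothetical mapping
    data = {"id": 7, "created": datetime(2020, 1, 1, 12, 0)}
    convert_datetimes_to_timestamps(data, datetime_attrs)
    # -> {"id": 7, "created_ts": <value produced by timestamp_from_dt>}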
def iter_elements(element_function, parent_to_parse, **kwargs):
    """Applies element_function to each of the sub-elements in parent_to_parse.
    The passed in function must take at least one element, and an optional
    list of kwargs which are relevant to each of the elements in the list:
        def elem_func(each_elem, **kwargs)
    """
    parent = get_element(parent_to_parse)

    if not hasattr(element_function, '__call__'):
        return parent

    for child in ([] if parent is None else parent):
        element_function(child, **kwargs)

    return parent
def files(self, pattern=None):
    """D.files() -> List of the files in this directory.

    The elements of the list are Path objects.
    This does not walk into subdirectories (see :meth:`walkfiles`).

    With the optional `pattern` argument, this only lists files
    whose names match the given pattern.  For example,
    ``d.files('*.pyc')``.
    """
    return [p for p in self.listdir(pattern) if p.isfile()]
def encodeThetas(self, theta1, theta2):
    """Return the SDR for theta1 and theta2"""
    # print >> sys.stderr, "encoded theta1 value = ", theta1
    # print >> sys.stderr, "encoded theta2 value = ", theta2
    t1e = self.theta1Encoder.encode(theta1)
    t2e = self.theta2Encoder.encode(theta2)
    # print >> sys.stderr, "encoded theta1 = ", t1e.nonzero()[0]
    # print >> sys.stderr, "encoded theta2 = ", t2e.nonzero()[0]
    ex = numpy.outer(t2e, t1e)
    return ex.flatten().nonzero()[0]
def add_edges ( self , edge_list , dataframe = True ) : """Add a all edges in edge _ list . : return : A data structure with Cytoscape SUIDs for the newly - created edges . : param edge _ list : List of ( source , target , interaction ) tuples * or * list of dicts with ' source ' , ' target ' , ' interaction ' , ' direction ' keys . : param dataframe : If dataframe is True ( default ) , return a Pandas DataFrame . If dataframe is False , return a list of dicts with keys ' SUID ' , ' source ' and ' target ' ."""
# It might be nice to have an option pass a list of dicts instead of list of tuples if not isinstance ( edge_list [ 0 ] , dict ) : edge_list = [ { 'source' : edge_tuple [ 0 ] , 'target' : edge_tuple [ 1 ] , 'interaction' : edge_tuple [ 2 ] } for edge_tuple in edge_list ] res = self . session . post ( self . __url + 'edges' , data = json . dumps ( edge_list ) , headers = HEADERS ) check_response ( res ) edges = res . json ( ) if dataframe : return pd . DataFrame ( edges ) . set_index ( [ 'SUID' ] ) else : return edges
def keyPressEvent(self, event):
    """Listens for the left/right keys and the escape key to control
    the slides.

    :param event | <QtCore.Qt.QKeyEvent>
    """
    if event.key() == QtCore.Qt.Key_Escape:
        self.cancel()
    elif event.key() == QtCore.Qt.Key_Left:
        self.goBack()
    elif event.key() == QtCore.Qt.Key_Right:
        self.goForward()
    elif event.key() == QtCore.Qt.Key_Home:
        self.restart()

    super(XWalkthroughWidget, self).keyPressEvent(event)
def set_secure(self, section, option, value):
    """Set an option and mark it as secure.

    Any subsequent uses of 'set' or 'get' will also now know that this
    option is secure as well.
    """
    if self.keyring_available:
        s_option = "%s%s" % (section, option)
        self._unsaved[s_option] = ('set', value)
        value = self._secure_placeholder
    ConfigParser.set(self, section, option, value)
def terminate(self, force=False):
    """This forces a child process to terminate."""
    if not self.isalive():
        return True

    self.kill(signal.SIGINT)
    time.sleep(self.delayafterterminate)
    if not self.isalive():
        return True

    if force:
        self.kill(signal.SIGKILL)
        time.sleep(self.delayafterterminate)
        if not self.isalive():
            return True
        else:
            return False
def next_sample ( self ) : """Helper function for reading in next sample ."""
if self . _allow_read is False : raise StopIteration if self . seq is not None : if self . cur < self . num_image : idx = self . seq [ self . cur ] else : if self . last_batch_handle != 'discard' : self . cur = 0 raise StopIteration self . cur += 1 if self . imgrec is not None : s = self . imgrec . read_idx ( idx ) header , img = recordio . unpack ( s ) if self . imglist is None : return header . label , img else : return self . imglist [ idx ] [ 0 ] , img else : label , fname = self . imglist [ idx ] return label , self . read_image ( fname ) else : s = self . imgrec . read ( ) if s is None : if self . last_batch_handle != 'discard' : self . imgrec . reset ( ) raise StopIteration header , img = recordio . unpack ( s ) return header . label , img
def create_session(username, password):
    """Create a session for the user, and then return the key."""
    user = User.objects.get_user_by_password(username, password)
    auth_session_engine = get_config('auth_session_engine')
    if not user:
        raise InvalidInput('Username or password incorrect')
    session_key = random_string(15)
    while auth_session_engine.get(session_key):
        session_key = random_string(15)
    auth_session_engine.set(session_key, user.username, get_config('auth_session_expire'))
    return {'session_key': session_key, 'user': user}
def _from_dict(cls, _dict):
    """Initialize a VoiceModels object from a json dictionary."""
    args = {}
    if 'customizations' in _dict:
        args['customizations'] = [
            VoiceModel._from_dict(x) for x in (_dict.get('customizations'))
        ]
    else:
        raise ValueError(
            'Required property \'customizations\' not present in VoiceModels JSON')
    return cls(**args)
def save(filename=None, family='ipv4'):
    '''
    Save the current in-memory rules to disk

    CLI Example:

    .. code-block:: bash

        salt '*' iptables.save /etc/sysconfig/iptables

        IPv6:
        salt '*' iptables.save /etc/sysconfig/iptables family=ipv6
    '''
    if _conf() and not filename:
        filename = _conf(family)

    log.debug('Saving rules to %s', filename)

    parent_dir = os.path.dirname(filename)
    if not os.path.isdir(parent_dir):
        os.makedirs(parent_dir)
    cmd = '{0}-save'.format(_iptables_cmd(family))
    ipt = __salt__['cmd.run'](cmd)

    # regex out the output if configured with filters
    if _conf_save_filters():
        ipt = _regex_iptables_save(ipt)

    out = __salt__['file.write'](filename, ipt)
    return out
def cloneNode(self, deep: bool = False) -> AbstractNode:
    """Return new copy of this node.

    If optional argument ``deep`` is specified and is True, the new node has
    clones of the child nodes of this node (if present).
    """
    if deep:
        return self._clone_node_deep()
    return self._clone_node()
def set_extension(self, ext):
    """RETURN NEW FILE WITH GIVEN EXTENSION"""
    path = self._filename.split("/")
    parts = path[-1].split(".")
    if len(parts) == 1:
        parts.append(ext)
    else:
        parts[-1] = ext

    path[-1] = ".".join(parts)
    return File("/".join(path))
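Illustrative calls (the paths are invented; ``File`` is the class this method belongs to):

    File("/tmp/report.csv").set_extension("json")   # -> File("/tmp/report.json")
    File("/tmp/README").set_extension("md")         # -> File("/tmp/README.md")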
def get_family_hierarchy_session(self, proxy):
    """Gets the ``OsidSession`` associated with the family hierarchy service.

    arg:    proxy (osid.proxy.Proxy): a proxy
    return: (osid.relationship.FamilyHierarchySession) - a
            ``FamilyHierarchySession`` for families
    raise:  NullArgument - ``proxy`` is ``null``
    raise:  OperationFailed - unable to complete request
    raise:  Unimplemented - ``supports_family_hierarchy()`` is ``false``
    *compliance: optional -- This method must be implemented if
    ``supports_family_hierarchy()`` is ``true``.*
    """
    if not self.supports_family_hierarchy():
        raise errors.Unimplemented()
    # pylint: disable=no-member
    return sessions.FamilyHierarchySession(proxy=proxy, runtime=self._runtime)
def highlightjs_javascript ( jquery = None ) : """Return HTML for highlightjs JavaScript . Adjust url in settings . If no url is returned , we don ' t want this statement to return any HTML . This is intended behavior . Default value : ` ` None ` ` This value is configurable , see Settings section * * Tag name * * : : highlightjs _ javascript * * Parameters * * : : jquery : Truthy to include jQuery as well as highlightjs * * usage * * : : { % highlightjs _ javascript % } * * example * * : : { % highlightjs _ javascript jquery = 1 % }"""
javascript = '' # See if we have to include jQuery if jquery is None : jquery = get_highlightjs_setting ( 'include_jquery' , False ) if jquery : url = highlightjs_jquery_url ( ) if url : javascript += '<script src="{url}"></script>' . format ( url = url ) url = highlightjs_url ( ) if url : javascript += '<script src="{url}"></script>' . format ( url = url ) javascript += '<script>hljs.initHighlightingOnLoad();</script>' return javascript
def delete_feature(self, feature_id, organism=None, sequence=None):
    """Delete a feature

    :type feature_id: str
    :param feature_id: Feature UUID

    :type organism: str
    :param organism: Organism Common Name

    :type sequence: str
    :param sequence: Sequence Name

    :rtype: dict
    :return: A standard apollo feature dictionary ({"features": [{...}]})
    """
    data = {
        'features': [
            {
                'uniquename': feature_id,
            }
        ]
    }
    data = self._update_data(data, organism, sequence)
    return self.post('deleteFeature', data)
def time_to_text(time):
    """Get a representative text of a time (in s)."""
    if time < 0.001:
        return str(round(time * 1000000)) + " µs"
    elif time < 1:
        return str(round(time * 1000)) + " ms"
    elif time < 60:
        return str(round(time, 1)) + " s"
    else:
        return str(round(time / 60, 1)) + " min"
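A few illustrative calls, one per branch:

    time_to_text(0.0005)   # '500 µs'
    time_to_text(0.25)     # '250 ms'
    time_to_text(12.34)    # '12.3 s'
    time_to_text(90)       # '1.5 min'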
def exec(self, args):
    """Log the requested status and dispatch it to ``self.status``.

    :param args: parsed arguments; only ``args.status`` is used here
    """
    logger.debug("status %s", args.status)
    self.status(args.status)
def match(self, item):
    """Return True if filter matches item."""
    tags = getattr(item, self._name) or []
    if self._exact:
        # Equality check
        return self._value == set(tags)
    else:
        # Is given tag in list?
        return self._value in tags
def to_bigquery_field ( self , name_case = DdlParseBase . NAME_CASE . original ) : """Generate BigQuery JSON field define"""
col_name = self . get_name ( name_case ) mode = self . bigquery_mode if self . array_dimensional <= 1 : # no or one dimensional array data type type = self . bigquery_legacy_data_type else : # multiple dimensional array data type type = "RECORD" fields = OrderedDict ( ) fields_cur = fields for i in range ( 1 , self . array_dimensional ) : is_last = True if i == self . array_dimensional - 1 else False fields_cur [ 'fields' ] = [ OrderedDict ( ) ] fields_cur = fields_cur [ 'fields' ] [ 0 ] fields_cur [ 'name' ] = "dimension_{}" . format ( i ) fields_cur [ 'type' ] = self . bigquery_legacy_data_type if is_last else "RECORD" fields_cur [ 'mode' ] = self . bigquery_mode if is_last else "REPEATED" col = OrderedDict ( ) col [ 'name' ] = col_name col [ 'type' ] = type col [ 'mode' ] = mode if self . array_dimensional > 1 : col [ 'fields' ] = fields [ 'fields' ] return json . dumps ( col )
def seconds_in_day_to_time(seconds):
    """Decomposes a time of day into hour, minute and seconds components.

    Arguments
        seconds: int
            A time of day given as the number of seconds passed since midnight.

    Returns
        datetime.time
            The corresponding time of day as a datetime.time object.

    Example
        >>> seconds_in_day_to_time(23430)
        datetime.time(6, 30, 30)
    """
    try:
        return time(*decompose_seconds_in_day(seconds))
    except ValueError:
        print("Seconds = {}".format(seconds))
        print("H = {}, M={}, S={}".format(*decompose_seconds_in_day(seconds)))
        raise
def map_plugin_coro(self, coro_name, *args, **kwargs):
    """Call a coroutine declared by a plugin, by its name.

    :param coro_name:
    :param args:
    :param kwargs:
    :return:
    """
    return (yield from self.map(self._call_coro, coro_name, *args, **kwargs))
def _cost_gp_withGradients(self, x):
    """Predicts the time cost and its gradient of evaluating the function at x."""
    m, _, dmdx, _ = self.cost_model.predict_withGradients(x)
    return np.exp(m), np.exp(m) * dmdx
def MAU(self):
    '''Result of applying the preconditioned operator to the deflation space,
    i.e., :math:`M M_l A M_r U`.
    '''
    if self._MAU is None:
        self._MAU = self.linear_system.M * self.AU
    return self._MAU
def profiling():
    """Runs a profiling test for the function ``lyap_e`` (mainly used for
    development)

    This function requires the package ``cProfile``.
    """
    import cProfile
    n = 10000
    data = np.cumsum(np.random.random(n) - 0.5)
    cProfile.runctx('lyap_e(data)', {'lyap_e': nolds.lyap_e}, {'data': data})
def new ( self , repo_type , name = None , make_default = False , repository_class = None , aggregate_class = None , configuration = None ) : """Creates a new repository of the given type . If the root repository domain ( see : class : ` everest . repositories . constants . REPOSITORY _ DOMAINS ` ) is passed as a repository name , the type string is used as the name ; if no name is passed , a unique name is created automatically ."""
if name == REPOSITORY_DOMAINS . ROOT : # Unless explicitly configured differently , all root repositories # join the transaction . join_transaction = True autocommit = False name = repo_type else : join_transaction = False if name is None : name = "%s%d" % ( repo_type , next ( self . __repo_id_gen ) ) # The system repository is special in that its repository # should not join the transaction but still commit all changes . autocommit = name == REPOSITORY_DOMAINS . SYSTEM if repository_class is None : reg = get_current_registry ( ) repository_class = reg . queryUtility ( IRepository , name = repo_type ) if repository_class is None : raise ValueError ( 'Unknown repository type "%s".' % repo_type ) repo = repository_class ( name , aggregate_class , join_transaction = join_transaction , autocommit = autocommit ) if not configuration is None : repo . configure ( ** configuration ) if make_default : self . __default_repo = repo return repo
def jsonify_error(message_or_exception, status_code=400):
    """Returns a JSON payload that indicates the request had an error."""
    if isinstance(message_or_exception, Exception):
        message = '%s: %s' % (
            message_or_exception.__class__.__name__, message_or_exception)
    else:
        message = message_or_exception

    logging.debug('Returning status=%s, error message: %s',
                  status_code, message)
    response = jsonify(error=message)
    response.status_code = status_code
    return response
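A sketch of how such a helper is typically used in a Flask view (the route, the lookup, and the ``app`` object are made up; only ``jsonify`` is implied by the code above):

    @app.route('/items/<int:item_id>')
    def get_item(item_id):
        item = items.get(item_id)   # hypothetical lookup
        if item is None:
            return jsonify_error('item %d not found' % item_id, status_code=404)
        return jsonify(item=item)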
def satisfiable(self, extra_constraints=(), exact=None):
    """This function does a constraint check and checks if the solver is in a
    sat state.

    :param extra_constraints: Extra constraints (as ASTs) to add to s for this solve
    :param exact:             If False, return approximate solutions.

    :return: True if sat, otherwise false
    """
    if exact is False and o.VALIDATE_APPROXIMATIONS in self.state.options:
        er = self._solver.satisfiable(
            extra_constraints=self._adjust_constraint_list(extra_constraints))
        ar = self._solver.satisfiable(
            extra_constraints=self._adjust_constraint_list(extra_constraints),
            exact=False)
        if er is True:
            assert ar is True
        return ar
    return self._solver.satisfiable(
        extra_constraints=self._adjust_constraint_list(extra_constraints),
        exact=exact)
def peered ( name ) : '''Check if node is peered . name The remote host with which to peer . . . code - block : : yaml peer - cluster : glusterfs . peered : - name : two peer - clusters : glusterfs . peered : - names : - one - two - three - four'''
ret = { 'name' : name , 'changes' : { } , 'comment' : '' , 'result' : False } try : suc . check_name ( name , 'a-zA-Z0-9._-' ) except SaltCloudException : ret [ 'comment' ] = 'Invalid characters in peer name.' return ret # Check if the name resolves to one of this minion IP addresses name_ips = salt . utils . network . host_to_ips ( name ) if name_ips is not None : # if it is None , it means resolution fails , let ' s not hide # it from the user . this_ips = set ( salt . utils . network . ip_addrs ( ) ) this_ips . update ( salt . utils . network . ip_addrs6 ( ) ) if this_ips . intersection ( name_ips ) : ret [ 'result' ] = True ret [ 'comment' ] = 'Peering with localhost is not needed' return ret peers = __salt__ [ 'glusterfs.peer_status' ] ( ) if peers and any ( name in v [ 'hostnames' ] for v in peers . values ( ) ) : ret [ 'result' ] = True ret [ 'comment' ] = 'Host {0} already peered' . format ( name ) return ret if __opts__ [ 'test' ] : ret [ 'comment' ] = 'Peer {0} will be added.' . format ( name ) ret [ 'result' ] = None return ret if not __salt__ [ 'glusterfs.peer' ] ( name ) : ret [ 'comment' ] = 'Failed to peer with {0}, please check logs for errors' . format ( name ) return ret # Double check that the action succeeded newpeers = __salt__ [ 'glusterfs.peer_status' ] ( ) if newpeers and any ( name in v [ 'hostnames' ] for v in newpeers . values ( ) ) : ret [ 'result' ] = True ret [ 'comment' ] = 'Host {0} successfully peered' . format ( name ) ret [ 'changes' ] = { 'new' : newpeers , 'old' : peers } else : ret [ 'comment' ] = 'Host {0} was successfully peered but did not appear in the list of peers' . format ( name ) return ret
def get_key(self, path, geometry, filters, options):
    """Generates the thumbnail's key from its arguments.
    If the arguments don't change, the key will not change.
    """
    seed = u' '.join([
        str(path),
        str(geometry),
        str(filters),
        str(options),
    ]).encode('utf8')
    return md5(seed).hexdigest()
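Illustrative behaviour (the argument values and the ``store`` instance are made up; the exact digest depends on the arguments' string representations):

    k1 = store.get_key('img/cat.jpg', '200x200', ['crop'], {'quality': 80})
    k2 = store.get_key('img/cat.jpg', '200x200', ['crop'], {'quality': 80})
    k1 == k2   # True: identical arguments always hash to the same key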
def compile_id_list ( self , polygon_id_list , nr_of_polygons ) : """sorts the polygons _ id list from least to most occurrences of the zone ids ( - > speed up ) only 4.8 % of all shortcuts include polygons from more than one zone but only for about 0.4 % sorting would be beneficial ( zones have different frequencies ) in most of those cases there are only two types of zones ( = entries in counted _ zones ) and one of them has only one entry . the polygon lists of all single shortcut are already sorted ( during compilation of the binary files ) sorting should be used for closest _ timezone _ at ( ) , because only in that use case the polygon lists are quite long ( multiple shortcuts are being checked simultaneously ) . : param polygon _ id _ list : : param nr _ of _ polygons : length of polygon _ id _ list : return : sorted list of polygon _ ids , sorted list of zone _ ids , boolean : do all entries belong to the same zone"""
# TODO functional def all_equal ( iterable ) : x = None for x in iterable : # first _ val = x break for y in iterable : if x != y : return False return True zone_id_list = empty ( [ nr_of_polygons ] , dtype = DTYPE_FORMAT_H_NUMPY ) counted_zones = { } for pointer_local , polygon_id in enumerate ( polygon_id_list ) : zone_id = self . id_of ( polygon_id ) zone_id_list [ pointer_local ] = zone_id try : counted_zones [ zone_id ] += 1 except KeyError : counted_zones [ zone_id ] = 1 if len ( counted_zones ) == 1 : # there is only one zone . no sorting needed . return polygon_id_list , zone_id_list , True if all_equal ( list ( counted_zones . values ( ) ) ) : # all the zones have the same amount of polygons . no sorting needed . return polygon_id_list , zone_id_list , False counted_zones_sorted = sorted ( list ( counted_zones . items ( ) ) , key = lambda zone : zone [ 1 ] ) sorted_polygon_id_list = empty ( [ nr_of_polygons ] , dtype = DTYPE_FORMAT_H_NUMPY ) sorted_zone_id_list = empty ( [ nr_of_polygons ] , dtype = DTYPE_FORMAT_H_NUMPY ) pointer_output = 0 for zone_id , amount in counted_zones_sorted : # write all polygons from this zone in the new list pointer_local = 0 detected_polygons = 0 while detected_polygons < amount : if zone_id_list [ pointer_local ] == zone_id : # the polygon at the pointer has the wanted zone _ id detected_polygons += 1 sorted_polygon_id_list [ pointer_output ] = polygon_id_list [ pointer_local ] sorted_zone_id_list [ pointer_output ] = zone_id pointer_output += 1 pointer_local += 1 return sorted_polygon_id_list , sorted_zone_id_list , False
def payload(self):
    """The payload property automatically decodes the encapsulated data."""
    if self.args_rdf_name:
        # Now try to create the correct RDFValue.
        result_cls = self.classes.get(self.args_rdf_name, rdfvalue.RDFString)
        result = result_cls.FromSerializedString(self.Get("args"),
                                                 age=self.args_age)
        return result
def _init_glyph ( self , plot , mapping , properties ) : """Returns a Bokeh glyph object ."""
ret = super ( SideHistogramPlot , self ) . _init_glyph ( plot , mapping , properties ) if not 'field' in mapping . get ( 'fill_color' , { } ) : return ret dim = mapping [ 'fill_color' ] [ 'field' ] sources = self . adjoined . traverse ( lambda x : ( x . handles . get ( 'color_dim' ) , x . handles . get ( 'source' ) ) ) sources = [ src for cdim , src in sources if cdim == dim ] tools = [ t for t in self . handles [ 'plot' ] . tools if isinstance ( t , BoxSelectTool ) ] if not tools or not sources : return box_select , main_source = tools [ 0 ] , sources [ 0 ] handles = { 'color_mapper' : self . handles [ 'color_mapper' ] , 'source' : self . handles [ 'source' ] , 'cds' : self . handles [ 'source' ] , 'main_source' : main_source } axis = 'y' if self . invert_axes else 'x' callback = self . _callback . format ( axis = axis ) if box_select . callback : box_select . callback . code += callback box_select . callback . args . update ( handles ) else : box_select . callback = CustomJS ( args = handles , code = callback ) return ret
def unpack_from(self, buff, offset=0):
    """Read bytes from a buffer and return as a namedtuple."""
    return self._create(super(NamedStruct, self).unpack_from(buff, offset))
def get_available_name(self, name):
    """Returns a filename that's free on the target storage system, and
    available for new content to be written to.
    """
    dir_name, file_name = os.path.split(name)
    file_root, file_ext = os.path.splitext(file_name)
    # If the filename already exists, add an underscore and a number (before
    # the file extension, if one exists) to the filename until the generated
    # filename doesn't exist.
    count = itertools.count(1)
    while self.exists(name):
        # file_ext includes the dot.
        name = os.path.join(dir_name, "%s_%s%s" % (file_root, next(count), file_ext))
    return name
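For example, with a hypothetical ``storage`` backend and an invented path, the loop probes "_1", "_2", ... until it finds a free name:

    storage.get_available_name("docs/report.pdf")
    # -> "docs/report.pdf"    if that name is free
    # -> "docs/report_1.pdf"  if only the original exists
    # -> "docs/report_2.pdf"  if both of the above exist, and so on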
def _check_required_fields(self, object_type, ignore_fields):
    # type: (str, List[str]) -> None
    """Helper method to check that metadata for HDX object is complete

    Args:
        ignore_fields (List[str]): Any fields to ignore in the check

    Returns:
        None
    """
    for field in self.configuration[object_type]['required_fields']:
        if field not in self.data and field not in ignore_fields:
            raise HDXError('Field %s is missing in %s!' % (field, object_type))
def _get_options_group ( group = None ) : """Get a specific group of options which are allowed ."""
# : These expect a hexidecimal keyid as their argument , and can be parsed # : with : func : ` _ is _ hex ` . hex_options = frozenset ( [ '--check-sigs' , '--default-key' , '--default-recipient' , '--delete-keys' , '--delete-secret-keys' , '--delete-secret-and-public-keys' , '--desig-revoke' , '--export' , '--export-secret-keys' , '--export-secret-subkeys' , '--fingerprint' , '--gen-revoke' , '--hidden-encrypt-to' , '--hidden-recipient' , '--list-key' , '--list-keys' , '--list-public-keys' , '--list-secret-keys' , '--list-sigs' , '--recipient' , '--recv-keys' , '--send-keys' , '--edit-key' , '--sign-key' , ] ) # : These options expect value which are left unchecked , though still run # : through : func : ` _ fix _ unsafe ` . unchecked_options = frozenset ( [ '--list-options' , '--passphrase-fd' , '--status-fd' , '--verify-options' , '--command-fd' , ] ) # : These have their own parsers and don ' t really fit into a group other_options = frozenset ( [ '--debug-level' , '--keyserver' , ] ) # : These should have a directory for an argument dir_options = frozenset ( [ '--homedir' , ] ) # : These expect a keyring or keyfile as their argument keyring_options = frozenset ( [ '--keyring' , '--primary-keyring' , '--secret-keyring' , '--trustdb-name' , ] ) # : These expect a filename ( or the contents of a file as a string ) or None # : ( meaning that they read from stdin ) file_or_none_options = frozenset ( [ '--decrypt' , '--decrypt-files' , '--encrypt' , '--encrypt-files' , '--import' , '--verify' , '--verify-files' , '--output' , ] ) # : These options expect a string . see : func : ` _ check _ preferences ` . pref_options = frozenset ( [ '--digest-algo' , '--cipher-algo' , '--compress-algo' , '--compression-algo' , '--cert-digest-algo' , '--personal-digest-prefs' , '--personal-digest-preferences' , '--personal-cipher-prefs' , '--personal-cipher-preferences' , '--personal-compress-prefs' , '--personal-compress-preferences' , '--pinentry-mode' , '--print-md' , '--trust-model' , ] ) # : These options expect no arguments none_options = frozenset ( [ '--allow-loopback-pinentry' , '--always-trust' , '--armor' , '--armour' , '--batch' , '--check-sigs' , '--check-trustdb' , '--clearsign' , '--debug-all' , '--default-recipient-self' , '--detach-sign' , '--export' , '--export-ownertrust' , '--export-secret-keys' , '--export-secret-subkeys' , '--fingerprint' , '--fixed-list-mode' , '--gen-key' , '--import-ownertrust' , '--list-config' , '--list-key' , '--list-keys' , '--list-packets' , '--list-public-keys' , '--list-secret-keys' , '--list-sigs' , '--lock-multiple' , '--lock-never' , '--lock-once' , '--no-default-keyring' , '--no-default-recipient' , '--no-emit-version' , '--no-options' , '--no-tty' , '--no-use-agent' , '--no-verbose' , '--print-mds' , '--quiet' , '--sign' , '--symmetric' , '--throw-keyids' , '--use-agent' , '--verbose' , '--version' , '--with-colons' , '--yes' , ] ) # : These options expect either None or a hex string hex_or_none_options = hex_options . intersection ( none_options ) allowed = hex_options . union ( unchecked_options , other_options , dir_options , keyring_options , file_or_none_options , pref_options , none_options ) if group and group in locals ( ) . keys ( ) : return locals ( ) [ group ]
def horizontal_positions ( docgraph , sentence_root = None ) : """return map : node ID - > first token index ( int ) it covers"""
# calculate positions for the whole graph root_cond = ( sentence_root is None ) or ( sentence_root == docgraph . root ) if root_cond or ( 'tokens' not in docgraph . node [ sentence_root ] ) : sentence_root = docgraph . root token_nodes = docgraph . tokens path = { } else : # calculate positions only for the given sentence subgraph token_nodes = docgraph . node [ sentence_root ] [ 'tokens' ] path = { sentence_root : 0 } for i , token_node in enumerate ( token_nodes ) : start_node = token_node while get_parents ( docgraph , start_node , strict = True ) : if start_node not in path : path [ start_node ] = i start_node = get_parents ( docgraph , start_node , strict = True ) [ 0 ] return path
def check_colormap ( cmap ) : """Check if cmap is one of the colorbrewer maps"""
names = set ( [ 'BrBG' , 'PiYG' , 'PRGn' , 'PuOr' , 'RdBu' , 'RdGy' , 'RdYlBu' , 'RdYlGn' , 'Spectral' , 'Blues' , 'BuGn' , 'BuPu' , 'GnBu' , 'Greens' , 'Greys' , 'Oranges' , 'OrRd' , 'PuBu' , 'PuBuGn' , 'PuRd' , 'Purples' , 'RdPu' , 'Reds' , 'YlGn' , 'YlGnBu' , 'YlOrBr' , 'YlOrRd' , 'Accent' , 'Dark2' , 'Paired' , 'Pastel1' , 'Pastel2' , 'Set1' , 'Set2' , 'Set3' , 'Lightning' ] ) if cmap not in names : raise Exception ( "Invalid cmap '%s', must be one of %s" % ( cmap , names ) ) else : return cmap
def get_userid_by_email(self, email):
    '''get userid by email'''
    response, status_code = self.__pod__.Users.get_v2_user(
        sessionToken=self.__session__,
        email=email
    ).result()
    self.logger.debug('%s: %s' % (status_code, response))
    return status_code, response
def strided_sample(items, num, offset=0):
    r"""
    Example:
        >>> # DISABLE_DOCTEST
        >>> from utool.util_list import *  # NOQA
        >>> # build test data
        >>> items = [1, 2, 3, 4, 5]
        >>> num = 3
        >>> offset = 0
        >>> # execute function
        >>> sample_items = strided_sample(items, num, offset)
        >>> # verify results
        >>> result = str(sample_items)
        >>> print(result)
    """
    import math
    stride = max(int(math.ceil(len(items) / num)), 1)
    sample_items = items[offset::stride]
    return sample_items
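Worked example of the stride arithmetic above (assuming Python 3 true division):

    # len(items) == 5, num == 3  ->  stride = max(ceil(5 / 3), 1) = 2
    strided_sample([1, 2, 3, 4, 5], 3)            # [1, 3, 5]
    strided_sample([1, 2, 3, 4, 5], 3, offset=1)  # [2, 4]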
def from_grib_date_time(message, date_key='dataDate', time_key='dataTime', epoch=DEFAULT_EPOCH):
    # type: (T.Mapping, str, str, datetime.datetime) -> int
    """Return the number of seconds since the ``epoch`` from the values of the
    ``message`` keys, using datetime.total_seconds().

    :param message: the target GRIB message
    :param date_key: the date key, defaults to "dataDate"
    :param time_key: the time key, defaults to "dataTime"
    :param epoch: the reference datetime
    """
    date = message[date_key]
    time = message[time_key]
    hour = time // 100
    minute = time % 100
    year = date // 10000
    month = date // 100 % 100
    day = date % 100
    data_datetime = datetime.datetime(year, month, day, hour, minute)
    # Python 2 compatible timestamp implementation without timezone hurdle
    # see: https://docs.python.org/3/library/datetime.html#datetime.datetime.timestamp
    return int((data_datetime - epoch).total_seconds())
def _job_handler ( self ) -> bool : """Process the work items ."""
while True : try : task = self . _unfullfilled . get_nowait ( ) except queue . Empty : break else : self . _log . debug ( "Job: %s" % str ( task ) ) engine = self . _dyn_loader ( task [ 'engine' ] , task ) task [ 'start_time' ] = now_time ( ) results = engine . search ( ) task [ 'end_time' ] = now_time ( ) duration : str = str ( ( task [ 'end_time' ] - task [ 'start_time' ] ) . seconds ) task [ 'duration' ] = duration task . update ( { 'results' : results } ) self . _fulfilled . put ( task ) return True
def start ( self ) : '''Starts measuring time , and prints the bar at 0 % . It returns self so you can use it like this : > > > pbar = ProgressBar ( ) . start ( ) > > > for i in range ( 100 ) : . . . # do something . . . pbar . update ( i + 1) > > > pbar . finish ( )'''
if self . redirect_stderr : self . _stderr = sys . stderr sys . stderr = StringIO ( ) if self . redirect_stdout : self . _stdout = sys . stdout sys . stdout = StringIO ( ) if self . maxval is None : self . maxval = self . _DEFAULT_MAXVAL self . num_intervals = max ( 100 , self . term_width ) self . next_update = 0 if self . maxval is not UnknownLength : if self . maxval < 0 : raise ValueError ( 'Value out of range' ) self . update_interval = self . maxval / self . num_intervals self . start_time = self . last_update_time = time . time ( ) self . update ( 0 ) return self
async def login(cls, credentials: AuthenticationCredentials,
                config: Config) -> 'Session':
    """Checks the given credentials for a valid login and returns a new
    session. The mailbox data is shared between concurrent and future
    sessions, but only for the lifetime of the process.
    """
    user = credentials.authcid
    password = cls._get_password(config, user)
    if user != credentials.identity:
        raise InvalidAuth()
    elif not credentials.check_secret(password):
        raise InvalidAuth()
    mailbox_set, filter_set = config.set_cache.get(user, (None, None))
    if not mailbox_set or not filter_set:
        mailbox_set = MailboxSet()
        filter_set = FilterSet()
        if config.demo_data:
            await cls._load_demo(mailbox_set, filter_set)
        config.set_cache[user] = (mailbox_set, filter_set)
    return cls(credentials.identity, config, mailbox_set, filter_set)
def serial_ppmap ( func , fixed_arg , var_arg_iter ) : """A serial implementation of the " partially - pickling map " function returned by the : meth : ` ParallelHelper . get _ ppmap ` interface . Its arguments are : * func * A callable taking three arguments and returning a Pickle - able value . * fixed _ arg * Any value , even one that is not pickle - able . * var _ arg _ iter * An iterable that generates Pickle - able values . The functionality is : : def serial _ ppmap ( func , fixed _ arg , var _ arg _ iter ) : return [ func ( i , fixed _ arg , x ) for i , x in enumerate ( var _ arg _ iter ) ] Therefore the arguments to your ` ` func ` ` function , which actually does the interesting computations , are : * index * The 0 - based index number of the item being processed ; often this can be ignored . * fixed _ arg * The same * fixed _ arg * that was passed to ` ` ppmap ` ` . * var _ arg * The * index * ' th item in the * var _ arg _ iter * iterable passed to ` ` ppmap ` ` ."""
return [ func ( i , fixed_arg , x ) for i , x in enumerate ( var_arg_iter ) ]
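A small usage sketch of serial_ppmap, assuming it is in scope; the scale worker is a hypothetical stand-in that only demonstrates the (index, fixed_arg, var_arg) calling convention.
def scale(index, factor, value):
    # index comes from enumerate(), factor is the fixed (possibly unpicklable) argument.
    return factor * value + index

print(serial_ppmap(scale, 10, [1, 2, 3]))  # [10, 21, 32]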
def fromjson ( source , * args , ** kwargs ) : """Extract data from a JSON file . The file must contain a JSON array as the top level object , and each member of the array will be treated as a row of data . E . g . : : > > > import petl as etl > > > data = ' ' ' . . . [ { " foo " : " a " , " bar " : 1 } , . . . { " foo " : " b " , " bar " : 2 } , . . . { " foo " : " c " , " bar " : 2 } ] . . . ' ' ' > > > with open ( ' example . json ' , ' w ' ) as f : . . . f . write ( data ) 74 > > > table1 = etl . fromjson ( ' example . json ' , header = [ ' foo ' , ' bar ' ] ) > > > table1 | foo | bar | | ' a ' | 1 | | ' b ' | 2 | | ' c ' | 2 | If your JSON file does not fit this structure , you will need to parse it via : func : ` json . load ` and select the array to treat as the data , see also : func : ` petl . io . json . fromdicts ` . . . versionchanged : : 1.1.0 If no ` header ` is specified , fields will be discovered by sampling keys from the first ` sample ` objects in ` source ` . The header will be constructed from keys in the order discovered . Note that this ordering may not be stable , and therefore it may be advisable to specify an explicit ` header ` or to use another function like : func : ` petl . transform . headers . sortheader ` on the resulting table to guarantee stability ."""
source = read_source_from_arg ( source ) return JsonView ( source , * args , ** kwargs )
def get_file_info ( hash , context = None ) : """Returns information about the file , identified by ` ` hash ` ` . If the ` context ` ( an ident - hash ) is supplied , the information returned will be specific to that context ."""
if context is None : stmt = _get_sql ( 'get-file-info.sql' ) args = dict ( hash = hash ) else : stmt = _get_sql ( 'get-file-info-in-context.sql' ) id , version = get_id_n_version ( context ) args = dict ( hash = hash , id = id , version = version ) with db_connect ( ) as db_conn : with db_conn . cursor ( ) as cursor : cursor . execute ( stmt , args ) try : filename , media_type = cursor . fetchone ( ) except TypeError : raise FileNotFound ( hash ) return filename , media_type
def integer_binning ( data = None , ** kwargs ) -> StaticBinning : """Construct a fixed - width binning schema with bins centered around integers . Parameters range : Optional [ Tuple [ int ] ] min ( included ) and max ( excluded ) integer of the binned range bin _ width : Optional [ int ] group " bin _ width " consecutive integers into one bin ( not recommended )"""
if "range" in kwargs : kwargs [ "range" ] = tuple ( r - 0.5 for r in kwargs [ "range" ] ) return fixed_width_binning ( data = data , bin_width = kwargs . pop ( "bin_width" , 1 ) , align = True , bin_shift = 0.5 , ** kwargs )
def change_nsvalue ( self , namespace , value ) : """Deprecated"""
self . namespace = namespace self . value = value
def delete ( self ) : """Override delete in parent class , this will also delete the routing configuration referencing this interface . engine = Engine ( ' vm ' ) interface = engine . interface . get ( 2) interface . delete ( )"""
super ( Interface , self ) . delete ( ) for route in self . _engine . routing : if route . to_delete : route . delete ( ) self . _engine . _del_cache ( )
def create_request ( self , reset_gpd_iterator = False ) : """Set a list of download requests Set a list of DownloadRequests for all images that are under the given property of the Geopedia ' s Vector layer . : param reset _ gpd _ iterator : When re - running the method this flag is used to reset / keep existing ` ` gpd _ iterator ` ` ( i . e . instance of ` ` GeopediaFeatureIterator ` ` class ) . If the iterator is not reset you don ' t have to repeat a service call but tiles and dates will stay the same . : type reset _ gpd _ iterator : bool"""
if reset_gpd_iterator : self . gpd_iterator = None gpd_service = GeopediaImageService ( ) self . download_list = gpd_service . get_request ( self ) self . gpd_iterator = gpd_service . get_gpd_iterator ( )
def _GetStat ( self ) : """Retrieves the stat object . Returns : VFSStat : stat object ."""
stat_object = super ( TSKPartitionFileEntry , self ) . _GetStat ( ) bytes_per_sector = tsk_partition . TSKVolumeGetBytesPerSector ( self . _tsk_volume ) # File data stat information . if self . _tsk_vs_part is not None : number_of_sectors = tsk_partition . TSKVsPartGetNumberOfSectors ( self . _tsk_vs_part ) if number_of_sectors : stat_object . size = number_of_sectors * bytes_per_sector # Date and time stat information . # Ownership and permissions stat information . # File entry type stat information . # The root file entry is virtual and should have type directory . if not self . _is_virtual : stat_object . is_allocated = tsk_partition . TSKVsPartIsAllocated ( self . _tsk_vs_part ) return stat_object
def check ( self , diff ) : """Check that the new file is within the contrib subdirectory"""
path = diff . b_path contrib_path = self . project . contrib_module_path assert pathlib . Path ( contrib_path ) in pathlib . Path ( path ) . parents
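The pathlib containment test used above, shown standalone with hypothetical paths.
import pathlib

contrib_path = "contrib"                   # assumed contrib module path
new_file = "contrib/new_module/model.py"   # assumed path taken from the diff
print(pathlib.Path(contrib_path) in pathlib.Path(new_file).parents)           # True
print(pathlib.Path(contrib_path) in pathlib.Path("docs/index.rst").parents)   # False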
def dumps ( obj , preserve = False ) : """Stringifies a dict as toml : param obj : the object to be dumped into toml : param preserve : optional flag to preserve the inline table in result"""
f = StringIO ( ) dump ( obj , f , preserve ) return f . getvalue ( )
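An illustrative call to dumps(); the input dict is made up and the exact output formatting depends on the underlying dump() implementation.
config = {"title": "example", "owner": {"name": "alice"}}
print(dumps(config))
# Roughly:
# title = "example"
#
# [owner]
# name = "alice"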
def _check_scalar ( value ) : '''If value is a 0 - dimensional array , returns the contents of value . Otherwise , returns value .'''
if isinstance ( value , np . ndarray ) : if value . ndim == 0 : # We have a 0 - dimensional array return value [ None ] [ 0 ] return value
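How the 0-dimensional unwrapping above behaves, assuming _check_scalar and numpy are in scope.
import numpy as np

print(_check_scalar(np.array(5.0)))         # 5.0, the scalar pulled out of a 0-d array
print(_check_scalar(np.array([1.0, 2.0])))  # [1. 2.], arrays with ndim > 0 pass through
print(_check_scalar(7))                     # 7, non-array values are returned unchanged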
def handle_matches ( self , match ) : """Returns a response statement from a matched input statement . : param match : It is a valid matched pattern from the input statement : type : ` _ sre . SRE _ Match `"""
response = Statement ( text = '' ) from_parsed = match . group ( "from" ) target_parsed = match . group ( "target" ) n_statement = match . group ( "number" ) if n_statement == 'a' or n_statement == 'an' : n_statement = '1.0' n = mathparse . parse ( n_statement , self . language . ISO_639 . upper ( ) ) ureg = UnitRegistry ( ) from_parsed , target_parsed = self . get_valid_units ( ureg , from_parsed , target_parsed ) if from_parsed is None or target_parsed is None : response . confidence = 0.0 else : from_value = ureg . Quantity ( float ( n ) , from_parsed ) target_value = from_value . to ( target_parsed ) response . confidence = 1.0 response . text = str ( target_value . magnitude ) return response
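The core pint conversion the handler relies on, shown standalone; the units are example values, not ones taken from a real matched statement.
from pint import UnitRegistry

ureg = UnitRegistry()
quantity = ureg.Quantity(2.0, "kilometer")  # e.g. "2 kilometers" parsed from the input
print(quantity.to("meter").magnitude)       # 2000.0, the value placed on the response text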
def groups_archive ( self , room_id , ** kwargs ) : """Archives a private group , only if you ’ re part of the group ."""
return self . __call_api_post ( 'groups.archive' , roomId = room_id , kwargs = kwargs )
def _read_response ( self , response ) : """JSON Documentation : https : / / www . jfrog . com / confluence / display / RTF / Security + Configuration + JSON"""
# self . password = ' ' # never returned self . name = response [ 'name' ] self . email = response [ 'email' ] self . admin = response [ 'admin' ] self . profileUpdatable = response [ 'profileUpdatable' ] self . internalPasswordDisabled = response [ 'internalPasswordDisabled' ] self . _groups = response [ 'groups' ] if 'groups' in response else [ ] self . _lastLoggedIn = response [ 'lastLoggedIn' ] if 'lastLoggedIn' in response else [ ] self . _realm = response [ 'realm' ] if 'realm' in response else [ ]
def getvalue ( x ) : """Return the single value of x or raise TypeError if x has more than one value ."""
if isrepeating ( x ) : raise TypeError ( "Ambiguous call to getvalue for %r which has more than one value." % x ) for value in getvalues ( x ) : return value
def getValidReff ( self , urn , inventory = None , level = None ) : """Retrieve valid urn - references for a text : param urn : URN identifying the text : type urn : text : param inventory : Name of the inventory : type inventory : text : param level : Depth of references expected : type level : int : return : XML Response from the API as string : rtype : str"""
return self . call ( { "inv" : inventory , "urn" : urn , "level" : level , "request" : "GetValidReff" } )
def best_guess ( self , line , args = None , kwargs = None , multiple = True , ** options ) : """Given multiple = False , this will simply return the first matching regex ' s first handler ; otherwise , this will return a list of all the matching handler function ( s ) . If rank _ best was set on the parser , this will attempt to " rank " the regexes , and will return the highest ranked handler ( s ) . Returns ( matches , args , kwargs ) - it ' s up to the caller to apply these and test whether f . options [ " inject " ] / f . no _ args is set , etc ."""
cmd , _args , _kwargs = self . parse ( line ) if args is None : args = _args if kwargs is None : kwargs = _kwargs matches = [ ] for regex , funcs in self . handlers . items ( ) : if not regex . findall ( cmd ) : continue if not multiple : return ( ( funcs [ 0 ] , args , kwargs ) if len ( funcs ) > 0 else ( None , args , kwargs ) ) matches . append ( funcs ) if self . rank_best : # TODO : rank regexes . pass return ( matches , args , kwargs )
def scramble ( expnums , ccd , version = 'p' , dry_run = False ) : """run the plant script on this combination of exposures"""
mjds = [ ] fobjs = [ ] for expnum in expnums : filename = storage . get_image ( expnum , ccd = ccd , version = version ) fobjs . append ( fits . open ( filename ) ) # Pull out values to replace in headers . . must pull them # as otherwise we get pointers . . . mjds . append ( fobjs [ - 1 ] [ 0 ] . header [ 'MJD-OBS' ] ) order = [ 0 , 2 , 1 ] for idx in range ( len ( fobjs ) ) : logging . info ( "Flipping %d to %d" % ( fobjs [ idx ] [ 0 ] . header [ 'EXPNUM' ] , expnums [ order [ idx ] ] ) ) fobjs [ idx ] [ 0 ] . header [ 'EXPNUM' ] = expnums [ order [ idx ] ] fobjs [ idx ] [ 0 ] . header [ 'MJD-OBS' ] = mjds [ order [ idx ] ] uri = storage . get_uri ( expnums [ order [ idx ] ] , ccd = ccd , version = 's' , ext = 'fits' ) fname = os . path . basename ( uri ) if os . access ( fname , os . F_OK ) : os . unlink ( fname ) fobjs [ idx ] . writeto ( fname ) if dry_run : continue storage . copy ( fname , uri ) return
def combine_threads ( threads , assert_birth_point = False ) : """Combine list of threads into a single ns run . This is different to combining runs as repeated threads are allowed , and as some threads can start from log - likelihood contours on which no dead point in the run is present . Note that if all the thread labels are not unique and in ascending order , the output will fail check _ ns _ run . However provided the thread labels are not used it will work ok for calculations based on nlive , logl and theta . Parameters threads : list of dicts List of nested sampling run dicts , each representing a single thread . assert _ birth _ point : bool , optional Whether or not to assert there is exactly one point present in the run with the log - likelihood at which each point was born . This is not true for bootstrap resamples of runs , where birth points may be repeated or not present at all . Returns run : dict Nested sampling run dict ( see data _ processing module docstring for more details ) ."""
thread_min_max = np . vstack ( [ td [ 'thread_min_max' ] for td in threads ] ) assert len ( threads ) == thread_min_max . shape [ 0 ] # construct samples array from the threads , including an updated nlive samples_temp = np . vstack ( [ array_given_run ( thread ) for thread in threads ] ) samples_temp = samples_temp [ np . argsort ( samples_temp [ : , 0 ] ) ] # update the changes in live points column for threads which start part way # through the run . These are only present in dynamic nested sampling . logl_starts = thread_min_max [ : , 0 ] state = np . random . get_state ( ) # save random state np . random . seed ( 0 ) # seed to make sure any random assignment is reproducible for logl_start in logl_starts [ logl_starts != - np . inf ] : ind = np . where ( samples_temp [ : , 0 ] == logl_start ) [ 0 ] if assert_birth_point : assert ind . shape == ( 1 , ) , 'No unique birth point! ' + str ( ind . shape ) if ind . shape == ( 1 , ) : # If the point at which this thread started is present exactly # once in this bootstrap replication : samples_temp [ ind [ 0 ] , 2 ] += 1 elif ind . shape == ( 0 , ) : # If the point with the likelihood at which the thread started # is not present in this particular bootstrap replication , # approximate it with the point with the nearest likelihood . ind_closest = np . argmin ( np . abs ( samples_temp [ : , 0 ] - logl_start ) ) samples_temp [ ind_closest , 2 ] += 1 else : # If the point at which this thread started is present multiple # times in this bootstrap replication , select one at random to # increment nlive on . This avoids any systematic bias from e . g . # always choosing the first point . samples_temp [ np . random . choice ( ind ) , 2 ] += 1 np . random . set_state ( state ) # make run ns_run = dict_given_run_array ( samples_temp , thread_min_max ) try : check_ns_run_threads ( ns_run ) except AssertionError : # If the threads are not valid ( e . g . for bootstrap resamples ) then # set them to None so they can ' t be accidentally used ns_run [ 'thread_labels' ] = None ns_run [ 'thread_min_max' ] = None return ns_run
def get_switch_actors ( self ) : """Get information about all actors This needs 1 + ( 5n ) requests where n = number of actors registered Deprecated , use get _ actors instead . Returns a dict : [ ain ] = { ' name ' : Name of actor , ' state ' : Powerstate ( boolean ) , ' present ' : Connected to server ? ( boolean ) , ' power ' : Current power consumption in mW , ' energy ' : Used energy in Wh since last energy reset , ' temperature ' : Current environment temperature in celsius }"""
actors = { } for ain in self . homeautoswitch ( "getswitchlist" ) . split ( ',' ) : actors [ ain ] = { 'name' : self . homeautoswitch ( "getswitchname" , ain ) , 'state' : bool ( self . homeautoswitch ( "getswitchstate" , ain ) ) , 'present' : bool ( self . homeautoswitch ( "getswitchpresent" , ain ) ) , 'power' : self . homeautoswitch ( "getswitchpower" , ain ) , 'energy' : self . homeautoswitch ( "getswitchenergy" , ain ) , 'temperature' : self . homeautoswitch ( "getswitchtemperature" , ain ) , } return actors
def divide_list_into_parts ( input_list , length_of_first_part ) : """Split a given list into two parts based on the provided length for the first part . Examples : divide _ list _ into _ parts ( [ 1 , 1 , 2 , 3 , 4 , 4 , 5 , 1 ] , 3 ) - > ( [ 1 , 1 , 2 ] , [ 3 , 4 , 4 , 5 , 1 ] ) divide _ list _ into _ parts ( [ ' a ' , ' b ' , ' c ' , ' d ' ] , 2 ) - > ( [ ' a ' , ' b ' ] , [ ' c ' , ' d ' ] ) divide _ list _ into _ parts ( [ ' p ' , ' y ' , ' t ' , ' h ' , ' o ' , ' n ' ] , 4 ) - > ( [ ' p ' , ' y ' , ' t ' , ' h ' ] , [ ' o ' , ' n ' ] ) Args : input _ list : The list to be split . length _ of _ first _ part : The length of the first part of the split list . Returns : A tuple of two lists , containing the split parts of the original list ."""
return ( input_list [ : length_of_first_part ] , input_list [ length_of_first_part : ] )
def resolvePrefix ( self , prefix ) : """Resolve the specified prefix to a known namespace . @ param prefix : A declared prefix @ type prefix : basestring @ return : The namespace that has been mapped to I { prefix } @ rtype : ( I { prefix } , I { name } )"""
ns = Namespace . default if self . parent is not None : ns = self . parent . resolvePrefix ( prefix ) return ns
def get_operation_mtf_dimension_names ( self , operation_name ) : """The Mesh TensorFlow dimensions associated with an operation . Args : operation _ name : a string , name of an operation in the graph . Returns : a set ( string ) , the names of Mesh TensorFlow dimensions ."""
mtf_dimension_names = set ( ) for tensor_name in self . get_operation_input_names ( operation_name ) : mtf_dimension_names . update ( self . get_tensor_mtf_dimension_names ( tensor_name ) ) for tensor_name in self . get_operation_output_names ( operation_name ) : mtf_dimension_names . update ( self . get_tensor_mtf_dimension_names ( tensor_name ) ) return mtf_dimension_names
def _set_range ( self , init ) : """Reset the camera view using the known limits ."""
if init and ( self . _scale_factor is not None ) : return # We don ' t have to set our scale factor # Get window size ( and store factor now to sync with resizing ) w , h = self . _viewbox . size w , h = float ( w ) , float ( h ) # Get range and translation for x and y x1 , y1 , z1 = self . _xlim [ 0 ] , self . _ylim [ 0 ] , self . _zlim [ 0 ] x2 , y2 , z2 = self . _xlim [ 1 ] , self . _ylim [ 1 ] , self . _zlim [ 1 ] rx , ry , rz = ( x2 - x1 ) , ( y2 - y1 ) , ( z2 - z1 ) # Correct ranges for window size . Note that the window width # influences the x and y data range , while the height influences # the z data range . if w / h > 1 : rx /= w / h ry /= w / h else : rz /= h / w # Convert to screen coordinates . In screen x , only x and y have effect . # In screen y , all three dimensions have effect . The idea of the lines # below is to calculate the range on screen when that will fit the # data under any rotation . rxs = ( rx ** 2 + ry ** 2 ) ** 0.5 rys = ( rx ** 2 + ry ** 2 + rz ** 2 ) ** 0.5 self . scale_factor = max ( rxs , rys ) * 1.04
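Worked numbers for the range correction above, using an assumed 800x400 window and data ranges of 10 on every axis.
w, h = 800.0, 400.0
rx = ry = rz = 10.0
if w / h > 1:          # wide window: shrink the x/y data ranges
    rx /= w / h
    ry /= w / h
else:                  # tall window: shrink the z data range
    rz /= h / w
rxs = (rx ** 2 + ry ** 2) ** 0.5
rys = (rx ** 2 + ry ** 2 + rz ** 2) ** 0.5
print(round(max(rxs, rys) * 1.04, 2))  # 12.74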
def as_shared_dtype ( scalars_or_arrays ) : """Cast arrays to a shared dtype using xarray ' s type promotion rules ."""
arrays = [ asarray ( x ) for x in scalars_or_arrays ] # Pass arrays directly instead of dtypes to result _ type so scalars # get handled properly . # Note that result _ type ( ) safely gets the dtype from dask arrays without # evaluating them . out_type = dtypes . result_type ( * arrays ) return [ x . astype ( out_type , copy = False ) for x in arrays ]
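The promotion step this helper depends on, sketched with plain NumPy; xarray's dtypes.result_type follows similar rules with a few extra special cases, so this is only an approximation.
import numpy as np

a = np.array([1, 2], dtype=np.int32)
b = np.array([0.5], dtype=np.float64)
out_type = np.result_type(a, b)  # float64 wins the promotion
print([x.astype(out_type, copy=False).dtype for x in (a, b)])  # [dtype('float64'), dtype('float64')]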
def guard ( pidfile , guardfile = None ) : """Raise an EnvironmentError when the " guardfile " doesn ' t exist , or the process with the ID found in " pidfile " is still active ."""
# Check guard if guardfile and not os . path . exists ( guardfile ) : raise EnvironmentError ( "Guard file '%s' not found, won't start!" % guardfile ) if os . path . exists ( pidfile ) : running , pid = check_process ( pidfile ) if running : raise EnvironmentError ( "Daemon process #%d still running, won't start!" % pid ) else : logging . getLogger ( "daemonize" ) . info ( "Process #%d disappeared, continuing..." % pid ) # Keep race condition window small , by immediately writing launcher process ID _write_pidfile ( pidfile )
def remove_user ( self , username ) : """Remove user from the SQLite database . * ` username ` [ string ] Username of user to remove ."""
sql = '''DELETE FROM user WHERE username = ?''' try : self . _db_curs . execute ( sql , ( username , ) ) self . _db_conn . commit ( ) except ( sqlite3 . OperationalError , sqlite3 . IntegrityError ) as error : raise AuthError ( error ) return self . _db_curs . rowcount