def parse_localnamespacepath(parser, event, node):  # pylint: disable=unused-argument
    """Parse LOCALNAMESPACEPATH for Namespace. Return assembled namespace.

    <!ELEMENT LOCALNAMESPACEPATH (NAMESPACE+)>
    """
    (next_event, next_node) = six.next(parser)
    namespaces = []
    if not _is_start(next_event, next_node, 'NAMESPACE'):
        print(next_event, next_node)
        raise ParseError('Expecting NAMESPACE')
    namespaces.append(parse_namespace(parser, next_event, next_node))
    while 1:
        (next_event, next_node) = six.next(parser)
        if _is_end(next_event, next_node, 'LOCALNAMESPACEPATH'):
            break
        if _is_start(next_event, next_node, 'NAMESPACE'):
            namespaces.append(parse_namespace(parser, next_event, next_node))
        else:
            raise ParseError('Expecting NAMESPACE')
    return '/'.join(namespaces)
def _update_pop(self, pop_size):
    """Assigns fitnesses to particles that are within bounds."""
    valid_particles = []
    invalid_particles = []
    for part in self.population:
        if any(x > 1 or x < -1 for x in part):
            invalid_particles.append(part)
        else:
            valid_particles.append(part)
    self._model_count += len(valid_particles)
    for part in valid_particles:
        self.update_particle(part)
    self.assign_fitnesses(valid_particles)
    for part in valid_particles:
        if part.fitness > part.best.fitness:
            part.best = creator.Particle(part)
            part.best.fitness = part.fitness
    for part in invalid_particles:
        self.update_particle(part)
    self.population[:] = valid_particles + invalid_particles
    self.population.sort(key=lambda x: x.ident)  # shouldn't need to sort?
    return
def engage(proto='udp'):
    '''Fire thrusters.'''
    from ironman.server import ServerFactory
    from twisted.internet import reactor
    from twisted.internet.defer import Deferred
    getattr(reactor, 'listen{0:s}'.format(proto.upper()))(8888, ServerFactory(proto, Deferred))
    reactor.run()
def get_station_temperature_datetime(self, station_id):
    """Return temperature measurement datetime for a given station."""
    request = requests.get(
        "{}/station/{}/parameters/temperature/datetime".format(self.base_url, station_id))
    if request.status_code != 200:
        return None
    return datetime.strptime(request.json(), "%Y-%m-%dT%H:%M:%S")
def duration(self):
    """Calculates the breeding cage's duration.

    This is relative to the current date (if alive) or the date of
    inactivation (if not). The duration is formatted in days.
    """
    if self.End:
        age = self.End - self.Start
    else:
        age = datetime.date.today() - self.Start
    return age.days
def _overapprox(self):
    """The method extracts a model corresponding to an over-approximation of
    an MCS, i.e. it is the model of the hard part of the formula (the
    corresponding oracle call is made in :func:`compute`).

    Here, the set of selectors is divided into two parts: ``self.ss_assumps``,
    which is an under-approximation of an MSS (maximal satisfiable subset)
    and ``self.setd``, which is an over-approximation of the target MCS.
    Both will be further refined in :func:`_compute`.
    """
    model = self.oracle.get_model()
    for sel in self.sels:
        if len(model) < sel or model[sel - 1] > 0:
            # soft clauses contain positive literals,
            # so if var is true then the clause is satisfied
            self.ss_assumps.append(sel)
        else:
            self.setd.append(sel)
def connect(sock, addr):
    """Connect to some addr."""
    try:
        sock.connect(addr)
    except ssl.SSLError as e:
        return (ssl.SSLError, e.strerror if e.strerror else e.message)
    except socket.herror as (_, msg):
        return (socket.herror, msg)
    except socket.gaierror as (_, msg):
        return (socket.gaierror, msg)
    except socket.timeout:
        return (socket.timeout, "timeout")
    except socket.error as e:
        return (socket.error, e.strerror if e.strerror else e.message)
    return None
def checkpat(self, pattern):
    """check for errors in a regex pattern"""
    if pattern is None:
        return
    try:
        re.match(pattern, "")
    except re.error:
        print3("\nBad user-defined singular pattern:\n\t%s\n" % pattern)
        raise BadUserDefinedPatternError
def _pre_action(self, action):
    """Overrides the superclass method to actuate the robot with the
    passed joint velocities and gripper control.

    Args:
        action (numpy array): The control to apply to the robot. The first
            @self.mujoco_robot.dof dimensions should be the desired
            normalized joint velocities and if the robot has a gripper,
            the next @self.gripper.dof dimensions should be actuation
            controls for the gripper.
    """
    # clip actions into valid range
    assert len(action) == self.dof, "environment got invalid action dimension"
    low, high = self.action_spec
    action = np.clip(action, low, high)

    if self.has_gripper:
        arm_action = action[:self.mujoco_robot.dof]
        gripper_action_in = action[self.mujoco_robot.dof:self.mujoco_robot.dof + self.gripper.dof]
        gripper_action_actual = self.gripper.format_action(gripper_action_in)
        action = np.concatenate([arm_action, gripper_action_actual])

    # rescale normalized action to control ranges
    ctrl_range = self.sim.model.actuator_ctrlrange
    bias = 0.5 * (ctrl_range[:, 1] + ctrl_range[:, 0])
    weight = 0.5 * (ctrl_range[:, 1] - ctrl_range[:, 0])
    applied_action = bias + weight * action
    self.sim.data.ctrl[:] = applied_action

    # gravity compensation
    self.sim.data.qfrc_applied[self._ref_joint_vel_indexes] = \
        self.sim.data.qfrc_bias[self._ref_joint_vel_indexes]

    if self.use_indicator_object:
        self.sim.data.qfrc_applied[self._ref_indicator_vel_low:self._ref_indicator_vel_high] = \
            self.sim.data.qfrc_bias[self._ref_indicator_vel_low:self._ref_indicator_vel_high]
def mostly(fn):
    """95% chance of happening"""
    def wrapped(*args, **kwargs):
        if in_percentage(95):
            fn(*args, **kwargs)
    return wrapped
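# A quick usage sketch of the `mostly` decorator above. `in_percentage` is not
# shown in this excerpt, so a stand-in with the assumed behaviour (returns True
# the given percentage of the time) is defined here purely for illustration.
import random

def in_percentage(pct):
    # illustrative stand-in for the module's helper
    return random.uniform(0, 100) < pct

@mostly
def greet(name):
    print("hello,", name)

greet("world")  # prints on ~95% of calls; silently does nothing otherwise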
def _validate_value ( self , value , field_spec , path , errors ) : """Validates that the given field value is valid given the associated field spec and path . Any validation failures are added to the given errors collection ."""
# Check if the value is None and add an error if the field is not nullable . # Note that for backward compatibility reasons , the default value of ' nullable ' # is the inverse of ' required ' ( which use to mean both that the key be present # and not set to None ) . if value is None : if not field_spec . get ( 'nullable' , not field_spec . get ( 'required' , False ) ) : errors [ path ] = "{} is not nullable." . format ( path ) return # All fields should have a type field_type = field_spec [ 'type' ] if isinstance ( field_type , types . FunctionType ) : try : field_type = field_type ( value ) except Exception as e : raise SchemaFormatException ( "Dynamic schema function raised exception: {}" . format ( str ( e ) ) , path ) if not isinstance ( field_type , ( type , Schema , Array ) ) : raise SchemaFormatException ( "Dynamic schema function did not return a type at path {}" , path ) # If our field is an embedded document , recurse into it if isinstance ( field_type , Schema ) : if isinstance ( value , dict ) : field_type . _validate_instance ( value , errors , path ) else : errors [ path ] = "{} should be an embedded document" . format ( path ) return elif isinstance ( field_type , Array ) : if isinstance ( value , list ) : is_dynamic = isinstance ( field_type . contained_type , types . FunctionType ) for i , item in enumerate ( value ) : contained_type = field_type . contained_type if is_dynamic : contained_type = contained_type ( item ) instance_path = self . _append_path ( path , i ) if isinstance ( contained_type , Schema ) : contained_type . _validate_instance ( item , errors , instance_path ) elif not isinstance ( item , contained_type ) : errors [ instance_path ] = "Array item at {} is of incorrect type" . format ( instance_path ) continue else : errors [ path ] = "{} should be an embedded array" . format ( path ) return elif not isinstance ( value , field_type ) : errors [ path ] = "Field should be of type {}" . format ( field_type ) return validations = field_spec . get ( 'validates' , None ) if validations is None : return self . _apply_validations ( errors , path , validations , value )
def _connectionEstablished(self, transport):
    '''Store a reference to our transport and write an open frame.'''
    self.transport = transport
    self.transport.writeOpen()
    self.heartbeater.schedule()
def bitrate(self):
    """The number of bits per second used in the audio coding (an int).

    If this is provided explicitly by the compressed file format, this is a
    precise reflection of the encoding. Otherwise, it is estimated from the
    on-disk file size. In this case, some imprecision is possible because
    the file header is incorporated in the file size.
    """
    if hasattr(self.mgfile.info, 'bitrate') and self.mgfile.info.bitrate:
        # Many formats provide it explicitly.
        return self.mgfile.info.bitrate
    else:
        # Otherwise, we calculate bitrate from the file size. (This
        # is the case for all of the lossless formats.)
        if not self.length:
            # Avoid division by zero if length is not available.
            return 0
        size = os.path.getsize(self.path)
        return int(size * 8 / self.length)
def genms(self, scans=[]):
    """Generate an MS that contains all calibrator scans with 1s integration time."""
    if len(scans):
        scanstr = string.join([str(ss) for ss in sorted(scans)], ',')
    else:
        scanstr = self.allstr

    print 'Splitting out all cal scans (%s) with 1s int time' % scanstr
    newname = ps.sdm2ms(self.sdmfile, self.sdmfile.rstrip('/') + '.ms', scanstr,
                        inttime='1')  # integrate down to 1s during split
    return newname
def intersectionlist_to_matrix(ilist, xterms, yterms):
    """WILL BE DEPRECATED

    Replace with method to return pandas dataframe
    """
    z = [[0] * len(xterms) for i1 in range(len(yterms))]
    xmap = {}
    xi = 0
    for x in xterms:
        xmap[x] = xi
        xi = xi + 1
    ymap = {}
    yi = 0
    for y in yterms:
        ymap[y] = yi
        yi = yi + 1
    for i in ilist:
        z[ymap[i['y']]][xmap[i['x']]] = i['j']
    logging.debug("Z={}".format(z))
    return (z, xterms, yterms)
def create_user_task(sender=None, body=None, **kwargs):  # pylint: disable=unused-argument
    """Create a :py:class:`UserTaskStatus` record for each :py:class:`UserTaskMixin`.

    Also creates a :py:class:`UserTaskStatus` for each chain, chord, or group
    containing the new :py:class:`UserTaskMixin`.
    """
    try:
        task_class = import_string(sender)
    except ImportError:
        return
    if issubclass(task_class.__class__, UserTaskMixin):
        arguments_dict = task_class.arguments_as_dict(*body['args'], **body['kwargs'])
        user_id = _get_user_id(arguments_dict)
        task_id = body['id']
        if body.get('callbacks', []):
            return _create_chain_entry(user_id, task_id, task_class, body['args'],
                                       body['kwargs'], body['callbacks'])
        if body.get('chord', None):
            return _create_chord_entry(task_id, task_class, body, user_id)
        parent = _get_or_create_group_parent(body, user_id)
        name = task_class.generate_name(arguments_dict)
        total_steps = task_class.calculate_total_steps(arguments_dict)
        UserTaskStatus.objects.get_or_create(
            task_id=task_id,
            defaults={'user_id': user_id, 'parent': parent, 'name': name,
                      'task_class': sender, 'total_steps': total_steps})
        if parent:
            parent.increment_total_steps(total_steps)
def _get_msiexec(use_msiexec):
    '''Return if msiexec.exe will be used and the command to invoke it.'''
    if use_msiexec is False:
        return False, ''
    if isinstance(use_msiexec, six.string_types):
        if os.path.isfile(use_msiexec):
            return True, use_msiexec
        else:
            log.warning("msiexec path '%s' not found. Using system registered "
                        "msiexec instead", use_msiexec)
            use_msiexec = True
    if use_msiexec is True:
        return True, 'msiexec'
def url_join ( base , url , allow_fragments = True ) : """Join a base URL and a possibly relative URL to form an absolute interpretation of the latter . : param base : the base URL for the join operation . : param url : the URL to join . : param allow _ fragments : indicates whether fragments should be allowed ."""
if isinstance ( base , tuple ) : base = url_unparse ( base ) if isinstance ( url , tuple ) : url = url_unparse ( url ) base , url = normalize_string_tuple ( ( base , url ) ) s = make_literal_wrapper ( base ) if not base : return url if not url : return base bscheme , bnetloc , bpath , bquery , bfragment = url_parse ( base , allow_fragments = allow_fragments ) scheme , netloc , path , query , fragment = url_parse ( url , bscheme , allow_fragments ) if scheme != bscheme : return url if netloc : return url_unparse ( ( scheme , netloc , path , query , fragment ) ) netloc = bnetloc if path [ : 1 ] == s ( "/" ) : segments = path . split ( s ( "/" ) ) elif not path : segments = bpath . split ( s ( "/" ) ) if not query : query = bquery else : segments = bpath . split ( s ( "/" ) ) [ : - 1 ] + path . split ( s ( "/" ) ) # If the rightmost part is " . / " we want to keep the slash but # remove the dot . if segments [ - 1 ] == s ( "." ) : segments [ - 1 ] = s ( "" ) # Resolve " . . " and " . " segments = [ segment for segment in segments if segment != s ( "." ) ] while 1 : i = 1 n = len ( segments ) - 1 while i < n : if segments [ i ] == s ( ".." ) and segments [ i - 1 ] not in ( s ( "" ) , s ( ".." ) ) : del segments [ i - 1 : i + 1 ] break i += 1 else : break # Remove trailing " . . " if the URL is absolute unwanted_marker = [ s ( "" ) , s ( ".." ) ] while segments [ : 2 ] == unwanted_marker : del segments [ 1 ] path = s ( "/" ) . join ( segments ) return url_unparse ( ( scheme , netloc , path , query , fragment ) )
def findRequirements():
    """Read the requirements.txt file and parse into requirements for setup's
    install_requirements option.
    """
    requirementsPath = os.path.join(REPO_DIR, "requirements.txt")
    requirements = parse_file(requirementsPath)

    # User has a pre-release version of numenta packages installed, which is only
    # possible if the user installed and built the packages from source and
    # it is up to the user to decide when to update these packages. We'll
    # quietly remove the entries in requirements.txt so as to not conflate the
    # two.
    if nupicPrereleaseInstalled():
        requirements = [req for req in requirements if "nupic" not in req]
    if htmresearchCorePrereleaseInstalled():
        requirements = [req for req in requirements if "htmresearch-core" not in req]

    return requirements
async def emit(self, event, data=None, namespace=None, callback=None):
    """Emit a custom event to the server.

    The only difference with the :func:`socketio.Client.emit` method is that
    when the ``namespace`` argument is not given the namespace associated
    with the class is used.

    Note: this method is a coroutine.
    """
    return await self.client.emit(event, data=data,
                                  namespace=namespace or self.namespace,
                                  callback=callback)
def is_valid_hendecasyllables(self, scanned_line: str) -> bool:
    """Determine if a scansion pattern is one of the valid Hendecasyllables
    metrical patterns.

    :param scanned_line: a line containing a sequence of stressed and unstressed syllables
    :return bool

    >>> print(MetricalValidator().is_valid_hendecasyllables("-U-UU-U-U-U"))
    True
    """
    line = scanned_line.replace(self.constants.FOOT_SEPARATOR, "")
    line = line.replace(" ", "")
    if len(line) < 11:
        return False
    line = line[:-1] + self.constants.OPTIONAL_ENDING
    return self.VALID_HENDECASYLLABLES.__contains__(line)
def _compute_term_3(self, C, rrup, mag):
    """This computes the third term in equation 2, page 2."""
    return (C['a3'] *
            np.log10(rrup + C['a4'] * np.power(10, C['a5'] * mag)))
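# Written out, the term returned above is (reconstructed directly from the code;
# a3-a5 are the model coefficients stored in C, rrup the rupture distance and
# mag the magnitude M):
#
#     a_3 \log_{10}\!\left( R_{\mathrm{rup}} + a_4 \cdot 10^{\,a_5 M} \right)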
def ProcessMessage ( self , message ) : """Begins an enrollment flow for this client . Args : message : The Certificate sent by the client . Note that this message is not authenticated ."""
cert = rdf_crypto . Certificate ( message . payload ) queue = self . well_known_session_id . Queue ( ) client_id = message . source # It makes no sense to enrol the same client multiple times , so we # eliminate duplicates . Note , that we can still enroll clients multiple # times due to cache expiration . try : enrolment_cache . Get ( client_id ) return except KeyError : enrolment_cache . Put ( client_id , 1 ) # Create a new client object for this client . if data_store . AFF4Enabled ( ) : client = aff4 . FACTORY . Create ( client_id , aff4_grr . VFSGRRClient , mode = "rw" , token = self . token ) client_cert = client . Get ( client . Schema . CERT ) if data_store . RelationalDBEnabled ( ) : try : md = data_store . REL_DB . ReadClientMetadata ( client_id . Basename ( ) ) client_cert = md . certificate except db . UnknownClientError : client_cert = None if data_store . RelationalDBEnabled ( ) : data_store . REL_DB . WriteClientMetadata ( client_id . Basename ( ) , fleetspeak_enabled = False ) # Only enroll this client if it has no certificate yet . if not client_cert : # Start the enrollment flow for this client . # Note , that the actual CAEnroler class is autogenerated from the # CAEnrolerMixin by the DualDBFlow decorator confusing the linter - hence # the disable directive . flow . StartAFF4Flow ( client_id = client_id , flow_name = CAEnroler . __name__ , # pylint : disable = undefined - variable csr = cert , queue = queue , token = self . token )
def build_duration(self):
    """Return the difference between build and build_done states."""
    return int(self.state.build_done) - int(self.state.build)
def bed12(self, score="0", rgb="."):
    """return a bed12 (http://genome.ucsc.edu/FAQ/FAQformat.html#format1)
    representation of this interval
    """
    if not self.is_gene_pred:
        raise CruzException("can't create bed12 from non genepred feature")
    exons = list(self.exons)
    # go from global start, stop, to relative start, length...
    sizes = ",".join([str(e[1] - e[0]) for e in exons]) + ","
    starts = ",".join([str(e[0] - self.txStart) for e in exons]) + ","
    name = self.name2 + "," + self.name if hasattr(self, "name2") else self.name
    return "\t".join(map(str, (
        self.chrom, self.txStart, self.txEnd, name,
        score, self.strand, self.cdsStart, self.cdsEnd, rgb,
        len(exons), sizes, starts)))
def make_serializable(json):
    """This function ensures that the dictionary is JSON serializable. If not,
    keys with non-serializable values are removed from the return value.

    Args:
        json (dict): Dictionary to convert to serializable

    Returns:
        new_dict (dict): New dictionary with non JSON serializable values removed
    """
    new_dict = dict()
    for key, value in iteritems(json):
        if is_valid_json(value):
            new_dict[key] = value
    return new_dict
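# A small usage sketch of `make_serializable`. The helper `is_valid_json` is not
# part of this excerpt, so an assumed implementation (a value counts as valid if
# json.dumps accepts it) is included so the example stands on its own.
import json as _json

def is_valid_json(value):
    # assumed helper: serializable means json.dumps() does not raise
    try:
        _json.dumps(value)
        return True
    except TypeError:
        return False

payload = {"name": "sensor-1", "reading": 3.2, "raw": object()}
print(make_serializable(payload))  # {'name': 'sensor-1', 'reading': 3.2}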
def set_CCLE_context ( self , cell_types ) : """Set context of all nodes and node members from CCLE ."""
self . get_gene_names ( ) # Get expression and mutations from context client exp_values = context_client . get_protein_expression ( self . _gene_names , cell_types ) mut_values = context_client . get_mutations ( self . _gene_names , cell_types ) # Make a dict of presence / absence of mutations muts = { cell_line : { } for cell_line in cell_types } for cell_line , entries in mut_values . items ( ) : if entries is not None : for gene , mutations in entries . items ( ) : if mutations : muts [ cell_line ] [ gene ] = 1 else : muts [ cell_line ] [ gene ] = 0 # Create bins for the exp values # because colorbrewer only does 3-9 bins and I don ' t feel like # reinventing color scheme theory , this will only bin 3-9 bins def bin_exp ( expression_dict ) : d = expression_dict exp_values = [ ] for line in d : for gene in d [ line ] : val = d [ line ] [ gene ] if val is not None : exp_values . append ( val ) thr_dict = { } for n_bins in range ( 3 , 10 ) : bin_thr = np . histogram ( np . log10 ( exp_values ) , n_bins ) [ 1 ] [ 1 : ] thr_dict [ n_bins ] = bin_thr # this dict isn ' t yet binned , that happens in the loop binned_dict = { x : deepcopy ( expression_dict ) for x in range ( 3 , 10 ) } for n_bins in binned_dict : for line in binned_dict [ n_bins ] : for gene in binned_dict [ n_bins ] [ line ] : # last bin is reserved for None if binned_dict [ n_bins ] [ line ] [ gene ] is None : binned_dict [ n_bins ] [ line ] [ gene ] = n_bins else : val = np . log10 ( binned_dict [ n_bins ] [ line ] [ gene ] ) for thr_idx , thr in enumerate ( thr_dict [ n_bins ] ) : if val <= thr : binned_dict [ n_bins ] [ line ] [ gene ] = thr_idx break return binned_dict binned_exp = bin_exp ( exp_values ) context = { 'bin_expression' : binned_exp , 'mutation' : muts } self . _context [ 'CCLE' ] = context
def page(self, course, username):
    """Get all data and display the page"""
    data = list(self.database.user_tasks.find({"username": username, "courseid": course.get_id()}))
    tasks = course.get_tasks()
    result = dict([(taskid, {"taskid": taskid,
                             "name": tasks[taskid].get_name(self.user_manager.session_language()),
                             "tried": 0,
                             "status": "notviewed",
                             "grade": 0,
                             "url": self.submission_url_generator(username, taskid)})
                   for taskid in tasks])
    for taskdata in data:
        if taskdata["taskid"] in result:
            result[taskdata["taskid"]]["tried"] = taskdata["tried"]
            if taskdata["tried"] == 0:
                result[taskdata["taskid"]]["status"] = "notattempted"
            elif taskdata["succeeded"]:
                result[taskdata["taskid"]]["status"] = "succeeded"
            else:
                result[taskdata["taskid"]]["status"] = "failed"
            result[taskdata["taskid"]]["grade"] = taskdata["grade"]
            result[taskdata["taskid"]]["submissionid"] = str(taskdata["submissionid"])
    if "csv" in web.input():
        return make_csv(result)
    results = sorted(list(result.values()),
                     key=lambda result: (tasks[result["taskid"]].get_order(), result["taskid"]))
    return self.template_helper.get_renderer().course_admin.student_info(course, username, results)
def cache_penalty_model(penalty_model, database=None):
    """Caching function for penaltymodel_cache.

    Args:
        penalty_model (:class:`penaltymodel.PenaltyModel`): Penalty model
            to be cached.
        database (str, optional): The path to the desired sqlite database
            file. If None, will use the default.
    """
    # only handles index-labelled nodes
    if not _is_index_labelled(penalty_model.graph):
        mapping, __ = _graph_canonicalization(penalty_model.graph)
        penalty_model = penalty_model.relabel_variables(mapping, inplace=False)

    # connect to the database. Note that once the connection is made it cannot be
    # broken up between several processes.
    if database is None:
        conn = cache_connect()
    else:
        conn = cache_connect(database)

    # load into the database
    with conn as cur:
        insert_penalty_model(cur, penalty_model)

    # close the connection
    conn.close()
def helical_geometry ( space , src_radius , det_radius , num_turns , n_pi = 1 , num_angles = None , det_shape = None ) : """Create a default helical geometry from ` ` space ` ` . This function is intended for simple test cases where users do not need the full flexibility of the geometries , but simply wants a geometry that works . The geometry returned by this function has equidistant angles that lie ( strictly ) between 0 and ` ` 2 * pi * num _ turns ` ` . The detector is centered around 0 , and its size is chosen such that the whole ` ` space ` ` is covered with lines . The number of angles and detector elements is chosen such that the resulting sinogram is fully sampled according to the Nyquist criterion , which in general results in a very large number of samples . In particular , a ` ` space ` ` that is not centered at the origin can result in very large detectors since the latter is always origin - centered . Parameters space : ` DiscreteLp ` Reconstruction space , the space of the volumetric data to be projected . Must be 3 - dimensional . src _ radius : nonnegative float Radius of the source circle . Must be larger than the radius of the smallest vertical cylinder containing ` ` space . domain ` ` , i . e . , the source must be outside the volume for all rotations . det _ radius : nonnegative float Radius of the detector circle . num _ turns : positive float Total number of helical turns . num _ angles : int , optional Number of angles . Default : Enough to fully sample the data , see Notes . n _ pi : odd int , optional Total number of half rotations to include in the window . Values larger than 1 should be used if the pitch is much smaller than the detector height . det _ shape : int or sequence of ints , optional Number of detector pixels . Default : Enough to fully sample the data , see Notes . Returns geometry : ` ConeFlatGeometry ` Projection geometry with equidistant angles and zero - centered detector as determined by sampling criteria . Examples Create a helical beam geometry from space : > > > space = odl . uniform _ discr ( [ - 1 , - 1 , - 1 ] , [ 1 , 1 , 1 ] , ( 20 , 20 , 20 ) ) > > > geometry = helical _ geometry ( space , src _ radius = 5 , det _ radius = 5, . . . num _ turns = 3) > > > geometry . angles . size 234 > > > geometry . detector . shape (57 , 9) Notes In the " fan beam direction " , the sampling exactly follows the two - dimensional case see ` cone _ beam _ geometry ` for a description . In the " axial direction " , e . g . along the [ 0 , 0 , 1 ] axis , the geometry is sampled according to two criteria . First , the bounds of the detector are chosen to satisfy the tuy condition . See ` [ TSS1998 ] ` _ for a full description . Second , the sampling rate is selected according to the nyquist criterion to give a full sampling . This is done by sampling such that the pixel size is half of the size of the projection of the smallest voxel onto the detector . References [ TSS1998 ] Tam , K C , Samarasekera , S and Sauer , F . * Exact cone beam CT with a spiral scan * . Physics in Medicine & Biology 4 ( 1998 ) , p 1015. . . _ [ TSS1998 ] : https : / / dx . doi . org / 10.1088/0031-9155/43/4/028"""
# Find maximum distance from rotation axis corners = space . domain . corners ( ) [ : , : 2 ] rho = np . max ( np . linalg . norm ( corners , axis = 1 ) ) offset_along_axis = space . partition . min_pt [ 2 ] pitch = space . partition . extent [ 2 ] / num_turns # Find default values according to Nyquist criterion . # We assume that the function is bandlimited by a wave along the x or y # axis . The highest frequency we can measure is then a standing wave with # period of twice the inter - node distance . min_side = min ( space . partition . cell_sides [ : 2 ] ) omega = np . pi / min_side # Compute minimum width of the detector to cover the object . The relation # used here is ( w / 2 ) / ( rs + rd ) = rho / rs since both are equal to tan ( alpha ) , # where alpha is the half fan angle . rs = float ( src_radius ) if ( rs <= rho ) : raise ValueError ( 'source too close to the object, resulting in ' 'infinite detector for full coverage' ) rd = float ( det_radius ) r = rs + rd w = 2 * rho * ( rs + rd ) / rs # Compute minimum number of pixels given the constraint on the # sampling interval and the computed width rb = np . hypot ( r , w / 2 ) # length of the boundary ray to the flat detector num_px_horiz = 2 * int ( np . ceil ( w * omega * r / ( 2 * np . pi * rb ) ) ) + 1 # Compute lower and upper bound needed to fully sample the object . # In particular , since in a helical geometry several turns are used , # this is selected so that the field of view of two opposing projections , # separated by theta = 180 deg , overlap , but as little as possible . # See ` tam _ danielson _ window ` for more information . h_axis = ( pitch / ( 2 * np . pi ) * ( 1 + ( - rho / src_radius ) ** 2 ) * ( n_pi * np . pi / 2.0 - np . arctan ( - rho / src_radius ) ) ) h = 2 * h_axis * ( rs + rd ) / rs # Compute number of pixels min_mag = r / rs dh = 0.5 * space . partition . cell_sides [ 2 ] * min_mag num_px_vert = int ( np . ceil ( h / dh ) ) det_min_pt = [ - w / 2 , - h / 2 ] det_max_pt = [ w / 2 , h / 2 ] if det_shape is None : det_shape = [ num_px_horiz , num_px_vert ] max_angle = 2 * np . pi * num_turns if num_angles is None : num_angles = int ( np . ceil ( max_angle * omega * rho / np . pi * r / ( r + rho ) ) ) angle_partition = uniform_partition ( 0 , max_angle , num_angles ) det_partition = uniform_partition ( det_min_pt , det_max_pt , det_shape ) return ConeFlatGeometry ( angle_partition , det_partition , src_radius , det_radius , offset_along_axis = offset_along_axis , pitch = pitch )
def search ( self , title = None , sort = None , maxresults = 999999 , libtype = None , ** kwargs ) : """Search the library . If there are many results , they will be fetched from the server in batches of X _ PLEX _ CONTAINER _ SIZE amounts . If you ' re only looking for the first < num > results , it would be wise to set the maxresults option to that amount so this functions doesn ' t iterate over all results on the server . Parameters : title ( str ) : General string query to search for ( optional ) . sort ( str ) : column : dir ; column can be any of { addedAt , originallyAvailableAt , lastViewedAt , titleSort , rating , mediaHeight , duration } . dir can be asc or desc ( optional ) . maxresults ( int ) : Only return the specified number of results ( optional ) . libtype ( str ) : Filter results to a spcifiec libtype ( movie , show , episode , artist , album , track ; optional ) . * * kwargs ( dict ) : Any of the available filters for the current library section . Partial string matches allowed . Multiple matches OR together . Negative filtering also possible , just add an exclamation mark to the end of filter name , e . g . ` resolution ! = 1x1 ` . * unwatched : Display or hide unwatched content ( True , False ) . [ all ] * duplicate : Display or hide duplicate items ( True , False ) . [ movie ] * actor : List of actors to search ( [ actor _ or _ id , . . . ] ) . [ movie ] * collection : List of collections to search within ( [ collection _ or _ id , . . . ] ) . [ all ] * contentRating : List of content ratings to search within ( [ rating _ or _ key , . . . ] ) . [ movie , tv ] * country : List of countries to search within ( [ country _ or _ key , . . . ] ) . [ movie , music ] * decade : List of decades to search within ( [ yyy0 , . . . ] ) . [ movie ] * director : List of directors to search ( [ director _ or _ id , . . . ] ) . [ movie ] * genre : List Genres to search within ( [ genere _ or _ id , . . . ] ) . [ all ] * network : List of TV networks to search within ( [ resolution _ or _ key , . . . ] ) . [ tv ] * resolution : List of video resolutions to search within ( [ resolution _ or _ key , . . . ] ) . [ movie ] * studio : List of studios to search within ( [ studio _ or _ key , . . . ] ) . [ music ] * year : List of years to search within ( [ yyyy , . . . ] ) . [ all ] Raises : : class : ` plexapi . exceptions . BadRequest ` : when applying unknown filter"""
# cleanup the core arguments args = { } for category , value in kwargs . items ( ) : args [ category ] = self . _cleanSearchFilter ( category , value , libtype ) if title is not None : args [ 'title' ] = title if sort is not None : args [ 'sort' ] = self . _cleanSearchSort ( sort ) if libtype is not None : args [ 'type' ] = utils . searchType ( libtype ) # iterate over the results results , subresults = [ ] , '_init' args [ 'X-Plex-Container-Start' ] = 0 args [ 'X-Plex-Container-Size' ] = min ( X_PLEX_CONTAINER_SIZE , maxresults ) while subresults and maxresults > len ( results ) : key = '/library/sections/%s/all%s' % ( self . key , utils . joinArgs ( args ) ) subresults = self . fetchItems ( key ) results += subresults [ : maxresults - len ( results ) ] args [ 'X-Plex-Container-Start' ] += args [ 'X-Plex-Container-Size' ] return results
def gen_df_save(df_grid_group: pd.DataFrame) -> pd.DataFrame:
    '''generate a dataframe for saving

    Parameters
    ----------
    df_grid_group : pd.DataFrame
        an output dataframe of a single group and grid

    Returns
    -------
    pd.DataFrame
        a dataframe with date time info prepended for saving
    '''
    # generate df_datetime for prepending
    idx_dt = df_grid_group.index
    ser_year = pd.Series(idx_dt.year, index=idx_dt, name='Year')
    ser_DOY = pd.Series(idx_dt.dayofyear, index=idx_dt, name='DOY')
    ser_hour = pd.Series(idx_dt.hour, index=idx_dt, name='Hour')
    ser_min = pd.Series(idx_dt.minute, index=idx_dt, name='Min')
    df_datetime = pd.concat([
        ser_year,
        ser_DOY,
        ser_hour,
        ser_min,
    ], axis=1)
    df_datetime['Dectime'] = ser_DOY - 1 + idx_dt.to_perioddelta('d').total_seconds() / (24 * 60 * 60)
    df_save = pd.concat([df_datetime, df_grid_group], axis=1)
    return df_save
def _iter_service_names():
    '''
    Detect all of the service names available to upstart via init configuration
    files and via classic sysv init scripts
    '''
    found = set()
    for line in glob.glob('/etc/init.d/*'):
        name = os.path.basename(line)
        found.add(name)
        yield name

    # This walk method supports nested services as per the init man page
    # definition 'For example a configuration file /etc/init/rc-sysinit.conf
    # is named rc-sysinit, while a configuration file /etc/init/net/apache.conf
    # is named net/apache'
    init_root = '/etc/init/'
    for root, dirnames, filenames in salt.utils.path.os_walk(init_root):
        relpath = os.path.relpath(root, init_root)
        for filename in fnmatch.filter(filenames, '*.conf'):
            if relpath == '.':
                # service is defined in the root, no need to append prefix.
                name = filename[:-5]
            else:
                # service is nested, append its relative path prefix.
                name = os.path.join(relpath, filename[:-5])
            if name in found:
                continue
            yield name
def summarise_events(self):
    """takes the logfiles and produces an event summary matrix

    date      command  result  process  source
    20140421        9      40      178       9
    20140423        0       0        6       0
    20140424       19       1       47      19
    20140425       24       0      117      24
    20140426       16       0       83      16
    20140427        1       0        6       1
    20140429        0       0        0       4
    """
    all_dates = []
    d_command = self._count_by_date(self.command_file, all_dates)
    d_result = self._count_by_date(self.result_file, all_dates)
    d_process = self._count_by_date(self.process_file, all_dates)
    d_source = self._count_by_date(self.source_file, all_dates)

    with open(self.log_sum, "w") as sum_file:
        sum_file.write('date,command,result,process,source\n')
        for dte in sorted(set(all_dates)):
            sum_file.write(dte + ',')
            if dte in d_command:
                sum_file.write(str(d_command[dte]) + ',')
            else:
                sum_file.write('0,')
            if dte in d_result:
                sum_file.write(str(d_result[dte]) + ',')
            else:
                sum_file.write('0,')
            if dte in d_process:
                sum_file.write(str(d_process[dte]) + ',')
            else:
                sum_file.write('0,')
            if dte in d_source:
                sum_file.write(str(d_source[dte]) + '\n')
            else:
                sum_file.write('0\n')
def setup(self, url, stream=True, post=False, parameters=None, timeout=None):
    # type: (str, bool, bool, Optional[Dict], Optional[float]) -> requests.Response
    """Setup download from provided url returning the response

    Args:
        url (str): URL to download
        stream (bool): Whether to stream download. Defaults to True.
        post (bool): Whether to use POST instead of GET. Defaults to False.
        parameters (Optional[Dict]): Parameters to pass. Defaults to None.
        timeout (Optional[float]): Timeout for connecting to URL. Defaults to None (no timeout).

    Returns:
        requests.Response: requests.Response object
    """
    self.close_response()
    self.response = None
    try:
        if post:
            full_url, parameters = self.get_url_params_for_post(url, parameters)
            self.response = self.session.post(full_url, data=parameters, stream=stream,
                                              timeout=timeout)
        else:
            self.response = self.session.get(self.get_url_for_get(url, parameters),
                                             stream=stream, timeout=timeout)
        self.response.raise_for_status()
    except Exception as e:
        raisefrom(DownloadError, 'Setup of Streaming Download of %s failed!' % url, e)
    return self.response
def generate_pos_tagger(check_accuracy=False):
    """Accuracy is about 0.94 with 90% training data."""
    global tagger
    logging.debug("Reading TIGER corpus")
    corp = nltk.corpus.ConllCorpusReader(DIR_PATH, TIGER_FILE_NAME,
                                         ['ignore', 'words', 'ignore', 'ignore', 'pos'],
                                         encoding='utf-8')
    tagged_sents = list(corp.tagged_sents())
    logging.debug("Shuffling sentences")
    random.shuffle(tagged_sents)

    if check_accuracy:
        # set a split size: use 90% for training, 10% for testing
        split_perc = 0.1
        split_size = int(len(tagged_sents) * split_perc)
        train_sents, test_sents = tagged_sents[split_size:], tagged_sents[:split_size]
    else:
        train_sents = tagged_sents

    logging.debug("Training Tagger")
    tagger = ClassifierBasedGermanTagger(train=train_sents)
    logging.debug("Training finished")

    if check_accuracy:
        accuracy = tagger.evaluate(test_sents)
        logging.debug("Accurracy is {}.".format(accuracy))

    logging.debug("Serializing the Tagger")
    with open(os.path.join(DIR_PATH, TAGGER_FILE_NAME), 'wb') as f:
        pickle.dump(tagger, f, protocol=3)
def update(self, status, source=None, params={}):
    "Update your status. Returns the ID of the new post."
    params = params.copy()
    params['status'] = status
    if source:
        params['source'] = source
    return self.__parsed_post(self.__post('/statuses/update.xml', params),
                              txml.parseUpdateResponse)
def subtract_imagenet_mean_preprocess_batch(batch):
    """Subtract ImageNet mean pixel-wise from a BGR image."""
    batch = F.swapaxes(batch, 0, 1)
    (r, g, b) = F.split(batch, num_outputs=3, axis=0)
    r = r - 123.680
    g = g - 116.779
    b = b - 103.939
    batch = F.concat(b, g, r, dim=0)
    batch = F.swapaxes(batch, 0, 1)
    return batch
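# A minimal call sketch. The `F` namespace used above is assumed to be bound to
# mxnet.nd (common in this style of code but not shown in the excerpt); shapes
# and values are illustrative only.
import mxnet as mx
F = mx.nd  # assumed binding for the F namespace used by the function

# fake NCHW batch of two 4x4 RGB images with values in [0, 255]
batch = mx.nd.random.uniform(0, 255, shape=(2, 3, 4, 4))
out = subtract_imagenet_mean_preprocess_batch(batch)
print(out.shape)  # (2, 3, 4, 4): channels reordered to BGR, means subtracted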
def cmp(self, other_service):
    """Compare with an instance of this object.

    Returns None if the object is not comparable, False if relevant
    attributes don't match and True if they do.
    """
    if not isinstance(other_service, HttpService):
        return None
    for att in dir(self):
        if att == 'cmp' or att.startswith('_'):
            continue
        if not hasattr(other_service, att):
            return None
        if getattr(self, att) != getattr(other_service, att):
            return False
    return True
def embed_snippet(views, drop_defaults=True, state=None, indent=2,
                  embed_url=None, requirejs=True, cors=True):
    """Return a snippet that can be embedded in an HTML file.

    Parameters
    ----------
    {views_attribute}
    {embed_kwargs}

    Returns
    -------
    A unicode string with an HTML snippet containing several `<script>` tags.
    """
    data = embed_data(views, drop_defaults=drop_defaults, state=state)

    widget_views = u'\n'.join(
        widget_view_template.format(view_spec=escape_script(json.dumps(view_spec)))
        for view_spec in data['view_specs']
    )

    if embed_url is None:
        embed_url = DEFAULT_EMBED_REQUIREJS_URL if requirejs else DEFAULT_EMBED_SCRIPT_URL

    load = load_requirejs_template if requirejs else load_template

    use_cors = ' crossorigin="anonymous"' if cors else ''
    values = {
        'load': load.format(embed_url=embed_url, use_cors=use_cors),
        'json_data': escape_script(json.dumps(data['manager_state'], indent=indent)),
        'widget_views': widget_views,
    }

    return snippet_template.format(**values)
def update_video(self):
    """Read list of files, convert to video time, and add video to queue."""
    window_start = self.parent.value('window_start')
    window_length = self.parent.value('window_length')

    d = self.parent.info.dataset
    videos, begsec, endsec = d.read_videos(window_start, window_start + window_length)
    lg.debug(f'Video: {begsec} - {endsec}')
    self.endsec = endsec

    videos = [str(v) for v in videos]  # make sure it's a str (not path)
    medialist = vlc.MediaList(videos)
    self.medialistplayer.set_media_list(medialist)
    self.cnt_video = 0
    self.n_video = len(videos)

    self.t = QTimer()
    self.t.timeout.connect(self.check_if_finished)
    self.t.start(100)

    self.medialistplayer.play()
    self.mediaplayer.set_time(int(begsec * 1000))
def _read_http_data ( self , size , kind , flag ) : """Read HTTP / 2 DATA frames . Structure of HTTP / 2 DATA frame [ RFC 7540 ] : | Length ( 24 ) | | Type ( 8 ) | Flags ( 8 ) | | R | Stream Identifier ( 31 ) | | Pad Length ? ( 8 ) | | Data ( * ) . . . | Padding ( * ) . . . Octets Bits Name Description 0 0 http . length Length 3 24 http . type Type ( 0) 4 32 http . flags Flags 5 40 - Reserved 5 41 http . sid Stream Identifier 9 72 http . pad _ len Pad Length ( Optional ) 10 80 http . data Data ? ? - Padding ( Optional )"""
_plen = 0 _flag = dict ( END_STREAM = False , # bit 0 PADDED = False , # bit 3 ) for index , bit in enumerate ( flag ) : if index == 0 and bit : _flag [ 'END_STREAM' ] = True elif index == 3 and bit : _flag [ 'PADDED' ] = True _plen = self . _read_unpack ( 1 ) elif bit : raise ProtocolError ( f'HTTP/2: [Type {kind}] invalid format' , quiet = True ) else : continue if _plen > size - 10 : raise ProtocolError ( f'HTTP/2: [Type {kind}] invalid format' , quiet = True ) if _flag [ 'PADDED' ] : _dlen = size - _plen - 1 else : _dlen = size - _plen if _dlen < 0 : raise ProtocolError ( f'HTTP/2: [Type {kind}] invalid format' , quiet = True ) _data = self . _read_fileng ( _dlen ) padding = self . _read_binary ( _plen ) if any ( ( int ( bit , base = 2 ) for bit in padding ) ) : raise ProtocolError ( f'HTTP/2: [Type {kind}] invalid format' , quiet = True ) data = dict ( flags = _flag , data = _data , ) if _flag [ 'PADDED' ] : data [ 'ped_len' ] = _plen return data
def patch_qs(url: Text, data: Dict[Text, Text]) -> Text:
    """Given an URL, change the query string to include the values specified
    in the dictionary.

    If the keys of the dictionary can be found in the query string of the URL,
    then they will be removed.

    It is guaranteed that all other values of the query string will keep their
    order.
    """
    qs_id = 4
    p = list(urlparse(url))
    qs = parse_qsl(p[qs_id])  # type: List[Tuple[Text, Text]]
    patched_qs = list(chain(
        filter(lambda x: x[0] not in data, qs),
        data.items(),
    ))
    p[qs_id] = urlencode(patched_qs)
    return urlunparse(p)
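# A short usage sketch of `patch_qs`; the URL is made up, and the appended keys
# come out in dictionary insertion order.
url = "https://example.com/search?q=cats&page=2"
print(patch_qs(url, {"page": "3", "sort": "new"}))
# -> https://example.com/search?q=cats&page=3&sort=new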
def destroy_window(window):
    '''
    Destroys the specified window and its context.

    Wrapper for:
        void glfwDestroyWindow(GLFWwindow* window);
    '''
    _glfw.glfwDestroyWindow(window)
    window_addr = ctypes.cast(ctypes.pointer(window),
                              ctypes.POINTER(ctypes.c_ulong)).contents.value
    for callback_repository in _callback_repositories:
        if window_addr in callback_repository:
            del callback_repository[window_addr]
def _make_sparse_blocks_with_virtual(self, variable, records, data):
    '''
    Handles the data for the variable with sparse records.
    Organizes the physical record numbers into blocks in a list:
        [[start_rec1, end_rec1, data_1], [start_rec2, end_rec2, data_2], ...]
    Place consecutive physical records into a single block

    Parameters:
        variable : dict
            the variable, returned from varinq('variable', expand=True)
        records : list
            a list of physical records
        data : varies
            bytes array, numpy.ndarray or list of str form with virtual
            data embedded, returned from varget('variable') call
    '''
    # Gather the ranges for which we have physical data
    sparse_blocks = CDF._make_blocks(records)

    sparse_data = []
    if isinstance(data, np.ndarray):
        for sblock in sparse_blocks:
            # each block in this list: [starting_rec#, ending_rec#, data]
            asparse = []
            asparse.append(sblock[0])
            asparse.append(sblock[1])
            starting = sblock[0]
            ending = sblock[1] + 1
            asparse.append(data[starting:ending])
            sparse_data.append(asparse)
        return sparse_data
    elif isinstance(data, bytes):
        y = 1
        for z in range(0, variable['Num_Dims']):
            y = y * variable['Dim_Sizes'][z]
        y = y * CDF._datatype_size(variable['Data_Type'], variable['Num_Elements'])
        for sblock in sparse_blocks:
            # each block in this list: [starting_rec#, ending_rec#, data]
            asparse = []
            asparse.append(sblock[0])
            asparse.append(sblock[1])
            starting = sblock[0] * y
            ending = (sblock[1] + 1) * y
            asparse.append(data[starting:ending])
            sparse_data.append(asparse)
        return sparse_data
    elif isinstance(data, list):
        for sblock in sparse_blocks:
            # each block in this list: [starting_rec#, ending_rec#, data]
            asparse = []
            asparse.append(sblock[0])
            asparse.append(sblock[1])
            records = sblock[1] - sblock[0] + 1
            datax = []
            ist = sblock[0]
            for z in range(0, records):
                datax.append(data[ist + z])
            asparse.append(datax)
            sparse_data.append(asparse)
        return sparse_data
    else:
        print('Can not handle data... Skip')
        return None
def receive():
    """Receive a command from Training Service.

    Returns a tuple of command (CommandType) and payload (str)
    """
    header = _in_file.read(8)
    logging.getLogger(__name__).debug('Received command, header: [%s]' % header)
    if header is None or len(header) < 8:
        # Pipe EOF encountered
        logging.getLogger(__name__).debug('Pipe EOF encountered')
        return None, None
    length = int(header[2:])
    data = _in_file.read(length)
    command = CommandType(header[:2])
    data = data.decode('utf8')
    logging.getLogger(__name__).debug('Received command, data: [%s]' % data)
    return command, data
def deploy ( environment , zappa_settings ) : """Package , create and deploy to Lambda ."""
print ( ( "Deploying " + environment ) ) zappa , settings , lambda_name , zip_path = _package ( environment , zappa_settings ) s3_bucket_name = settings [ 's3_bucket' ] try : # Load your AWS credentials from ~ / . aws / credentials zappa . load_credentials ( ) # Make sure the necessary IAM execution roles are available zappa . create_iam_roles ( ) # Upload it to S3 zip_arn = zappa . upload_to_s3 ( zip_path , s3_bucket_name ) # Register the Lambda function with that zip as the source # You ' ll also need to define the path to your lambda _ handler code . lambda_arn = zappa . create_lambda_function ( bucket = s3_bucket_name , s3_key = zip_path , function_name = lambda_name , handler = 'handler.lambda_handler' , vpc_config = settings [ 'vpc_config' ] , memory_size = settings [ 'memory_size' ] ) # Create and configure the API Gateway api_id = zappa . create_api_gateway_routes ( lambda_arn , lambda_name ) # Deploy the API ! endpoint_url = zappa . deploy_api_gateway ( api_id , environment ) # Remove the uploaded zip from S3 , because it is now registered . . zappa . remove_from_s3 ( zip_path , s3_bucket_name ) if settings [ 'touch' ] : requests . get ( endpoint_url ) finally : try : # Finally , delete the local copy our zip package if settings [ 'delete_zip' ] : os . remove ( zip_path ) except : print ( "WARNING: Manual cleanup of the zip might be needed." ) print ( ( "Your Zappa deployment is live!: " + endpoint_url ) )
def app_uninstall(self, package_name, keep_data=False):
    """Uninstall package

    Args:
    - package_name (string): package name ex: com.example.demo
    - keep_data (bool): keep the data and cache directories
    """
    if keep_data:
        return self.run_cmd('uninstall', '-k', package_name)
    else:
        return self.run_cmd('uninstall', package_name)
def __parse(value):
    """
    Parse the string date.

    Supports the subset of ISO8601 used by xsd:time, but is lenient with
    what is accepted, handling most reasonable syntax.

    Subsecond information is rounded to microseconds due to a restriction
    in the python datetime.time implementation.

    @param value: A time string.
    @type value: str
    @return: A time object.
    @rtype: B{datetime}.I{time}
    """
    match_result = _RE_TIME.match(value)
    if match_result is None:
        raise ValueError("date data has invalid format '%s'" % (value,))
    time, round_up = _time_from_match(match_result)
    tzinfo = _tzinfo_from_match(match_result)
    if round_up:
        time = _bump_up_time_by_microsecond(time)
    return time.replace(tzinfo=tzinfo)
def pretrain ( texts_loc , vectors_model , output_dir , width = 96 , depth = 4 , embed_rows = 2000 , loss_func = "cosine" , use_vectors = False , dropout = 0.2 , n_iter = 1000 , batch_size = 3000 , max_length = 500 , min_length = 5 , seed = 0 , n_save_every = None , ) : """Pre - train the ' token - to - vector ' ( tok2vec ) layer of pipeline components , using an approximate language - modelling objective . Specifically , we load pre - trained vectors , and train a component like a CNN , BiLSTM , etc to predict vectors which match the pre - trained ones . The weights are saved to a directory after each epoch . You can then pass a path to one of these pre - trained weights files to the ' spacy train ' command . This technique may be especially helpful if you have little labelled data . However , it ' s still quite experimental , so your mileage may vary . To load the weights back in during ' spacy train ' , you need to ensure all settings are the same between pretraining and training . The API and errors around this need some improvement ."""
config = dict ( locals ( ) ) msg = Printer ( ) util . fix_random_seed ( seed ) has_gpu = prefer_gpu ( ) msg . info ( "Using GPU" if has_gpu else "Not using GPU" ) output_dir = Path ( output_dir ) if not output_dir . exists ( ) : output_dir . mkdir ( ) msg . good ( "Created output directory" ) srsly . write_json ( output_dir / "config.json" , config ) msg . good ( "Saved settings to config.json" ) # Load texts from file or stdin if texts_loc != "-" : # reading from a file texts_loc = Path ( texts_loc ) if not texts_loc . exists ( ) : msg . fail ( "Input text file doesn't exist" , texts_loc , exits = 1 ) with msg . loading ( "Loading input texts..." ) : texts = list ( srsly . read_jsonl ( texts_loc ) ) msg . good ( "Loaded input texts" ) random . shuffle ( texts ) else : # reading from stdin msg . text ( "Reading input text from stdin..." ) texts = srsly . read_jsonl ( "-" ) with msg . loading ( "Loading model '{}'..." . format ( vectors_model ) ) : nlp = util . load_model ( vectors_model ) msg . good ( "Loaded model '{}'" . format ( vectors_model ) ) pretrained_vectors = None if not use_vectors else nlp . vocab . vectors . name model = create_pretraining_model ( nlp , Tok2Vec ( width , embed_rows , conv_depth = depth , pretrained_vectors = pretrained_vectors , bilstm_depth = 0 , # Requires PyTorch . Experimental . cnn_maxout_pieces = 3 , # You can try setting this higher subword_features = True , # Set to False for Chinese etc ) , ) optimizer = create_default_optimizer ( model . ops ) tracker = ProgressTracker ( frequency = 10000 ) msg . divider ( "Pre-training tok2vec layer" ) row_settings = { "widths" : ( 3 , 10 , 10 , 6 , 4 ) , "aligns" : ( "r" , "r" , "r" , "r" , "r" ) } msg . row ( ( "#" , "# Words" , "Total Loss" , "Loss" , "w/s" ) , ** row_settings ) def _save_model ( epoch , is_temp = False ) : is_temp_str = ".temp" if is_temp else "" with model . use_params ( optimizer . averages ) : with ( output_dir / ( "model%d%s.bin" % ( epoch , is_temp_str ) ) ) . open ( "wb" ) as file_ : file_ . write ( model . tok2vec . to_bytes ( ) ) log = { "nr_word" : tracker . nr_word , "loss" : tracker . loss , "epoch_loss" : tracker . epoch_loss , "epoch" : epoch , } with ( output_dir / "log.jsonl" ) . open ( "a" ) as file_ : file_ . write ( srsly . json_dumps ( log ) + "\n" ) for epoch in range ( n_iter ) : for batch_id , batch in enumerate ( util . minibatch_by_words ( ( ( text , None ) for text in texts ) , size = batch_size ) ) : docs = make_docs ( nlp , [ text for ( text , _ ) in batch ] , max_length = max_length , min_length = min_length , ) loss = make_update ( model , docs , optimizer , objective = loss_func , drop = dropout ) progress = tracker . update ( epoch , loss , docs ) if progress : msg . row ( progress , ** row_settings ) if texts_loc == "-" and tracker . words_per_epoch [ epoch ] >= 10 ** 7 : break if n_save_every and ( batch_id % n_save_every == 0 ) : _save_model ( epoch , is_temp = True ) _save_model ( epoch ) tracker . epoch_loss = 0.0 if texts_loc != "-" : # Reshuffle the texts if texts were loaded from a file random . shuffle ( texts )
def _ensure_append(self, new_items, append_to, index=0):
    """Ensure an item is appended to a list or create a new empty list

    :param new_items: the item(s) to append
    :type new_items: list(obj)
    :param append_to: the list on which to append the items
    :type append_to: list()
    :param index: index of the list on which to append the items
    :type index: int
    """
    append_to = append_to or []
    append_to.insert(index, new_items)
    return append_to
def handle_erroneous_response(self, response: requests.Response) -> NoReturn:
    """Attempts to decode an erroneous response into an exception, and to
    subsequently throw that exception.

    Raises:
        BugZooException: the exception described by the error response.
        UnexpectedResponse: if the response cannot be decoded to an exception.
    """
    logger.debug("handling erroneous response: %s", response)
    try:
        err = BugZooException.from_dict(response.json())
    except Exception:
        err = UnexpectedResponse(response)
    raise err
async def filter_new_posts(self, source_id, post_ids):
    """Filters list of post_id for new ones.

    :param source_id: id of the source
    :type string:
    :param post_ids: list of post ids
    :type list:
    :returns: list of unknown post ids.
    """
    new_ids = []
    try:
        db_client = self._db
        posts_in_db = await db_client.get_known_posts(source_id, post_ids)
        new_ids = [p for p in post_ids if p not in posts_in_db]
    except Exception as exc:
        logger.error("Error when filtering for new posts {} {}".format(source_id, post_ids))
        logger.exception(exc)
    return new_ids
def min_feedback_arc_set(edges, remove=False, maxcycles=20000):
    """A directed graph may contain directed cycles, when such cycles are
    undesirable, we wish to eliminate them and obtain a directed acyclic graph
    (DAG). A feedback arc set has the property that it has at least one edge
    of every cycle in the graph. A minimum feedback arc set is the set that
    minimizes the total weight of the removed edges; or alternatively maximize
    the remaining edges. See: <http://en.wikipedia.org/wiki/Feedback_arc_set>.

    The MIP formulation proceeds as follows: use 0/1 indicator variable to
    select whether an edge is in the set, subject to constraint that each
    cycle must pick at least one such edge.

    >>> g = [(1, 2, 2), (2, 3, 2), (3, 4, 2)] + [(1, 3, 1), (3, 2, 1), (2, 4, 1)]
    >>> min_feedback_arc_set(g)
    ([(3, 2, 1)], 1)
    >>> min_feedback_arc_set(g, remove=True)  # Return DAG
    ([(1, 2, 2), (2, 3, 2), (3, 4, 2), (1, 3, 1), (2, 4, 1)], 1)
    """
    G = nx.DiGraph()
    edge_to_index = {}
    for i, (a, b, w) in enumerate(edges):
        G.add_edge(a, b)
        edge_to_index[a, b] = i

    nedges = len(edges)
    L = LPInstance()
    L.add_objective(edges, objective=MINIMIZE)

    constraints = []
    ncycles = 0
    for c in nx.simple_cycles(G):
        cycle_edges = []
        rc = c + [c[0]]  # Rotate the cycle
        for a, b in pairwise(rc):
            cycle_edges.append(edge_to_index[a, b])
        cc = summation(cycle_edges)
        constraints.append("{0} >= 1".format(cc))
        ncycles += 1
        if ncycles == maxcycles:
            break
    logging.debug("A total of {0} cycles found.".format(ncycles))

    L.constraints = constraints
    L.add_vars(nedges)

    selected, obj_val = L.lpsolve(clean=False)
    if remove:
        results = [x for i, x in enumerate(edges) if i not in selected] if selected else None
    else:
        results = [x for i, x in enumerate(edges) if i in selected] if selected else None
    return results, obj_val
def mass_3d ( self , R , Rs , rho0 ) : """mass enclosed a 3d sphere or radius r : param r : : param Ra : : param Rs : : return :"""
Rs = float ( Rs ) m_3d = 4. * np . pi * rho0 * Rs ** 3 * ( np . log ( ( Rs + R ) / Rs ) - R / ( Rs + R ) ) return m_3d
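For illustration, the same enclosed-mass expression evaluated standalone with made-up values for rho0, Rs and R (units are whatever consistent system the profile uses):

import numpy as np

Rs, rho0, R = 10.0, 1.0, 5.0  # illustrative values only
m_3d = 4.0 * np.pi * rho0 * Rs ** 3 * (np.log((Rs + R) / Rs) - R / (Rs + R))
print(m_3d)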
def _get_clearinghouse_vessel_handle ( vesselhandle ) : """< Purpose > Acquires the unique vessel identifier for a given vesselhandle . < Arguments > vesselhandle : A vessel handle expressed in the form node _ ip : node _ port : vesselname . < Side Effects > Opens a connection to the vessel to retrieve its nodekey . < Exceptions > None < Returns > A tuple ( True , ' nodekey : vesselname ' ) on success , or ( False , error string ) on failure ."""
host , portstring , vesselname = vesselhandle . split ( ':' ) port = int ( portstring ) # get information about the node ' s vessels try : nodehandle = nmclient . nmclient_createhandle ( host , port , timeout = seash_global_variables . globalseashtimeout ) except NMClientException , e : return ( False , str ( e ) ) try : # We need to get the nodekey on this vessel vesseldict = nmclient . nmclient_getvesseldict ( nodehandle ) except NMClientException , e : return ( False , str ( e ) ) finally : nmclient . nmclient_destroyhandle ( nodehandle ) nodekeystr = rsa . rsa_publickey_to_string ( vesseldict [ 'nodekey' ] ) return ( True , nodekeystr + ':' + vesselname )
def read_excitation_energies ( self ) : """Read excitation energies after a TD - DFT calculation . Returns : A list of tuples , one per transition , such as [ ( energy ( eV ) , lambda ( nm ) , oscillator strength ) , . . . ]"""
transitions = list ( ) # read in file with zopen ( self . filename , "r" ) as f : line = f . readline ( ) td = False while line != "" : if re . search ( r"^\sExcitation energies and oscillator strengths:" , line ) : td = True if td : if re . search ( r"^\sExcited State\s*\d" , line ) : # float_patt is assumed to be a module - level compiled regex matching floating - point numbers val = [ float ( v ) for v in float_patt . findall ( line ) ] transitions . append ( tuple ( val [ 0 : 3 ] ) ) line = f . readline ( ) return transitions
def update ( self , request , * args , ** kwargs ) : """Update a resource ."""
# NOTE : Use the original method instead when support for locking is added : # https : / / github . com / encode / django - rest - framework / issues / 4675 # return super ( ) . update ( request , * args , * * kwargs ) with transaction . atomic ( ) : return self . _update ( request , * args , ** kwargs )
def _set_default_action ( self , v , load = False ) : """Setter method for default _ action , mapped from YANG variable / openflow _ state / detail / default _ action ( default - packet - action ) If this variable is read - only ( config : false ) in the source YANG file , then _ set _ default _ action is considered as a private method . Backends looking to populate this variable should do so via calling thisObj . _ set _ default _ action ( ) directly . YANG Description : Default action"""
if hasattr ( v , "_utype" ) : v = v . _utype ( v ) try : t = YANGDynClass ( v , base = RestrictedClassType ( base_type = unicode , restriction_type = "dict_key" , restriction_arg = { u'dcm-action-drop' : { 'value' : 1 } , u'dcm-action-invalid' : { 'value' : 0 } , u'dcm-action-send-to-controller' : { 'value' : 2 } } , ) , is_leaf = True , yang_name = "default-action" , rest_name = "default-action" , parent = self , path_helper = self . _path_helper , extmethods = self . _extmethods , register_paths = True , namespace = 'urn:brocade.com:mgmt:brocade-openflow-operational' , defining_module = 'brocade-openflow-operational' , yang_type = 'default-packet-action' , is_config = False ) except ( TypeError , ValueError ) : raise ValueError ( { 'error-string' : """default_action must be of a type compatible with default-packet-action""" , 'defined-type' : "brocade-openflow-operational:default-packet-action" , 'generated-type' : """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'dcm-action-drop': {'value': 1}, u'dcm-action-invalid': {'value': 0}, u'dcm-action-send-to-controller': {'value': 2}},), is_leaf=True, yang_name="default-action", rest_name="default-action", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='default-packet-action', is_config=False)""" , } ) self . __default_action = t if hasattr ( self , '_set' ) : self . _set ( )
def update_instance ( self ) : """Save bound form data into the XmlObject model instance and return the updated instance ."""
# NOTE : django model form has a save method - not applicable here , # since an XmlObject by itself is not expected to have a save method # ( only likely to be saved in context of a fedora or exist object ) if hasattr ( self , 'cleaned_data' ) : # possible to have an empty object / no data opts = self . _meta # NOTE : _ fields doesn ' t seem to order , which is # problematic for some xml ( e . g . , where order matters for validity ) # use field order as declared in the form for update order # when possible . # ( NOTE : this could be problematic also , since display order may # not always be the same as schema order ) fields_in_order = [ ] if hasattr ( self . Meta , 'fields' ) : fields_in_order . extend ( self . Meta . fields ) fields_in_order . extend ( [ name for name in six . iterkeys ( self . instance . _fields ) if name in self . Meta . fields ] ) else : fields_in_order = self . instance . _fields . keys ( ) for name in fields_in_order : # for name in self . instance . _ fields . iterkeys ( ) : # for name in self . declared _ fields . iterkeys ( ) : if opts . fields and name not in opts . parsed_fields . fields : continue if opts . exclude and name in opts . parsed_exclude . fields : continue if name in self . cleaned_data : # special case : we don ' t want empty attributes and elements # for fields which returned no data from the form # converting ' ' to None and letting XmlObject handle if self . cleaned_data [ name ] == '' : self . cleaned_data [ name ] = None setattr ( self . instance , name , self . cleaned_data [ name ] ) # update sub - model portions via any subforms for name , subform in six . iteritems ( self . subforms ) : self . _update_subinstance ( name , subform ) for formset in six . itervalues ( self . formsets ) : formset . update_instance ( ) return self . instance
def get_primary_group ( obj_name , obj_type = 'file' ) : r'''Gets the primary group of the passed object Args : obj _ name ( str ) : The path for which to obtain primary group information obj _ type ( str ) : The type of object to query . This value changes the format of the ` ` obj _ name ` ` parameter as follows : - file : indicates a file or directory - a relative path , such as ` ` FileName . txt ` ` or ` ` . . \ FileName ` ` - an absolute path , such as ` ` C : \ DirName \ FileName . txt ` ` - A UNC name , such as ` ` \ \ ServerName \ ShareName \ FileName . txt ` ` - service : indicates the name of a Windows service - printer : indicates the name of a printer - registry : indicates a registry key - Uses the following literal strings to denote the hive : - HKEY _ LOCAL _ MACHINE - MACHINE - HKLM - HKEY _ USERS - USERS - HKU - HKEY _ CURRENT _ USER - CURRENT _ USER - HKCU - HKEY _ CLASSES _ ROOT - CLASSES _ ROOT - HKCR - Should be in the format of ` ` HIVE \ Path \ To \ Key ` ` . For example , ` ` HKLM \ SOFTWARE \ Windows ` ` - registry32 : indicates a registry key under WOW64 . Formatting is the same as it is for ` ` registry ` ` - share : indicates a network share Returns : str : The primary group for the object Usage : . . code - block : : python salt . utils . win _ dacl . get _ primary _ group ( ' c : \ \ file ' )'''
# Not all filesystems mountable within windows have SecurityDescriptors . # For instance , some mounted SAMBA shares , or VirtualBox shared folders . If # we can ' t load a file descriptor for the file , we default to " Everyone " # http : / / support . microsoft . com / kb / 243330 # Validate obj _ type try : obj_type_flag = flags ( ) . obj_type [ obj_type . lower ( ) ] except KeyError : raise SaltInvocationError ( 'Invalid "obj_type" passed: {0}' . format ( obj_type ) ) if 'registry' in obj_type . lower ( ) : obj_name = dacl ( ) . get_reg_name ( obj_name ) log . debug ( 'Name converted to: %s' , obj_name ) try : security_descriptor = win32security . GetNamedSecurityInfo ( obj_name , obj_type_flag , win32security . GROUP_SECURITY_INFORMATION ) primary_group_gid = security_descriptor . GetSecurityDescriptorGroup ( ) except MemoryError : # Generic Memory Error ( Windows Server 2003 + ) primary_group_gid = 'S-1-0-0' except pywintypes . error as exc : # Incorrect function error ( Windows Server 2008 + ) if exc . winerror == 1 or exc . winerror == 50 : primary_group_gid = 'S-1-0-0' else : log . exception ( 'Failed to get the primary group: %s' , obj_name ) raise CommandExecutionError ( 'Failed to get primary group: {0}' . format ( obj_name ) , exc . strerror ) return get_name ( win32security . ConvertSidToStringSid ( primary_group_gid ) )
def normalize ( self , text , normalizations = None ) : """Normalize a given text applying all normalizations . Normalizations to apply can be specified through a list of parameters and will be executed in that order . Args : text : The text to be processed . normalizations : List of normalizations to apply . Returns : The text normalized ."""
for normalization , kwargs in self . _parse_normalizations ( normalizations or self . _config . normalizations ) : try : text = getattr ( self , normalization ) ( text , ** kwargs ) except AttributeError as e : self . _logger . debug ( 'Invalid normalization: %s' , e ) return text
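A toy sketch of the same getattr-based dispatch, with hypothetical normalization names (lowercase, strip_spaces) standing in for whatever the real class defines:

class Normalizer:
    def lowercase(self, text):
        return text.lower()

    def strip_spaces(self, text):
        return " ".join(text.split())

    def normalize(self, text, normalizations):
        # look each normalization up by name and apply it in order
        for name in normalizations:
            text = getattr(self, name)(text)
        return text

print(Normalizer().normalize("  Hello   WORLD ", ["lowercase", "strip_spaces"]))
# -> 'hello world'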
def to_polygon ( self ) : """Return a 4 - cornered polygon equivalent to this rectangle"""
x , y = self . corners . T vertices = PixCoord ( x = x , y = y ) return PolygonPixelRegion ( vertices = vertices , meta = self . meta , visual = self . visual )
def readQuotes ( self , start , end ) : '''read quotes from Yahoo Financial'''
if self . symbol is None : LOG . debug ( 'Symbol is None' ) return [ ] return self . __yf . getQuotes ( self . symbol , start , end )
def draw_header ( canvas ) : """Draws the invoice header"""
canvas . setStrokeColorRGB ( 0.9 , 0.5 , 0.2 ) canvas . setFillColorRGB ( 0.2 , 0.2 , 0.2 ) canvas . setFont ( 'Helvetica' , 16 ) canvas . drawString ( 18 * cm , - 1 * cm , 'Invoice' ) canvas . drawInlineImage ( settings . INV_LOGO , 1 * cm , - 1 * cm , 250 , 16 ) canvas . setLineWidth ( 4 ) canvas . line ( 0 , - 1.25 * cm , 21.7 * cm , - 1.25 * cm )
def run_apidoc ( _ ) : """Generate API documentation"""
import better_apidoc better_apidoc . main ( [ 'better-apidoc' , '-t' , os . path . join ( '.' , '_templates' ) , '--force' , '--no-toc' , '--separate' , '-o' , os . path . join ( '.' , 'API' ) , os . path . join ( '..' , 'src' , 'qnet' ) , ] )
def _do_generate ( self , source_list , hang_type , crashed_thread , delimiter = ' | ' ) : """each element of signatureList names a frame in the crash stack ; and is : - a prefix of a relevant frame : Append this element to the signature - a relevant frame : Append this element and stop looking - irrelevant : Append this element only after seeing a prefix frame The signature is a ' | ' separated string of frame names ."""
notes = [ ] debug_notes = [ ] # shorten source _ list to the first signatureSentinel sentinel_locations = [ ] for a_sentinel in self . signature_sentinels : if type ( a_sentinel ) == tuple : a_sentinel , condition_fn = a_sentinel if not condition_fn ( source_list ) : continue try : sentinel_locations . append ( source_list . index ( a_sentinel ) ) except ValueError : pass if sentinel_locations : min_index = min ( sentinel_locations ) debug_notes . append ( 'sentinel; starting at "{}" index {}' . format ( source_list [ min_index ] , min_index ) ) source_list = source_list [ min_index : ] # Get all the relevant frame signatures . Note that these function signatures # have already been normalized at this point . new_signature_list = [ ] for a_signature in source_list : # If the signature matches the irrelevant signatures regex , skip to the next frame . if self . irrelevant_signature_re . match ( a_signature ) : debug_notes . append ( 'irrelevant; ignoring: "{}"' . format ( a_signature ) ) continue # If the frame signature is a dll , remove the @ xxxxx part . if '.dll' in a_signature . lower ( ) : a_signature = a_signature . split ( '@' ) [ 0 ] # If this trimmed DLL signature is the same as the previous frame ' s , skip it . if new_signature_list and a_signature == new_signature_list [ - 1 ] : continue new_signature_list . append ( a_signature ) # If the signature does not match the prefix signatures regex , then it is the last # one we add to the list . if not self . prefix_signature_re . match ( a_signature ) : debug_notes . append ( 'not a prefix; stop: "{}"' . format ( a_signature ) ) break debug_notes . append ( 'prefix; continue iterating: "{}"' . format ( a_signature ) ) # Add a special marker for hang crash reports . if hang_type : debug_notes . append ( 'hang_type {}: prepending {}' . format ( hang_type , self . hang_prefixes [ hang_type ] ) ) new_signature_list . insert ( 0 , self . hang_prefixes [ hang_type ] ) signature = delimiter . join ( new_signature_list ) # Handle empty signatures to explain why we failed generating them . if signature == '' or signature is None : if crashed_thread is None : notes . append ( "CSignatureTool: No signature could be created because we do not know which " "thread crashed" ) signature = "EMPTY: no crashing thread identified" else : notes . append ( "CSignatureTool: No proper signature could be created because no good data " "for the crashing thread ({}) was found" . format ( crashed_thread ) ) try : signature = source_list [ 0 ] except IndexError : signature = "EMPTY: no frame data available" return signature , notes , debug_notes
def get_compatible_generator_action ( self , filename ) : """Return the * * first * * compatible : class : ` GeneratorAction ` for a given filename or ` ` None ` ` if none is found . Args : filename ( str ) : The filename of the template to process ."""
# find first compatible generator action for action in self . __generator_actions : if action . act_on_file ( filename ) : return action return None
def get_hg_revision ( repopath ) : """Return Mercurial revision for the repository located at repopath Result is a tuple ( global , local , branch ) , with None values on error For example : > > > get _ hg _ revision ( " . " ) ( ' eba7273c69df + ' , ' 2015 + ' , ' default ' )"""
try : assert osp . isdir ( osp . join ( repopath , '.hg' ) ) proc = programs . run_program ( 'hg' , [ 'id' , '-nib' , repopath ] ) output , _err = proc . communicate ( ) # output is now : ( ' eba7273c69df + 2015 + default \ n ' , None ) # Split 2 times max to allow spaces in branch names . return tuple ( output . decode ( ) . strip ( ) . split ( None , 2 ) ) except ( subprocess . CalledProcessError , AssertionError , AttributeError , OSError ) : return ( None , None , None )
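For illustration, splitting a sample `hg id -nib` output the same way (the sample string is made up):

output = b'eba7273c69df+ 2015+ default\n'  # hypothetical `hg id -nib` output
print(tuple(output.decode().strip().split(None, 2)))
# -> ('eba7273c69df+', '2015+', 'default')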
def replyToComment ( self , repo_user , repo_name , pull_number , body , in_reply_to ) : """POST / repos / : owner / : repo / pulls / : number / comments Like create , but reply to an existing comment . : param body : The text of the comment . : param in _ reply _ to : The comment ID to reply to ."""
return self . api . makeRequest ( [ "repos" , repo_user , repo_name , "pulls" , str ( pull_number ) , "comments" ] , method = "POST" , data = dict ( body = body , in_reply_to = in_reply_to ) )
def validate ( tmpl , path ) : """Validate a path against the path template . . . code - block : : python > > > validate ( ' users / * / messages / * ' , ' users / me / messages / 123 ' ) True > > > validate ( ' users / * / messages / * ' , ' users / me / drafts / 123 ' ) False > > > validate ( ' / v1 / { name = shelves / * / books / * } ' , ' / v1 / shelves / 1 / books / 3 ' ) True > > > validate ( ' / v1 / { name = shelves / * / books / * } ' , ' / v1 / shelves / 1 / tapes / 3 ' ) False Args : tmpl ( str ) : The path template . path ( str ) : The expanded path . Returns : bool : True if the path matches ."""
pattern = _generate_pattern_for_template ( tmpl ) + "$" return re . match ( pattern , path ) is not None
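Since _generate_pattern_for_template is not shown here, a sketch with a hand-written pattern for 'users/*/messages/*' illustrates the final match step:

import re

pattern = r"users/[^/]+/messages/[^/]+" + "$"  # hand-built stand-in for the compiled template
print(re.match(pattern, "users/me/messages/123") is not None)  # True
print(re.match(pattern, "users/me/drafts/123") is not None)    # False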
def get_from ( self , x , y ) : """Get the character at the specified location . : param x : The column ( x coord ) of the character . : param y : The row ( y coord ) of the character . : return : A 4 - tuple of ( ascii code , foreground , attributes , background ) for the character at the location ."""
# Convert to buffer coordinates y -= self . _start_line if y < 0 or y >= self . _buffer_height or x < 0 or x >= self . width : return None cell = self . _buffer . get ( x , y ) return cell [ 0 ] , cell [ 1 ] , cell [ 2 ] , cell [ 3 ]
def weld_str_lower ( array ) : """Convert values to lowercase . Parameters array : numpy . ndarray or WeldObject Input data . Returns WeldObject Representation of this computation ."""
obj_id , weld_obj = create_weld_object ( array ) weld_template = """map( {array}, |e: vec[i8]| result( for(e, appender[i8], |c: appender[i8], j: i64, f: i8| if(f > 64c && f < 91c, merge(c, f + 32c), merge(c, f)) ) ) )""" weld_obj . weld_code = weld_template . format ( array = obj_id ) return weld_obj
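The Weld template lowers ASCII bytes by adding 32 to codes in the range 65-90; a pure-Python sketch of the same byte-level rule:

def ascii_lower(data: bytes) -> bytes:
    # add 32 to 'A'..'Z' (codes 65..90), leave everything else untouched
    return bytes(c + 32 if 64 < c < 91 else c for c in data)

print(ascii_lower(b"Hello WORLD 123"))  # b'hello world 123'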
def parse ( self , record , is_first_dir_record_of_root , bytes_to_skip , continuation ) : # type : ( bytes , bool , int , bool ) - > None '''Method to parse a rock ridge record . Parameters : record - The record to parse . is _ first _ dir _ record _ of _ root - Whether this is the first directory record of the root directory record ; certain Rock Ridge entries are only valid there . bytes _ to _ skip - The number of bytes to skip at the beginning of the record . continuation - Whether the new entries should go in the continuation list or in the DR list . Returns : Nothing .'''
# Note that we very explicitly do not check if self . _ initialized is True # here ; this can be called multiple times in the case where there is # a continuation entry . if continuation : entry_list = self . ce_entries else : entry_list = self . dr_entries self . bytes_to_skip = bytes_to_skip offset = bytes_to_skip left = len ( record ) px_record_length = None has_es_record = False sf_record_length = None er_id = None while True : if left == 0 : break elif left == 1 : # There may be a padding byte on the end . if bytes ( bytearray ( [ record [ offset ] ] ) ) != b'\x00' : raise pycdlibexception . PyCdlibInvalidISO ( 'Invalid pad byte' ) break elif left < 4 : raise pycdlibexception . PyCdlibInvalidISO ( 'Not enough bytes left in the System Use field' ) ( rtype , su_len , su_entry_version ) = struct . unpack_from ( '=2sBB' , record [ : offset + 4 ] , offset ) if su_entry_version != SU_ENTRY_VERSION : raise pycdlibexception . PyCdlibInvalidISO ( 'Invalid RR version %d!' % su_entry_version ) recslice = record [ offset : ] if rtype in ( b'SP' , b'RR' , b'CE' , b'PX' , b'ST' , b'ER' , b'PN' , b'CL' , b'PL' , b'RE' , b'TF' , b'SF' ) : recname = rtype . decode ( 'utf-8' ) . lower ( ) + '_record' if self . has_entry ( recname ) : raise pycdlibexception . PyCdlibInvalidISO ( 'Only single SP record supported' ) if rtype == b'SP' : if left < 7 or not is_first_dir_record_of_root : raise pycdlibexception . PyCdlibInvalidISO ( 'Invalid SUSP SP record' ) # OK , this is the first Directory Record of the root # directory , which means we should check it for the SUSP / RR # extension , which is exactly 7 bytes and starts with ' SP ' . entry_list . sp_record = RRSPRecord ( ) entry_list . sp_record . parse ( recslice ) elif rtype == b'RR' : entry_list . rr_record = RRRRRecord ( ) entry_list . rr_record . parse ( recslice ) elif rtype == b'CE' : if self . has_entry ( 'ce_record' ) : raise pycdlibexception . PyCdlibInvalidISO ( 'Only single CE record supported' ) entry_list . ce_record = RRCERecord ( ) entry_list . ce_record . parse ( recslice ) elif rtype == b'PX' : entry_list . px_record = RRPXRecord ( ) px_record_length = entry_list . px_record . parse ( recslice ) elif rtype == b'PD' : pd = RRPDRecord ( ) pd . parse ( recslice ) entry_list . pd_records . append ( pd ) elif rtype == b'ST' : if entry_list . st_record is not None : raise pycdlibexception . PyCdlibInvalidISO ( 'Only one ST record per SUSP area supported' ) if su_len != 4 : raise pycdlibexception . PyCdlibInvalidISO ( 'Invalid length on rock ridge extension' ) entry_list . st_record = RRSTRecord ( ) entry_list . st_record . parse ( recslice ) elif rtype == b'ER' : entry_list . er_record = RRERRecord ( ) entry_list . er_record . parse ( recslice ) er_id = entry_list . er_record . ext_id elif rtype == b'ES' : es = RRESRecord ( ) es . parse ( recslice ) entry_list . es_records . append ( es ) has_es_record = True elif rtype == b'PN' : entry_list . pn_record = RRPNRecord ( ) entry_list . pn_record . parse ( recslice ) elif rtype == b'SL' : new_sl_record = RRSLRecord ( ) new_sl_record . parse ( recslice ) entry_list . sl_records . append ( new_sl_record ) elif rtype == b'NM' : new_nm_record = RRNMRecord ( ) new_nm_record . parse ( recslice ) entry_list . nm_records . append ( new_nm_record ) elif rtype == b'CL' : entry_list . cl_record = RRCLRecord ( ) entry_list . cl_record . parse ( recslice ) elif rtype == b'PL' : entry_list . pl_record = RRPLRecord ( ) entry_list . pl_record . parse ( recslice ) elif rtype == b'RE' : entry_list . 
re_record = RRRERecord ( ) entry_list . re_record . parse ( recslice ) elif rtype == b'TF' : entry_list . tf_record = RRTFRecord ( ) entry_list . tf_record . parse ( recslice ) elif rtype == b'SF' : entry_list . sf_record = RRSFRecord ( ) entry_list . sf_record . parse ( recslice ) sf_record_length = len ( recslice ) else : raise pycdlibexception . PyCdlibInvalidISO ( 'Unknown SUSP record' ) offset += su_len left -= su_len # Now let ' s determine the version of Rock Ridge that we have ( 1.09, # 1.10 , or 1.12 ) . Unfortunately , there is no direct information from # Rock Ridge , so we infer it from what is present . In an ideal world , # the following table would tell us : # | Feature / Rock Ridge version | 1.09 | 1.10 | 1.12 | # | Has RR Record ? | True or False | False | False | # | Has ES Record ? | False | False | True or False | # | Has SF Record ? | False | True or False | True or False | # | PX Record length | 36 | 36 | 44 | # | SF Record length | N / A | 12 | 21 | # | ER Desc string | RRIP _ 1991A | RRIP _ 1991A | IEEE _ P1282 | # While that is a good start , we don ' t live in an ideal world . In # particular , we ' ve seen ISOs in the wild ( OpenSolaris 2008 ) that put an # RR record into an otherwise 1.12 Rock Ridge entry . So we ' ll use the # above as a hint , and allow for some wiggle room . if px_record_length == 44 or sf_record_length == 21 or has_es_record or er_id == EXT_ID_112 : self . rr_version = '1.12' else : # Not 1.12 , so either 1.09 or 1.10. if sf_record_length == 12 : self . rr_version = '1.10' else : self . rr_version = '1.09' namelist = [ nm . posix_name for nm in self . dr_entries . nm_records ] namelist . extend ( [ nm . posix_name for nm in self . ce_entries . nm_records ] ) self . _full_name = b'' . join ( namelist ) self . _initialized = True
def update_product_version ( id , ** kwargs ) : """Update the ProductVersion with ID id with new values ."""
content = update_product_version_raw ( id , ** kwargs ) if content : return utils . format_json ( content )
def transform ( self , X , lenscale = None ) : r"""Apply the sigmoid basis function to X . Parameters X : ndarray ( N , d ) array of observations where N is the number of samples , and d is the dimensionality of X . lenscale : float the length scale ( scalar ) of the RBFs to apply to X . If not input , this uses the value of the initial length scale . Returns ndarray : of shape ( N , D ) where D is number of centres ."""
N , d = X . shape lenscale = self . _check_dim ( d , lenscale ) return expit ( cdist ( X / lenscale , self . C / lenscale , 'euclidean' ) )
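A standalone sketch of the same transform with random data (assumes scipy is available); shapes follow the docstring, with N=5 observations against D=3 centres:

import numpy as np
from scipy.special import expit
from scipy.spatial.distance import cdist

X = np.random.randn(5, 2)       # N=5 observations, d=2
C = np.random.randn(3, 2)       # hypothetical centres
lenscale = 1.0
Phi = expit(cdist(X / lenscale, C / lenscale, 'euclidean'))
print(Phi.shape)                # (5, 3)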
def from_dict ( cls , d , identifier_str = None ) : """Load a ` ResolvedContext ` from a dict . Args : d ( dict ) : Dict containing context data . identifier _ str ( str ) : String identifying the context , this is only used to display in an error string if a serialization version mismatch is detected . Returns : ` ResolvedContext ` object ."""
# check serialization version def _print_version ( value ) : return '.' . join ( str ( x ) for x in value ) toks = str ( d [ "serialize_version" ] ) . split ( '.' ) load_ver = tuple ( int ( x ) for x in toks ) curr_ver = ResolvedContext . serialize_version if load_ver [ 0 ] > curr_ver [ 0 ] : msg = [ "The context" ] if identifier_str : msg . append ( "in %s" % identifier_str ) msg . append ( "was written by a newer version of Rez. The load may " "fail (serialize version %d > %d)" % ( _print_version ( load_ver ) , _print_version ( curr_ver ) ) ) print >> sys . stderr , ' ' . join ( msg ) # create and init the context r = ResolvedContext . __new__ ( ResolvedContext ) r . load_path = None r . pre_resolve_bindings = None r . timestamp = d [ "timestamp" ] r . building = d [ "building" ] r . caching = d [ "caching" ] r . implicit_packages = [ PackageRequest ( x ) for x in d [ "implicit_packages" ] ] r . _package_requests = [ PackageRequest ( x ) for x in d [ "package_requests" ] ] r . package_paths = d [ "package_paths" ] r . rez_version = d [ "rez_version" ] r . rez_path = d [ "rez_path" ] r . user = d [ "user" ] r . host = d [ "host" ] r . platform = d [ "platform" ] r . arch = d [ "arch" ] r . os = d [ "os" ] r . created = d [ "created" ] r . verbosity = d . get ( "verbosity" , 0 ) r . status_ = ResolverStatus [ d [ "status" ] ] r . failure_description = d [ "failure_description" ] r . solve_time = d [ "solve_time" ] r . load_time = d [ "load_time" ] r . graph_string = d [ "graph" ] r . graph_ = None r . _resolved_packages = [ ] for d_ in d [ "resolved_packages" ] : variant_handle = d_ if load_ver < ( 4 , 0 ) : # - - SINCE SERIALIZE VERSION 4.0 from rez . utils . backcompat import convert_old_variant_handle variant_handle = convert_old_variant_handle ( variant_handle ) variant = get_variant ( variant_handle ) variant . set_context ( r ) r . _resolved_packages . append ( variant ) # - - SINCE SERIALIZE VERSION 1 r . requested_timestamp = d . get ( "requested_timestamp" , 0 ) # - - SINCE SERIALIZE VERSION 2 r . parent_suite_path = d . get ( "parent_suite_path" ) r . suite_context_name = d . get ( "suite_context_name" ) # - - SINCE SERIALIZE VERSION 3 r . default_patch_lock = PatchLock [ d . get ( "default_patch_lock" , "no_lock" ) ] patch_locks = d . get ( "patch_locks" , { } ) r . patch_locks = dict ( ( k , PatchLock [ v ] ) for k , v in patch_locks ) # - - SINCE SERIALIZE VERSION 4.0 r . from_cache = d . get ( "from_cache" , False ) # - - SINCE SERIALIZE VERSION 4.1 data = d . get ( "package_filter" , [ ] ) r . package_filter = PackageFilterList . from_pod ( data ) # - - SINCE SERIALIZE VERSION 4.2 data = d . get ( "package_orderers" ) if data : r . package_orderers = [ package_order . from_pod ( x ) for x in data ] else : r . package_orderers = None # - - SINCE SERIALIZE VERSION 4.3 r . num_loaded_packages = d . get ( "num_loaded_packages" , - 1 ) # track context usage if config . context_tracking_host : data = dict ( ( k , v ) for k , v in d . iteritems ( ) if k in config . context_tracking_context_fields ) r . _track_context ( data , action = "sourced" ) return r
def default_strlen ( strlen = None ) : """Sets the default string length for lstring and ilwd : char , if they are treated as strings . Default is 50."""
if strlen is not None : _default_types_status [ 'default_strlen' ] = strlen # update the typeDicts as needed lstring_as_obj ( _default_types_status [ 'lstring_as_obj' ] ) ilwd_as_int ( _default_types_status [ 'ilwd_as_int' ] ) return _default_types_status [ 'default_strlen' ]
def thermal_conductivity ( self , temperature , volume ) : """Eq ( 17 ) in 10.1103 / PhysRevB . 90.174107 Args : temperature ( float ) : temperature in K volume ( float ) : in Ang ^ 3 Returns : float : thermal conductivity in W / K / m"""
gamma = self . gruneisen_parameter ( temperature , volume ) theta_d = self . debye_temperature ( volume ) theta_a = theta_d * self . natoms ** ( - 1. / 3. ) prefactor = ( 0.849 * 3 * 4 ** ( 1. / 3. ) ) / ( 20. * np . pi ** 3 ) # kg / K ^ 3 / s ^ 3 prefactor = prefactor * ( self . kb / self . hbar ) ** 3 * self . avg_mass kappa = prefactor / ( gamma ** 2 - 0.514 * gamma + 0.228 ) # kg / K / s ^ 3 * Ang = ( kg m / s ^ 2 ) / ( Ks ) * 1e - 10 # = N / ( Ks ) * 1e - 10 = Nm / ( Kms ) * 1e - 10 = W / K / m * 1e - 10 kappa = kappa * theta_a ** 2 * volume ** ( 1. / 3. ) * 1e-10 return kappa
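A standalone rerun of the same arithmetic with illustrative inputs (constants in SI, volume in Ang^3); the numbers are made up and only demonstrate the unit bookkeeping:

import numpy as np

kb, hbar = 1.380649e-23, 1.0545718e-34   # J/K, J*s
avg_mass = 4.6e-26                        # kg, hypothetical average atomic mass
natoms, gamma = 2, 1.5                    # hypothetical Gruneisen parameter
theta_d, volume = 400.0, 40.0             # K (hypothetical Debye temperature), Ang^3

theta_a = theta_d * natoms ** (-1.0 / 3.0)
prefactor = (0.849 * 3 * 4 ** (1.0 / 3.0)) / (20.0 * np.pi ** 3)
prefactor *= (kb / hbar) ** 3 * avg_mass
kappa = prefactor / (gamma ** 2 - 0.514 * gamma + 0.228)
kappa *= theta_a ** 2 * volume ** (1.0 / 3.0) * 1e-10
print(kappa, "W/K/m")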
def serialize ( self , attr , obj , accessor = None , ** kwargs ) : """Pulls the value for the given key from the object , applies the field ' s formatting and returns the result . : param str attr : The attribute or key to get from the object . : param str obj : The object to pull the key from . : param callable accessor : Function used to pull values from ` ` obj ` ` . : param dict kwargs : Field - specific keyword arguments . : raise ValidationError : In case of formatting problem"""
if self . _CHECK_ATTRIBUTE : value = self . get_value ( obj , attr , accessor = accessor ) if value is missing_ and hasattr ( self , 'default' ) : default = self . default value = default ( ) if callable ( default ) else default if value is missing_ : return value else : value = None return self . _serialize ( value , attr , obj , ** kwargs )
def add_new_enriched_bins_matrixes ( region_files , dfs , bin_size ) : """Add enriched bins based on bed files . There is no way to find the correspondence between region file and matrix file , but it does not matter ."""
dfs = _remove_epic_enriched ( dfs ) names = [ "Enriched_" + os . path . basename ( r ) for r in region_files ] regions = region_files_to_bins ( region_files , names , bin_size ) new_dfs = OrderedDict ( ) assert len ( regions . columns ) == len ( dfs ) for region , ( n , df ) in zip ( regions , dfs . items ( ) ) : region_col = regions [ region ] df = df . join ( region_col , how = "outer" ) . fillna ( 0 ) new_dfs [ n ] = df return new_dfs
def mkdir ( path , mode = 0o755 , delete = False ) : """Make a directory . Create a leaf directory and all intermediate ones . Works like ` ` mkdir ` ` , except that any intermediate path segment ( not just the rightmost ) will be created if it does not exist . This is recursive . Args : path ( str ) : Directory to create mode ( int ) : Directory mode delete ( bool ) : Delete directory / file if exists Returns : bool : True if succeeded else False"""
logger . info ( "mkdir: %s" % path ) if os . path . isdir ( path ) : if not delete : return True if not remove ( path ) : return False try : os . makedirs ( path , mode ) return True except Exception : logger . exception ( "Failed to mkdir: %s" % path ) return False
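A self-contained variant of the same create-or-recreate logic using only the standard library (shutil.rmtree stands in for the module's remove helper):

import os
import shutil

def ensure_dir(path, mode=0o755, delete=False):
    if os.path.isdir(path):
        if not delete:
            return True
        shutil.rmtree(path, ignore_errors=True)  # stand-in for remove()
    try:
        os.makedirs(path, mode)
        return True
    except OSError:
        return False

print(ensure_dir("/tmp/example_dir"))  # hypothetical path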
def _read_mode_route ( self , size , kind ) : """Read options with route data . Positional arguments : * size - int , length of option * kind - int , 7/131/137 ( RR / LSR / SSR ) Returns : * dict - - extracted option with route data Structure of these options : * [ RFC 791 ] Loose Source Route | 10000011 | length | pointer | route data | * [ RFC 791 ] Strict Source Route | 10001001 | length | pointer | route data | * [ RFC 791 ] Record Route | 00000111 | length | pointer | route data | Octets Bits Name Description 0 0 ip . opt . kind Kind ( 7/131/137) 0 0 ip . opt . type . copy Copied Flag ( 0) 0 1 ip . opt . type . class Option Class ( 0/1) 0 3 ip . opt . type . number Option Number ( 3/7/9) 1 8 ip . opt . length Length 2 16 ip . opt . pointer Pointer ( ≥ 4) 3 24 ip . opt . data Route Data"""
if size < 3 or ( size - 3 ) % 4 != 0 : raise ProtocolError ( f'{self.alias}: [Optno {kind}] invalid format' ) _rptr = self . _read_unpack ( 1 ) if _rptr < 4 : raise ProtocolError ( f'{self.alias}: [Optno {kind}] invalid format' ) data = dict ( kind = kind , type = self . _read_opt_type ( kind ) , length = size , pointer = _rptr , ) counter = 4 address = list ( ) endpoint = min ( _rptr , size ) while counter < endpoint : counter += 4 address . append ( self . _read_ipv4_addr ( ) ) data [ 'ip' ] = address or None return data
def _from_dict ( cls , _dict ) : """Initialize a Workspace object from a json dictionary ."""
args = { } if 'name' in _dict : args [ 'name' ] = _dict . get ( 'name' ) else : raise ValueError ( 'Required property \'name\' not present in Workspace JSON' ) if 'description' in _dict : args [ 'description' ] = _dict . get ( 'description' ) if 'language' in _dict : args [ 'language' ] = _dict . get ( 'language' ) else : raise ValueError ( 'Required property \'language\' not present in Workspace JSON' ) if 'metadata' in _dict : args [ 'metadata' ] = _dict . get ( 'metadata' ) if 'learning_opt_out' in _dict : args [ 'learning_opt_out' ] = _dict . get ( 'learning_opt_out' ) else : raise ValueError ( 'Required property \'learning_opt_out\' not present in Workspace JSON' ) if 'system_settings' in _dict : args [ 'system_settings' ] = WorkspaceSystemSettings . _from_dict ( _dict . get ( 'system_settings' ) ) if 'workspace_id' in _dict : args [ 'workspace_id' ] = _dict . get ( 'workspace_id' ) else : raise ValueError ( 'Required property \'workspace_id\' not present in Workspace JSON' ) if 'status' in _dict : args [ 'status' ] = _dict . get ( 'status' ) if 'created' in _dict : args [ 'created' ] = string_to_datetime ( _dict . get ( 'created' ) ) if 'updated' in _dict : args [ 'updated' ] = string_to_datetime ( _dict . get ( 'updated' ) ) if 'intents' in _dict : args [ 'intents' ] = [ Intent . _from_dict ( x ) for x in ( _dict . get ( 'intents' ) ) ] if 'entities' in _dict : args [ 'entities' ] = [ Entity . _from_dict ( x ) for x in ( _dict . get ( 'entities' ) ) ] if 'dialog_nodes' in _dict : args [ 'dialog_nodes' ] = [ DialogNode . _from_dict ( x ) for x in ( _dict . get ( 'dialog_nodes' ) ) ] if 'counterexamples' in _dict : args [ 'counterexamples' ] = [ Counterexample . _from_dict ( x ) for x in ( _dict . get ( 'counterexamples' ) ) ] return cls ( ** args )
def apply_connectivity_changes ( self , request ) : """Handle apply connectivity changes request json , trigger add or remove vlan methods , get response from them and create json response : param request : json with all required action to configure or remove vlans from certain port : return Serialized DriverResponseRoot to json : rtype json"""
if request is None or request == "" : raise Exception ( self . __class__ . __name__ , "request is None or empty" ) holder = JsonRequestDeserializer ( jsonpickle . decode ( request ) ) if not holder or not hasattr ( holder , "driverRequest" ) : raise Exception ( self . __class__ . __name__ , "Deserialized request is None or empty" ) driver_response = DriverResponse ( ) add_vlan_thread_list = [ ] remove_vlan_thread_list = [ ] driver_response_root = DriverResponseRoot ( ) for action in holder . driverRequest . actions : self . _logger . info ( "Action: " , action . __dict__ ) self . _validate_request_action ( action ) action_id = action . actionId full_name = action . actionTarget . fullName port_mode = action . connectionParams . mode . lower ( ) if action . type == "setVlan" : qnq = False ctag = "" for attribute in action . connectionParams . vlanServiceAttributes : if attribute . attributeName . lower ( ) == "qnq" and attribute . attributeValue . lower ( ) == "true" : qnq = True if attribute . attributeName . lower ( ) == "ctag" : ctag = attribute . attributeValue for vlan_id in self . _get_vlan_list ( action . connectionParams . vlanId ) : add_vlan_thread = Thread ( target = self . add_vlan , name = action_id , args = ( vlan_id , full_name , port_mode , qnq , ctag ) ) add_vlan_thread_list . append ( add_vlan_thread ) elif action . type == "removeVlan" : for vlan_id in self . _get_vlan_list ( action . connectionParams . vlanId ) : remove_vlan_thread = Thread ( target = self . remove_vlan , name = action_id , args = ( vlan_id , full_name , port_mode , ) ) remove_vlan_thread_list . append ( remove_vlan_thread ) else : self . _logger . warning ( "Undefined action type determined '{}': {}" . format ( action . type , action . __dict__ ) ) continue # Start all created remove _ vlan _ threads for thread in remove_vlan_thread_list : thread . start ( ) # Join all remove _ vlan _ threads . Main thread will wait completion of all remove _ vlan _ thread for thread in remove_vlan_thread_list : thread . join ( ) # Start all created add _ vlan _ threads for thread in add_vlan_thread_list : thread . start ( ) # Join all add _ vlan _ threads . Main thread will wait completion of all add _ vlan _ thread for thread in add_vlan_thread_list : thread . join ( ) request_result = [ ] for action in holder . driverRequest . actions : result_statuses , message = zip ( * self . result . get ( action . actionId ) ) if all ( result_statuses ) : action_result = ConnectivitySuccessResponse ( action , "Add Vlan {vlan} configuration successfully completed" . format ( vlan = action . connectionParams . vlanId ) ) else : message_details = "\n\t" . join ( message ) action_result = ConnectivityErrorResponse ( action , "Add Vlan {vlan} configuration failed." "\nAdd Vlan configuration details:\n{message_details}" . format ( vlan = action . connectionParams . vlanId , message_details = message_details ) ) request_result . append ( action_result ) driver_response . actionResults = request_result driver_response_root . driverResponse = driver_response return serialize_to_json ( driver_response_root )
def check_unique_attr ( self , attrs , user = None , form = None , flash_msg = None ) : """Checks that an attribute of the current user is unique amongst all users . If no value is provided , the current form will be used ."""
user = user or self . current ucol = self . options [ "username_column" ] emailcol = self . options [ "email_column" ] if not isinstance ( attrs , ( list , tuple , dict ) ) : attrs = [ attrs ] for name in attrs : if isinstance ( attrs , dict ) : value = attrs [ name ] else : form = form or current_context . data . get ( "form" ) if not form : raise OptionMissingError ( "Missing 'value' option or form in 'check_user_unique_attr' action" ) value = form [ name ] . data if name == ucol and not self . options [ "username_case_sensitive" ] : filters = ( ucol + '_lcase' , value . strip ( ) . lower ( ) ) elif name == emailcol : filters = ( emailcol , value . strip ( ) . lower ( ) ) else : filters = ( name , value . strip ( ) ) if self . query . filter ( { "$and" : [ filters , ( "id__ne" , user . id ) ] } ) . count ( ) > 0 : if flash_msg is None : flash_msg = "The %s is already in use" % name if flash_msg : flash ( flash_msg , "error" ) current_context . exit ( trigger_action_group = "user_attr_not_unique" )
def get_precinctsreporting ( self , obj ) : """Precincts reporting if vote is top level result else ` ` None ` ` ."""
if obj . division . level == obj . candidate_election . election . division . level : return obj . candidate_election . election . meta . precincts_reporting return None
def tail_threshold ( vals , N = 1000 ) : """Determine a threshold above which there are N louder values"""
vals = numpy . array ( vals ) if len ( vals ) < N : raise RuntimeError ( 'Not enough input values to determine threshold' ) vals . sort ( ) return min ( vals [ - N : ] )
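Because the array is sorted ascending, min(vals[-N:]) is simply the N-th largest value; a quick check:

import numpy

vals = numpy.random.uniform(0, 1, size=5000)
vals.sort()
assert min(vals[-1000:]) == vals[-1000]
print(vals[-1000])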
def increment ( self , subname = None , delta = 1 ) : '''Increment the gauge with ` delta ` : keyword subname : The subname to report the data to ( appended to the client name ) : type subname : str : keyword delta : The delta to add to the gauge : type delta : int > > > gauge = Gauge ( ' application _ name ' ) > > > gauge . increment ( ' gauge _ name ' , 10) True > > > gauge . increment ( delta = 10) True > > > gauge . increment ( ' gauge _ name ' ) True'''
delta = int ( delta ) sign = "+" if delta >= 0 else "" return self . _send ( subname , "%s%d" % ( sign , delta ) )
def scan_file ( self , this_file , this_filename ) : """Submit a file to be scanned by Malwares : param this _ file : File to be scanned ( 200MB file size limit ) : param this _ filename : Filename for scanned file : return : JSON response that contains scan _ id and permalink ."""
params = { 'api_key' : self . api_key , 'filename' : this_filename } try : files = { 'file' : ( this_file . name , open ( this_file . name , 'rb' ) , 'application/octet-stream' ) } except TypeError as e : return dict ( error = e . message ) try : response = requests . post ( self . base + 'file/upload' , files = files , data = params ) except requests . RequestException as e : return dict ( error = e . message ) return _return_response_and_status_code ( response )
def tuple_division ( tup1 , tup2 ) : """Function to divide corresponding elements in two given tuples . > > > tuple _ division ( ( 10 , 4 , 6 , 9 ) , ( 5 , 2 , 3 , 3 ) ) (2 , 2 , 2 , 3) > > > tuple _ division ( ( 12 , 6 , 8 , 16 ) , ( 6 , 3 , 4 , 4 ) ) (2 , 2 , 2 , 4) > > > tuple _ division ( ( 20 , 14 , 36 , 18 ) , ( 5 , 7 , 6 , 9 ) ) (4 , 2 , 6 , 2) : param tup1 : First tuple of integers . : param tup2 : Second tuple of integers . : return : New tuple with each element being the integer division of elements from input tuples at the corresponding position ."""
return tuple ( ele1 // ele2 for ele1 , ele2 in zip ( tup1 , tup2 ) )
def playbook_treeview ( playbook ) : """Creates a fake filesystem with playbook files and uses generate _ tree ( ) to recurse and return a JSON structure suitable for bootstrap - treeview ."""
fs = fake_filesystem . FakeFilesystem ( ) mock_os = fake_filesystem . FakeOsModule ( fs ) files = models . File . query . filter ( models . File . playbook_id . in_ ( [ playbook ] ) ) paths = { } for file in files : fs . create_file ( file . path ) paths [ file . path ] = file . id return jsonutils . dumps ( generate_tree ( '/' , paths , mock_os ) , sort_keys = True , indent = 2 )
def embed_data_in_blockchain ( data , private_key , blockchain_client = BlockchainInfoClient ( ) , fee = OP_RETURN_FEE , change_address = None , format = 'bin' ) : """Builds , signs , and dispatches an OP _ RETURN transaction ."""
# build and sign the tx signed_tx = make_op_return_tx ( data , private_key , blockchain_client , fee = fee , change_address = change_address , format = format ) # dispatch the signed transaction to the network response = broadcast_transaction ( signed_tx , blockchain_client ) # return the response return response
def most_frequent_item ( lst ) : """This function determines the element with the highest frequency in a given list . Examples : most _ frequent _ item ( [ 1 , 2 , 3 , 1 , 2 , 3 , 12 , 4 , 2 ] ) - > 2 most _ frequent _ item ( [ 1 , 2 , 6 , 7 , 0 , 1 , 0 , 1 , 0 ] ) - > 1 most _ frequent _ item ( [ 1 , 2 , 3 , 1 , 2 , 4 , 1 ] ) - > 1 : param lst : A list of elements , which could be integers or any other data type . : return : Returns the item with the highest occurrence in the input list ."""
max_count = 0 frequent_item = lst [ 0 ] for item in lst : item_count = lst . count ( item ) if ( item_count > max_count ) : max_count = item_count frequent_item = item return frequent_item
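The loop above is O(n^2) because list.count rescans the list for every item; collections.Counter gives the same answers for the docstring examples in one pass (tie-breaking is an implementation detail in both versions):

from collections import Counter

print(Counter([1, 2, 3, 1, 2, 3, 12, 4, 2]).most_common(1)[0][0])  # 2
print(Counter([1, 2, 6, 7, 0, 1, 0, 1, 0]).most_common(1)[0][0])   # 1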
def readDataAsync ( self , fileName , callback ) : """Interprets the specified data file asynchronously . When interpreting is over , the specified callback is called . The file is interpreted as data . As a side effect , it invalidates all entities ( as the passed file can contain any arbitrary command ) ; the lists of entities will be re - populated lazily ( at first access ) Args : fileName : Full path to the file . callback : Callback to be executed when the file has been interpreted ."""
def async_call ( ) : self . _lock . acquire ( ) try : self . _impl . readData ( fileName ) self . _errorhandler_wrapper . check ( ) except Exception : self . _lock . release ( ) raise else : self . _lock . release ( ) callback . run ( ) Thread ( target = async_call ) . start ( )
def impute_dataframe_zero ( df_impute ) : """Replaces all ` ` NaNs ` ` , ` ` - infs ` ` and ` ` + infs ` ` from the DataFrame ` df _ impute ` with 0s . The ` df _ impute ` will be modified in place . All its columns will be into converted into dtype ` ` np . float64 ` ` . : param df _ impute : DataFrame to impute : type df _ impute : pandas . DataFrame : return df _ impute : imputed DataFrame : rtype df _ impute : pandas . DataFrame"""
df_impute . replace ( [ np . PINF , np . NINF ] , 0 , inplace = True ) df_impute . fillna ( 0 , inplace = True ) # Ensure a dtype of np . float64 ( assign the result ; astype returns a new object ) df_impute = df_impute . astype ( np . float64 , copy = False ) return df_impute
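A quick usage sketch on a small frame, assuming the function above and its numpy/pandas imports (np, pd) are in scope:

import numpy as np
import pandas as pd

df = pd.DataFrame({"a": [1.0, np.nan, np.inf], "b": [-np.inf, 2.0, 3.0]})
df = impute_dataframe_zero(df)
print(df.dtypes)   # both columns float64
print(df)          # all NaN/inf cells replaced by 0.0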
def get_uri ( source ) : """Check a media source as a valid file or uri and return the proper uri"""
import gst src_info = source_info ( source ) if src_info [ 'is_file' ] : # Is this a file ? return get_uri ( src_info [ 'uri' ] ) elif gst . uri_is_valid ( source ) : # Is this a valid URI source for Gstreamer uri_protocol = gst . uri_get_protocol ( source ) if gst . uri_protocol_is_supported ( gst . URI_SRC , uri_protocol ) : return source else : raise IOError ( 'Invalid URI source for Gstreamer' ) else : raise IOError ( 'Failed getting uri for path %s: no such file' % source )
def rebase_array ( d , recursive = False ) : """Transform an indexed dictionary ( such as those returned by the dzn2dict function when parsing arrays ) into an multi - dimensional list . Parameters d : dict The indexed dictionary to convert . bool : recursive Whether to rebase the array recursively . Returns list A multi - dimensional list ."""
arr = [ ] min_val , max_val = _extremes ( d . keys ( ) ) for idx in range ( min_val , max_val + 1 ) : v = d [ idx ] if recursive and _is_dict ( v ) : v = rebase_array ( v ) arr . append ( v ) return arr
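A sketch of the rebasing idea on a 1-based indexed dict (the nested, recursive case works the same way one level at a time):

d = {1: "a", 2: "b", 3: "c"}
lo, hi = min(d), max(d)
print([d[i] for i in range(lo, hi + 1)])   # ['a', 'b', 'c']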
def flush ( self ) : """Write all data from buffer to socket and reset write buffer ."""
if self . _wbuf : buffer = '' . join ( self . _wbuf ) self . _wbuf = [ ] self . write ( buffer )
def POST ( self , ** kwargs ) : '''Start an execution command and immediately return the job id . . http : post : : / minions : reqheader X - Auth - Token : | req _ token | : reqheader Accept : | req _ accept | : reqheader Content - Type : | req _ ct | : resheader Content - Type : | res _ ct | : status 200 : | 200 | : status 400 : | 400 | : status 401 : | 401 | : status 406 : | 406 | Lowstate data describing Salt commands must be sent in the request body . The ` ` client ` ` option will be set to : py : meth : ` ~ salt . client . LocalClient . local _ async ` . * * Example request : * * . . code - block : : bash curl - sSi localhost : 8000 / minions \ - b ~ / cookies . txt \ - H " Accept : application / x - yaml " \ - d ' [ { " tgt " : " * " , " fun " : " status . diskusage " } ] ' . . code - block : : text POST / minions HTTP / 1.1 Host : localhost : 8000 Accept : application / x - yaml Content - Type : application / json tgt = * & fun = status . diskusage * * Example response : * * . . code - block : : text HTTP / 1.1 202 Accepted Content - Length : 86 Content - Type : application / x - yaml return : - jid : ' 20130603122505459265' minions : [ ms - 4 , ms - 3 , ms - 2 , ms - 1 , ms - 0] _ links : jobs : - href : / jobs / 20130603122505459265'''
job_data = list ( self . exec_lowstate ( client = 'local_async' , token = cherrypy . session . get ( 'token' ) ) ) cherrypy . response . status = 202 return { 'return' : job_data , '_links' : { 'jobs' : [ { 'href' : '/jobs/{0}' . format ( i [ 'jid' ] ) } for i in job_data if i ] , } , }