signature
stringlengths 29
44.1k
| implementation
stringlengths 0
85.2k
|
---|---|
def ceil_nearest(x, dx=1):
    """Round *x* upward to the nearest multiple of *dx*.

    The result is rounded to the significant-digit precision of *dx*
    (via the module-level ``get_sig_digits`` helper) so floating-point
    noise from the multiplication is suppressed.
    """
    multiples = math.ceil(float(x) / dx)
    return round(multiples * dx, get_sig_digits(dx))
|
def distanceMetric(thing_A, thing_B):
    '''A "universal distance" metric that can be used as a default in many settings.

    Parameters
    ----------
    thing_A : object
        A generic object.
    thing_B : object
        Another generic object.

    Returns
    -------
    distance : float
        The "distance" between thing_A and thing_B.
    '''
    typeA = type(thing_A)
    typeB = type(thing_B)
    if typeA is list and typeB is list:
        # If both inputs are lists, then the distance between them is the
        # maximum distance between corresponding elements in the lists.
        # If they differ in length, the distance is the difference in lengths.
        lenA = len(thing_A)
        lenB = len(thing_B)
        if lenA == lenB:
            if lenA == 0:
                # BUG FIX: max() of an empty sequence raises ValueError,
                # so two empty lists are explicitly at distance zero.
                distance = 0.0
            else:
                distance = max(
                    distanceMetric(thing_A[n], thing_B[n]) for n in range(lenA)
                )
        else:
            distance = float(abs(lenA - lenB))
    # If both inputs are numbers, return their absolute difference.
    # BUG FIX: the original first clause tested ``typeA is int or
    # typeB is float``, so pairs like (float, int) fell through to the
    # 1000.0 failsafe instead of being compared numerically.
    elif (typeA is int or typeA is float) and (typeB is int or typeB is float):
        distance = float(abs(thing_A - thing_B))
    # If both inputs are array-like, return the maximum absolute difference
    # between corresponding elements (if same shape); return the largest
    # difference in dimensions if shapes do not align.
    elif hasattr(thing_A, 'shape') and hasattr(thing_B, 'shape'):
        if thing_A.shape == thing_B.shape:
            distance = np.max(abs(thing_A - thing_B))
        else:
            # BUG FIX: ``shape`` is a tuple and tuples do not support
            # subtraction; convert to arrays before differencing.
            shapeA = np.array(thing_A.shape, dtype=float)
            shapeB = np.array(thing_B.shape, dtype=float)
            distance = np.max(np.abs(shapeA - shapeB))
    # If none of the above cases, but the objects are of the same class,
    # call the distance method of one on the other (two plain functions
    # are considered identical).
    elif thing_A.__class__.__name__ == thing_B.__class__.__name__:
        if thing_A.__class__.__name__ == 'function':
            distance = 0.0
        else:
            distance = thing_A.distance(thing_B)
    else:
        # Failsafe: the inputs are very far apart.
        distance = 1000.0
    return distance
|
def process_extensions(headers: Headers, available_extensions: Optional[Sequence[ClientExtensionFactory]],) -> List[Extension]:
    """Handle the Sec-WebSocket-Extensions HTTP response header.

    Check that each extension is supported, as well as its parameters, and
    return the list of accepted extensions.

    Raise :exc:`~websockets.exceptions.InvalidHandshake` to abort the
    connection.

    :rfc:`6455` leaves the rules up to the specification of each extension.
    For each extension accepted by the server we look for a match among the
    extensions available in the client configuration; if none is found an
    exception is raised. Extensions must implement their own requirements —
    the list of previously accepted extensions is passed to each factory.
    """
    accepted_extensions: List[Extension] = []
    header_values = headers.get_all("Sec-WebSocket-Extensions")
    if not header_values:
        return accepted_extensions
    if available_extensions is None:
        raise InvalidHandshake("No extensions supported")
    # Flatten the parsed extension headers into a single list.
    parsed_header_values: List[ExtensionHeader] = [
        parsed
        for header_value in header_values
        for parsed in parse_extension(header_value)
    ]
    for name, response_params in parsed_header_values:
        matched = False
        for extension_factory in available_extensions:
            # Skip non-matching extensions based on their name.
            if extension_factory.name != name:
                continue
            # Skip non-matching extensions based on their params.
            try:
                extension = extension_factory.process_response_params(response_params, accepted_extensions)
            except NegotiationError:
                continue
            # Record the first matching extension and stop searching.
            accepted_extensions.append(extension)
            matched = True
            break
        if not matched:
            # No extension in our list matched what the server sent.
            raise NegotiationError(f"Unsupported extension: name = {name}, params = {response_params}")
    return accepted_extensions
|
def handle_hotmaps_results(permutation_result):
    """Convert raw multiprocess_permutation output into a tidy DataFrame.

    Parameters
    ----------
    permutation_result : list
        output from multiprocess_permutation

    Returns
    -------
    permutation_df : pd.DataFrame
        formatted output suitable to save
    """
    # Six-field rows omit the 'index' column.
    if len(permutation_result[0]) == 6:
        mycols = ['gene', 'window length', 'codon position',
                  'mutation count', 'windowed sum', 'p-value']
    else:
        mycols = ['gene', 'window length', 'codon position', 'index',
                  'mutation count', 'windowed sum', 'p-value']
    permutation_df = pd.DataFrame(permutation_result, columns=mycols)
    # Benjamini-Hochberg adjust p-values separately within each window length.
    permutation_df['q-value'] = 1
    for window in permutation_df['window length'].unique():
        mask = permutation_df['window length'] == window
        permutation_df.loc[mask, 'q-value'] = mypval.bh_fdr(permutation_df.loc[mask, 'p-value'])
    # Order rows by window length then significance, and fix column order.
    permutation_df = permutation_df.sort_values(by=['window length', 'p-value'])
    return permutation_df[mycols + ['q-value']]
|
def is_url_ok(self):
    """Verify that the Keystone Auth URL responds with HTTP 200."""
    status = requests.head(settings.KEYSTONE_AUTH_URL).status_code
    return status == 200
|
def md5(self, path):
    """Compute the MD5 checksum of *path* on the remote host.

    Uses a different md5 command depending on the remote OS:
    - Darwin's `md5` returns BSD-style checksums by default
    - Linux's `md5sum` needs the `--tag` flag for a similar output

    Example of the parsed output:
        MD5 (foo.txt) = f3d220a856b52aabbf294351e8a24300
    """
    os_name = self.execute("uname").strip()
    commands = {
        "Darwin": "md5 {}".format(path),
        "Linux": "md5sum --tag {}".format(path),
    }
    if os_name not in commands:
        raise DvcException("'{uname}' is not supported as a remote".format(uname=os_name))
    # The checksum is the last whitespace-separated token of the output.
    checksum = self.execute(commands[os_name]).split()[-1]
    assert len(checksum) == 32
    return checksum
|
def sample_from_distribution(self, distribution, k, proportions=False):
    """Return a new table with the same number of rows and a new column.

    The values in the ``distribution`` column define a multinomial; the new
    column holds sample counts drawn from it (or sample proportions when
    ``proportions`` is True).
    """
    counts = self._get_column(distribution)
    assert sum(counts) > 0 and np.all(counts >= 0), 'Counts or a distribution required'
    # Normalize to probabilities and draw k multinomial samples.
    weights = counts / sum(counts)
    sample = np.random.multinomial(k, weights)
    if proportions:
        sample = sample / sum(sample)
    new_label = self._unused_label(self._as_label(distribution) + ' sample')
    return self.with_column(new_label, sample)
|
def readBerSANS(filename):
    """Read a header from a SANS file (produced usually by BerSANS).

    Parameters
    ----------
    filename : str
        path of the BerSANS file to read

    Returns
    -------
    dict
        the parsed header; selected keys are renamed via the ``translate``
        table and values are parsed with ``misc.parse_number``.
    """
    hed = {'Comment': ''}
    # Map BerSANS field names onto the canonical header names used elsewhere.
    translate = {
        'Lambda': 'Wavelength',
        'Title': 'Owner',
        'SampleName': 'Title',
        'BeamcenterX': 'BeamPosY',
        'BeamcenterY': 'BeamPosX',
        'Time': 'MeasTime',
        'TotalTime': 'MeasTime',
        'Moni1': 'Monitor',
        'Moni2': 'Monitor',
        'Moni': 'Monitor',
        'Transmission': 'Transm',
    }
    with open(filename, 'rt') as f:
        comment_next = False
        for l in f:
            l = l.strip()
            if comment_next:
                hed['Comment'] = hed['Comment'] + '\n' + l
                comment_next = False
            elif l.startswith('%Counts'):
                # Detector counts follow; the header section is over.
                break
            elif l.startswith('%Comment'):
                comment_next = True
            elif l.startswith('%'):
                continue
            elif l.split('=', 1)[0] in translate:
                hed[translate[l.split('=', 1)[0]]] = misc.parse_number(l.split('=', 1)[1])
            else:
                try:
                    hed[l.split('=', 1)[0]] = misc.parse_number(l.split('=', 1)[1])
                except IndexError:
                    # Not a key=value line; report it and move on.
                    print(l.split('=', 1))
    if 'FileName' in hed:
        # BUG FIX: use a raw string for the regex; '\d' in a plain string is
        # an invalid escape sequence (SyntaxWarning on Python >= 3.12).
        m = re.match(r'D(\d+)\.(\d+)', hed['FileName'])
        if m is not None:
            hed['FSN'] = int(m.groups()[0])
            hed['suffix'] = int(m.groups()[1])
    if 'FileDate' in hed:
        hed['Date'] = dateutil.parser.parse(hed['FileDate'])
    if 'FileTime' in hed:
        # NOTE(review): assumes a FileDate line preceded FileTime so that
        # hed['Date'] already exists here — confirm against real files.
        hed['Date'] = datetime.datetime.combine(hed['Date'].date(), dateutil.parser.parse(hed['FileTime']).time())
    hed['__Origin__'] = 'BerSANS'
    if 'SD' in hed:
        hed['Dist'] = hed['SD'] * 1000
    if hed['Comment'].startswith('\n'):
        hed['Comment'] = hed['Comment'][1:]
    hed['__particle__'] = 'neutron'
    if 'Wavelength' in hed:
        # ROBUSTNESS FIX: only convert when a Lambda line was present; the
        # original raised KeyError for headers without one.
        hed['Wavelength'] *= 10  # convert from nanometres to Angstroems
    return hed
|
def conference(self, id, **options):
    """Conference multiple lines in separate sessions together so that the
    parties on each line can talk to each other simultaneously.

    This is a voice channel only feature.

    Argument: "id" is a String
    Argument: **options is a set of optional keyword arguments.
    See https://www.tropo.com/docs/webapi/conference
    """
    step = Conference(id, **options)
    self._steps.append(step.obj)
|
def best_training_job(self):
    """Return the name of the best training job for the latest
    hyperparameter tuning job.

    Raises:
        Exception: If there is no best training job available for the
            hyperparameter tuning job.
    """
    self._ensure_last_tuning_job()
    tuning_job_name = self.latest_tuning_job.name
    description = self.estimator.sagemaker_session.sagemaker_client.describe_hyper_parameter_tuning_job(
        HyperParameterTuningJobName=tuning_job_name
    )
    try:
        return description['BestTrainingJob']['TrainingJobName']
    except KeyError:
        raise Exception('Best training job not available for tuning job: {}'.format(tuning_job_name))
|
def _parse_redirect(self, element):
    """Parse a trigger redirect.

    :param element: The XML Element object
    :type element: etree._Element
    """
    self._log.info('Parsing new redirected Response')
    redirected = Response(self, element, self.file_path)
    self._responses.add(redirected)
|
def convert_to_relative_paths(src, dst):
    """Convert all file paths in a smother report to paths relative to the
    current directory, writing the converted report to *dst*."""
    report = Smother.load(src)
    report.convert_to_relative_paths().write(dst)
|
def _generate_route_method_decl(self, namespace, route, arg_data_type, request_binary_body, method_name_suffix='', extra_args=None):
    """Generates the method prototype for a route.

    Builds the argument list for the generated client method and emits
    ``def <namespace>_<route><suffix>(<args>):`` through
    self.generate_multiline_list.

    :param namespace: the API namespace the route belongs to
    :param route: the route whose client method is being declared
    :param arg_data_type: spec type of the request argument (struct, union,
        or void)
    :param request_binary_body: True when the request carries a binary body,
        in which case a file-like ``f`` parameter is added
    :param method_name_suffix: optional suffix appended to the route name
    :param extra_args: extra argument strings inserted right after ``self``
    """
    args = ['self']
    if extra_args:
        args += extra_args
    if request_binary_body:
        # Binary-body routes take a file-like object as argument 'f'.
        args.append('f')
    if is_struct_type(arg_data_type):
        # Struct requests: expand each field into its own argument.
        for field in arg_data_type.all_fields:
            if is_nullable_type(field.data_type):
                # Nullable fields default to None.
                args.append('{}=None'.format(field.name))
            elif field.has_default:
                # TODO(kelkabany): Decide whether we really want to set the
                # default in the argument list. This will send the default
                # over the wire even if it isn't overridden. The benefit is
                # it locks in a default even if it is changed server-side.
                if is_user_defined_type(field.data_type):
                    ns = field.data_type.namespace
                else:
                    ns = None
                arg = '{}={}'.format(field.name, self._generate_python_value(ns, field.default))
                args.append(arg)
            else:
                # Required field: argument with no default.
                args.append(field.name)
    elif is_union_type(arg_data_type):
        # Union requests are passed as a single 'arg' value.
        args.append('arg')
    elif not is_void_type(arg_data_type):
        raise AssertionError('Unhandled request type: %r' % arg_data_type)
    method_name = fmt_func(route.name + method_name_suffix, version=route.version)
    namespace_name = fmt_underscores(namespace.name)
    self.generate_multiline_list(args, 'def {}_{}'.format(namespace_name, method_name), ':')
|
def configure_for_kerberos(self, datanode_transceiver_port=None, datanode_web_port=None):
    """Command to configure the cluster to use Kerberos for authentication.

    This command will configure all relevant services on a cluster for
    Kerberos usage and trigger a GenerateCredentials command to create
    Kerberos keytabs for all roles in the cluster.

    @param datanode_transceiver_port: The HDFS DataNode transceiver port to
        use. Applied to all DataNode role configuration groups. If not
        specified, this will default to 1004.
    @param datanode_web_port: The HDFS DataNode web port to use. Applied to
        all DataNode role configuration groups. If not specified, this will
        default to 1006.
    @return: Reference to the submitted command.
    @since: API v11
    """
    # Only forward ports that were explicitly provided (truthy).
    ports = {
        'datanodeTransceiverPort': datanode_transceiver_port,
        'datanodeWebPort': datanode_web_port,
    }
    args = {key: value for key, value in ports.items() if value}
    return self._cmd('configureForKerberos', data=args, api_version=11)
|
def run_one(self, name, migrator, fake=True, downgrade=False, force=False):
    """Run/emulate a migration with given name.

    :param name: name of the migration module to run
    :param migrator: migrator whose collected operations get executed
    :param fake: emulate the migration without executing SQL
    :param downgrade: run the migration's rollback instead of migrate
    :param force: with ``fake``, still record the migration as applied
    """
    try:
        migrate, rollback = self.read(name)
        if fake:
            # Patch peewee so the migration code runs without touching
            # the database.
            with mock.patch('peewee.Model.select'):
                with mock.patch('peewee.Query._execute'):
                    migrate(migrator, self.database, fake=fake)
            if force:
                # Record the migration as applied even though it was faked.
                self.model.create(name=name)
            self.logger.info('Done %s', name)
            migrator.clean()
            return migrator
        with self.database.transaction():
            if not downgrade:
                self.logger.info('Migrate "%s"', name)
                migrate(migrator, self.database, fake=fake)
                migrator.run()
                # Remember that this migration has been applied.
                self.model.create(name=name)
            else:
                self.logger.info('Rolling back %s', name)
                rollback(migrator, self.database, fake=fake)
                migrator.run()
                # Forget the migration so it can be re-applied later.
                self.model.delete().where(self.model.name == name).execute()
            self.logger.info('Done %s', name)
    except Exception:
        # Abort the transaction, log the failure with traceback, and
        # propagate to the caller.
        self.database.rollback()
        operation = 'Migration' if not downgrade else 'Rollback'
        self.logger.exception('%s failed: %s', operation, name)
        raise
|
def bandstructure_flow(workdir, scf_input, nscf_input, dos_inputs=None, manager=None, flow_class=Flow, allocate=True):
    """Build a :class:`Flow` for band structure calculations.

    Args:
        workdir: Working directory.
        scf_input: Input for the GS SCF run.
        nscf_input: Input for the NSCF run (band structure run).
        dos_inputs: Input(s) for the NSCF run (dos run).
        manager: :class:`TaskManager` object used to submit the jobs.
            Initialized from manager.yml if manager is None.
        flow_class: Flow subclass.
        allocate: True if the flow should be allocated before returning.

    Returns:
        :class:`Flow` object
    """
    flow = flow_class(workdir, manager=manager)
    work = BandStructureWork(scf_input, nscf_input, dos_inputs=dos_inputs)
    flow.register_work(work)
    # Expose handy aliases for the individual tasks on the flow itself.
    flow.scf_task = work.scf_task
    flow.nscf_task = work.nscf_task
    flow.dos_tasks = work.dos_tasks
    if allocate:
        flow.allocate()
    return flow
|
def _profile_loglike(self, x):
    """Internal function to calculate and cache the profile likelihood."""
    x = np.array(x, ndmin=1)
    best_nuisance = []
    loglikes = []
    for xval in x:
        # Profile out the nuisance parameter: minimize -loglike at fixed xval.
        def negative_loglike(t):
            return -self.loglike(xval, t)
        t_best = opt.fmin(negative_loglike, 1.0, disp=False)[0]
        loglikes.append(self.loglike(xval, t_best))
        best_nuisance.append(t_best)
    self._prof_y = np.array(best_nuisance)
    self._prof_z = np.array(loglikes)
    # Convert to a delta-loglike relative to the profile maximum.
    self._prof_z = self._prof_z.max() - self._prof_z
    self._prof_interp = castro.Interpolator(x, self._prof_z)
    return self._prof_y, self._prof_z
|
def Romeo_2002(Re, eD):
    r'''Calculates the Darcy friction factor using the explicit correlation
    of Romeo (2002) [2]_ as shown in [1]_.

    .. math::
        \frac{1}{\sqrt{f_d}} = -2\log\left\{\frac{\epsilon}{3.7065D}\times
        \frac{5.0272}{Re}\times\log\left[\frac{\epsilon}{3.827D} -
        \frac{4.567}{Re}\times\log\left(\frac{\epsilon}{7.7918D}^{0.9924} +
        \left(\frac{5.3326}{208.815+Re}\right)^{0.9345}\right)\right]\right\}

    Parameters
    ----------
    Re : float
        Reynolds number, [-]
    eD : float
        Relative roughness, [-]

    Returns
    -------
    fd : float
        Darcy friction factor [-]

    Notes
    -----
    Range is 3E3 <= Re <= 1.5E8; 0 <= eD <= 5E-2.

    Examples
    --------
    >>> Romeo_2002(1E5, 1E-4)
    0.018530291219676177

    References
    ----------
    .. [1] Winning, H. and T. Coole. "Explicit Friction Factor Accuracy and
       Computational Efficiency for Turbulent Flow in Pipes." Flow,
       Turbulence and Combustion 90, no. 1 (January 1, 2013): 1-27.
       doi:10.1007/s10494-012-9419-7
    .. [2] Romeo, Eva, Carlos Royo, and Antonio Monzon. "Improved Explicit
       Equations for Estimation of the Friction Factor in Rough and Smooth
       Pipes." Chemical Engineering Journal 86, no. 3 (April 28, 2002):
       369-74. doi:10.1016/S1385-8947(01)00254-6.
    '''
    # Innermost logarithm term.
    inner = log10((eD / 7.7918)**0.9924 + (5.3326 / (208.815 + Re))**0.9345)
    # Middle logarithm term.
    middle = log10(eD / 3.827 - 4.567 / Re * inner)
    # Colebrook-style relation: 1/sqrt(fd) = -2*log10(...), so fd = (...)**-2.
    one_over_sqrt_fd = -2 * log10(eD / 3.7065 - 5.0272 / Re * middle)
    return one_over_sqrt_fd**-2
|
def flush(self, overlap=0):
    """Flush buffered data and return it.

    When ``overlap`` is positive and smaller than the current position,
    the last ``overlap`` units are kept in the buffer so that consecutive
    flushes overlap.
    """
    # Fold the temporary chunks into the main buffer first.
    self.buf += self.empty.join(self.tmpbuf)
    self.tmpbuf = []
    if not overlap or overlap >= self.pos:
        data, self.buf = self.buf, self.empty
    else:
        data, self.buf = self.buf[:-overlap], self.buf[-overlap:]
    return data
|
def resolve_nested_dict(nested_dict):
    """Flatten a nested dict by joining keys into tuples of paths.

    The result can then be passed into ``format_vars``.
    """
    flattened = {}
    # Iterative depth-first walk instead of recursion.
    pending = [((), nested_dict)]
    while pending:
        prefix, mapping = pending.pop()
        for key, value in mapping.items():
            path = prefix + (key,)
            if isinstance(value, dict):
                pending.append((path, value))
            else:
                flattened[path] = value
    return flattened
|
def timestamps(self, col: str, **kwargs):
    """Add a timestamps column computed from a date column.

    :param col: name of the source date column; it is converted in place
        with ``pd.to_datetime``
    :type col: str
    :param \*\*kwargs: keyword arguments for ``pd.to_datetime`` for date
        conversions; the special key ``name`` selects the destination
        column (default "Timestamps")
    :type \*\*kwargs: optional
    :example: ``ds.timestamps("mycol")``
    """
    try:
        name = "Timestamps"
        if "name" in kwargs:
            # NOTE(review): "name" is read here but NOT removed from kwargs,
            # so it is also forwarded to pd.to_datetime below — probably
            # unintended; confirm.
            name = kwargs["name"]
        if "errors" not in kwargs:
            # Invalid dates become NaT instead of raising.
            kwargs["errors"] = "coerce"
        if "unit" in kwargs:
            # NOTE(review): this overwrites any caller-supplied unit with
            # "ms" — looks like a bug; verify intent.
            kwargs["unit"] = "ms"
        try:
            self.df[col] = pd.to_datetime(self.df[col], **kwargs)
        except TypeError:
            # Presumably the column is already datetime-like; leave it as is.
            pass
        ts = []
        for el in self.df[col]:
            # arrow.get(...).timestamp: epoch value for each entry.
            ts.append(arrow.get(el).timestamp)
        self.df[name] = ts
    except Exception as e:
        self.err(e, "Can not convert to timestamps")
|
def tryload_cache_list(dpath, fname, cfgstr_list, verbose=False):
    """Load a list of similarly cached datas.

    Returns the loaded data plus boolean flags marking entries that missed
    the cache and still need to be computed.
    """
    data_list = []
    for cfgstr in cfgstr_list:
        data_list.append(tryload_cache(dpath, fname, cfgstr, verbose))
    ismiss_list = [data is None for data in data_list]
    return data_list, ismiss_list
|
def dispatch_event(event):
    """Dispatch the event being represented by the Event object.

    Args:
        event: Object holding information about the request to be
            dispatched to the Optimizely backend.

    Any ``requests`` error — including non-2xx responses surfaced via
    ``raise_for_status`` — is logged rather than propagated.
    """
    try:
        if event.http_verb == enums.HTTPVerbs.GET:
            requests.get(event.url, params=event.params, timeout=REQUEST_TIMEOUT).raise_for_status()
        elif event.http_verb == enums.HTTPVerbs.POST:
            requests.post(
                event.url,
                data=json.dumps(event.params),
                headers=event.headers,
                timeout=REQUEST_TIMEOUT,
            ).raise_for_status()
    except request_exception.RequestException as error:
        # FIX: use lazy %-style logging args instead of eager string
        # formatting ('... %s' % str(error)) so the message is only built
        # when the log level is enabled.
        logging.error('Dispatch event failed. Error: %s', error)
|
def unpack(self, gpsd_socket_response):
    """Set new socket data as DataStream attributes in the initialised
    dictionaries.

    Arguments:
        gpsd_socket_response (json object): one JSON object from gpsd.

    Provides:
        self attributes, e.g., self.lat, self.gdop; attributes absent from
        the data are (re)set to 'n/a'.

    Raises (handled internally):
        AttributeError: 'str' object has no attribute 'keys' when the
            device falls out of the system.
        ValueError, KeyError: most likely extra, or mangled JSON data,
            should not happen, but that applies to a lot of things.
    """
    try:
        fresh_data = json.loads(gpsd_socket_response)
        # 'class' identifies the gpsd package and selects the key list.
        class_name = fresh_data.pop('class')
        for key in self.packages[class_name]:
            # Fudge around the namespace collision with the GST data
            # package, where lat/lon are standard deviations: store them
            # as sdlat/sdlon.
            # BUG FIX: the original condition was
            #     class_name == 'GST' and key == 'lat' or 'lon'
            # which, by operator precedence, is always truthy ('lon' is a
            # non-empty string), so the sd-prefixed attribute was set for
            # EVERY key of EVERY package.
            if class_name == 'GST' and key in ('lat', 'lon'):
                setattr(self, 'sd' + key, fresh_data.get(key, 'n/a'))
            # Updates, and restores 'n/a' if attribute is absent in the data.
            setattr(self, key, fresh_data.get(key, 'n/a'))
    except AttributeError:  # 'str' object has no attribute 'keys'
        sys.stderr.write('There is an unexpected exception unpacking JSON object')
        return
    except (ValueError, KeyError) as error:
        # Extra data or aberrant data in stream.
        sys.stderr.write(str(error))
        return
|
def minicalendar(context):
    """Displays a little ajax version of the calendar."""
    today = dt.date.today()
    request = context['request']
    home = request.site.root_page
    cal = CalendarPage.objects.live().descendant_of(home).first()
    if cal:
        cal_url = cal.get_url(request)
        events = cal._getEventsByWeek(request, today.year, today.month)
    else:
        cal_url = None
        events = getAllEventsByWeek(request, today.year, today.month)
    return {
        'request': request,
        'today': today,
        'year': today.year,
        'month': today.month,
        'calendarUrl': cal_url,
        'monthName': calendar.month_name[today.month],
        'weekdayInfo': zip(weekday_abbr, weekday_name),
        'events': events,
    }
|
def run_script(self, script, in_shell=True, echo=None, note=None, loglevel=logging.DEBUG):
    """Run the passed-in string as a script on the target's command line.

    @param script:   String representing the script. It will be de-indented
                     and stripped before being run.
    @param in_shell: Indicate whether we are in a shell or not. (Default: True)
    @param echo:     Passed through to the send calls.
    @param note:     See send()
    @type script:    string
    @type in_shell:  boolean
    """
    shutit = self.shutit
    shutit.handle_note(note, 'Script: ' + str(script))
    shutit.log('Running script beginning: "' + ''.join(script.split())[:30] + ' [...]', level=logging.INFO)
    # Trim any whitespace lines from start and end of script, then dedent.
    lines = script.split('\n')
    while lines and re.match('^[ \t]*$', lines[0]):
        lines = lines[1:]
    while lines and re.match('^[ \t]*$', lines[-1]):
        lines = lines[:-1]
    if not lines:
        # Nothing left to run: report success.
        return True
    script = '\n'.join(lines)
    script = textwrap.dedent(script)
    # Send the script and run it in the manner specified.
    if shutit.build['delivery'] in ('docker', 'dockerfile') and in_shell:
        # Trace execution inside docker builds for easier debugging.
        script = ('set -o xtrace \n\n' + script + '\n\nset +o xtrace')
    # Stage the script on the target under the shutit state directory.
    self.quick_send('command mkdir -p ' + shutit_global.shutit_global_object.shutit_state_dir + '/scripts && chmod 777 ' + shutit_global.shutit_global_object.shutit_state_dir + '/scripts', echo=False)
    self.send_file(shutit_global.shutit_global_object.shutit_state_dir + '/scripts/shutit_script.sh', script, echo=False, loglevel=loglevel)
    self.quick_send('command chmod +x ' + shutit_global.shutit_global_object.shutit_state_dir + '/scripts/shutit_script.sh', echo=False)
    shutit.build['shutit_command_history'].append(' ' + script.replace('\n', '\n '))
    if in_shell:
        # Source the script so it runs in the current shell, then delete it.
        # NOTE(review): the 'rm -f' of the script is issued twice here —
        # looks redundant; confirm against upstream.
        ret = self.send(ShutItSendSpec(self, send=' . ' + shutit_global.shutit_global_object.shutit_state_dir + '/scripts/shutit_script.sh && rm -f ' + shutit_global.shutit_global_object.shutit_state_dir + '/scripts/shutit_script.sh && rm -f ' + shutit_global.shutit_global_object.shutit_state_dir + '/scripts/shutit_script.sh', echo=False, loglevel=loglevel))
    else:
        # Execute the script directly, then delete it.
        ret = self.send(ShutItSendSpec(self, send=' ' + shutit_global.shutit_global_object.shutit_state_dir + '/scripts/shutit_script.sh && rm -f ' + shutit_global.shutit_global_object.shutit_state_dir + '/scripts/shutit_script.sh', echo=False, loglevel=loglevel))
    shutit.handle_note_after(note=note)
    return ret
|
def _GetEventLogProviderKey ( self , log_source ) :
"""Retrieves the Event Log provider key .
Args :
log _ source ( str ) : Event Log source .
Returns :
str : Event Log provider key or None if not available .
Raises :
RuntimeError : if more than one value is found in the database ."""
|
table_names = [ 'event_log_providers' ]
column_names = [ 'event_log_provider_key' ]
condition = 'log_source == "{0:s}"' . format ( log_source )
values_list = list ( self . _database_file . GetValues ( table_names , column_names , condition ) )
number_of_values = len ( values_list )
if number_of_values == 0 :
return None
if number_of_values == 1 :
values = values_list [ 0 ]
return values [ 'event_log_provider_key' ]
raise RuntimeError ( 'More than one value found in database.' )
|
def _set_esp ( self , v , load = False ) :
    """Setter method for esp, mapped from the YANG variable
    /routing_system/interface/ve/ipv6/interface_ospfv3_conf/authentication/ipsec_auth_key_config/esp
    (algorithm-type-esp).

    If this variable is read-only (config: false) in the source YANG file,
    then _set_esp is considered as a private method. Backends looking to
    populate this variable should do so via calling thisObj._set_esp()
    directly.

    YANG Description: Specify Encapsulating Security Payload (ESP) as the
    protocol to provide packet-level security.
    """
    # Auto-generated pyangbind setter: wrap the incoming value in a
    # YANGDynClass carrying the YANG-derived restrictions and metadata.
    if hasattr ( v , "_utype" ) :
        # Unwrap values that carry their own union-type converter.
        v = v . _utype ( v )
    try :
        t = YANGDynClass ( v , base = RestrictedClassType ( base_type = unicode , restriction_type = "dict_key" , restriction_arg = { u'NULL' : { 'value' : 1 } } , ) , is_leaf = True , yang_name = "esp" , rest_name = "esp" , parent = self , choice = ( u'ch-algorithm' , u'ca-esp-algorithm' ) , path_helper = self . _path_helper , extmethods = self . _extmethods , register_paths = True , extensions = { u'tailf-common' : { u'info' : u'Specify Encapsulating Security Payload (ESP)' , u'cli-incomplete-command' : None } } , namespace = 'urn:brocade.com:mgmt:brocade-ospfv3' , defining_module = 'brocade-ospfv3' , yang_type = 'algorithm-type-esp' , is_config = True )
    except ( TypeError , ValueError ) :
        # Re-raise with a YANG-aware error message describing the expected type.
        raise ValueError ( { 'error-string' : """esp must be of a type compatible with algorithm-type-esp""" , 'defined-type' : "brocade-ospfv3:algorithm-type-esp" , 'generated-type' : """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'NULL': {'value': 1}},), is_leaf=True, yang_name="esp", rest_name="esp", parent=self, choice=(u'ch-algorithm', u'ca-esp-algorithm'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Specify Encapsulating Security Payload (ESP)', u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-ospfv3', defining_module='brocade-ospfv3', yang_type='algorithm-type-esp', is_config=True)""" , } )
    self . __esp = t
    if hasattr ( self , '_set' ) :
        # Notify the containing object that a child value changed.
        self . _set ( )
|
def _variant_sv ( checkpoints ) :
    """Structural variant workflow.

    Builds the CWL step definitions for SV calling: bin calculation,
    raw coverage, coverage normalization, batching, SV detection and
    summarization.

    :param checkpoints: dict of workflow feature flags; only "sv" and "vc"
        are consulted here.
    :return: tuple (steps, final_outputs); both are empty lists when SV
        calling is switched off.
    """
    if not checkpoints . get ( "sv" ) :
        # SV calling disabled: contribute no steps and no outputs.
        return [ ] , [ ]
    # Single batched SV detection step and its declared outputs/tools.
    sv = [ s ( "detect_sv" , "batch-single" , [ [ "sv_batch_rec" ] ] , [ cwlout ( "sv_rec" , "record" , fields = [ cwlout ( [ "sv" , "variantcaller" ] , [ "string" , "null" ] ) , cwlout ( [ "sv" , "vrn_file" ] , [ "File" , "null" ] , [ ".tbi" ] ) , cwlout ( [ "sv" , "supplemental" ] , { "type" : "array" , "items" : [ "File" ] } ) , cwlout ( [ "svvalidate" , "summary" ] , [ "File" , "null" ] ) , cwlout ( "inherit" , exclude = [ [ "align_bam" ] , [ "work_bam_plus" ] , [ "reference" , "snpeff" ] ] ) ] ) ] , "bcbio-vc" , [ "bedtools" , "cnvkit" , "delly" , "duphold" , "extract-sv-reads" , "gsort" , "lumpy-sv;env=python2" , "manta;env=python2" , "break-point-inspector" , "mosdepth" , "samtools" , "smoove;env=python2" , "pysam>=0.13.0" , "seq2c" , "simple_sv_annotation;env=python2" , "survivor" , "svtools;env=python2" , "svtyper;env=python2" , "r=3.5.1" , "r-base" , "xorg-libxt" , "vawk;env=python2" ] , disk = { "files" : 2.0 } ) ]
    # Inputs consumed by the SV batching step.
    sv_batch_inputs = [ [ "analysis" ] , [ "genome_build" ] , [ "work_bam_plus" , "disc" ] , [ "work_bam_plus" , "sr" ] , [ "config" , "algorithm" , "background" , "cnv_reference" ] , [ "config" , "algorithm" , "tools_on" ] , [ "config" , "algorithm" , "tools_off" ] , [ "config" , "algorithm" , "svprioritize" ] , [ "config" , "algorithm" , "svvalidate" ] , [ "regions" , "sample_callable" ] , [ "genome_resources" , "variation" , "gc_profile" ] , [ "genome_resources" , "variation" , "germline_het_pon" ] , [ "genome_resources" , "aliases" , "snpeff" ] , [ "reference" , "snpeff" , "genome_build" ] , [ "sv_coverage_rec" ] ]
    if checkpoints . get ( "vc" ) :
        # Small-variant calls additionally feed SV prioritization.
        sv_batch_inputs . append ( [ "variants" , "samples" ] )
    # Full pipeline: bins -> coverage -> normalization -> batch -> call -> summarize.
    steps = [ s ( "calculate_sv_bins" , "multi-combined" , [ [ "align_bam" ] , [ "reference" , "fasta" , "base" ] , [ "metadata" , "batch" ] , [ "metadata" , "phenotype" ] , [ "config" , "algorithm" , "background" , "cnv_reference" ] , [ "config" , "algorithm" , "callable_regions" ] , [ "config" , "algorithm" , "coverage_interval" ] , [ "config" , "algorithm" , "exclude_regions" ] , [ "config" , "algorithm" , "sv_regions" ] , [ "config" , "algorithm" , "variant_regions" ] , [ "config" , "algorithm" , "variant_regions_merged" ] , [ "config" , "algorithm" , "seq2c_bed_ready" ] , [ "config" , "algorithm" , "svcaller" ] , [ "depth" , "variant_regions" , "regions" ] , [ "genome_resources" , "variation" , "lcr" ] , [ "genome_resources" , "variation" , "polyx" ] , [ "genome_resources" , "variation" , "encode_blacklist" ] , [ "genome_resources" , "rnaseq" , "gene_bed" ] ] , [ cwlout ( "sv_bin_rec" , "record" , fields = [ cwlout ( [ "regions" , "bins" , "target" ] , [ "File" , "null" ] ) , cwlout ( [ "regions" , "bins" , "antitarget" ] , [ "File" , "null" ] ) , cwlout ( [ "regions" , "bins" , "gcannotated" ] , [ "File" , "null" ] ) , cwlout ( [ "regions" , "bins" , "group" ] , [ "string" , "null" ] ) , cwlout ( "inherit" ) ] ) ] , "bcbio-vc" , [ "bedtools" , "cnvkit" ] , disk = { "files" : 1.5 } , cores = 1 ) , s ( "calculate_sv_coverage" , "multi-parallel" , [ [ "sv_bin_rec" ] ] , [ cwlout ( "sv_rawcoverage_rec" , "record" , fields = [ cwlout ( [ "depth" , "bins" , "target" ] , [ "File" , "null" ] ) , cwlout ( [ "depth" , "bins" , "antitarget" ] , [ "File" , "null" ] ) , cwlout ( [ "depth" , "bins" , "seq2c" ] , [ "File" , "null" ] ) , cwlout ( "inherit" ) ] ) ] , "bcbio-vc" , [ "mosdepth" , "cnvkit" , "seq2c" ] , disk = { "files" : 1.5 } ) , s ( "normalize_sv_coverage" , "multi-combined" , [ [ "sv_rawcoverage_rec" ] ] , [ cwlout ( "sv_coverage_rec" , "record" , fields = [ cwlout ( [ "depth" , "bins" , "normalized" ] , [ "File" , "null" ] ) , cwlout ( [ "depth" , "bins" , "background" ] , [ "File" , "null" ] ) , cwlout ( "inherit" ) ] ) ] , "bcbio-vc" , [ "cnvkit" ] , disk = { "files" : 1.5 } ) , s ( "batch_for_sv" , "multi-batch" , sv_batch_inputs , [ cwlout ( "sv_batch_rec" , "record" ) ] , "bcbio-vc" , unlist = [ [ "config" , "algorithm" , "svcaller" ] ] ) , w ( "svcall" , "multi-parallel" , sv , [ ] ) , s ( "summarize_sv" , "multi-combined" , [ [ "sv_rec" ] ] , [ cwlout ( [ "sv" , "calls" ] , { "type" : "array" , "items" : [ "File" , "null" ] } ) , cwlout ( [ "sv" , "supplemental" ] , { "type" : "array" , "items" : [ "File" ] } ) , cwlout ( [ "sv" , "prioritize" , "tsv" ] , { "type" : "array" , "items" : [ "File" , "null" ] } ) , cwlout ( [ "sv" , "prioritize" , "raw" ] , { "type" : "array" , "items" : [ "File" , "null" ] } ) , cwlout ( [ "svvalidate" , "grading_summary" ] , [ "File" , "null" ] ) , cwlout ( [ "svvalidate" , "grading_plots" ] , { "type" : "array" , "items" : [ "File" , "null" ] } ) ] , "bcbio-vc" , [ "bcbio-prioritize" ] , disk = { "files" : 1.0 } , cores = 1 ) ]
    # Workflow-level outputs exposed to the caller.
    final_outputs = [ [ "sv" , "calls" ] , [ "svvalidate" , "grading_summary" ] , [ "sv" , "prioritize" , "tsv" ] , [ "sv" , "prioritize" , "raw" ] , [ "sv" , "supplemental" ] ]
    return steps , final_outputs
|
def parse_fingerprint(self, cmdline, key=None, sep=None):
    """Given a psutil.Process.cmdline, parse and return a fingerprint.

    :param list cmdline: The psutil.Process.cmdline of the current process.
    :param string key: The key for fingerprint discovery.
    :param string sep: The key/value separator for fingerprint discovery.
    :returns: The parsed fingerprint or `None`.
    :rtype: string or `None`
    """
    key = key or self.FINGERPRINT_CMD_KEY
    if key:
        sep = sep or self.FINGERPRINT_CMD_SEP
        cmdline = cmdline or []
        prefix = '{}{}'.format(key, sep)
        for cmd_part in cmdline:
            if cmd_part.startswith(prefix):
                # Split only on the first separator so a fingerprint value
                # that itself contains ``sep`` is returned intact.
                return cmd_part.split(sep, 1)[1]
|
def set_beam_prop(self, prop, values, repeat="up"):
    """Specify the properties of the beams.

    :param prop: name of the section property to set
    :param values: 1D array-like (one value per bay) if repeat == "up",
        otherwise 2D array-like indexed as [storey][bay]
    :param repeat: if 'up' then duplicate up the structure
    :return: None
    :raises ModelError: if the number of values per storey does not match
        the number of bays
    """
    values = np.array(values)
    if repeat == "up":
        assert len(values.shape) == 1
        # Duplicate the single row for every storey.
        values = [values for ss in range(self.n_storeys)]
    else:
        assert len(values.shape) == 2
    if len(values[0]) != self.n_bays:
        raise ModelError("beam depths does not match number of bays (%i)." % self.n_bays)
    for ss in range(self.n_storeys):
        for i in range(self.n_bays):
            # BUG FIX: use the row for this storey (values[ss]); previously
            # values[0] was applied to every storey, silently discarding
            # per-storey values in the 2D case.
            self._beams[ss][i].set_section_prop(prop, values[ss][i])
|
def get_format_modules(lang=None, reverse=False):
    """Return the list of format modules discovered for ``lang``.

    Results are memoized per-language in ``_format_modules_cache``.
    """
    active_lang = lang if lang is not None else get_language()
    # NOTE: setdefault always evaluates its default expression, matching the
    # original behavior of scanning modules on every call.
    discovered = _format_modules_cache.setdefault(active_lang, list(iter_format_modules(active_lang)))
    return list(reversed(discovered)) if reverse else discovered
|
def activate(self, *, filter_func=None):
    """Activate the type safety checker.

    After this call, all functions that need to be checked will be.

    :param filter_func: optional predicate restricting which modules are
        instrumented
    :raises RuntimeError: if the checker is already active
    """
    if self.active:
        raise RuntimeError("Type safety check already active")
    finder = ModuleFinder(Validator.decorate)
    self.__module_finder = finder
    if filter_func is not None:
        finder.set_filter(filter_func)
    finder.install()
|
def get_graph(self):
    """Return the most recent solve graph.

    The graph reflects the latest state of the solve:
    - unsolved: latest unsolved graph;
    - solved: final solved graph;
    - failed: most appropriate failure graph (see `failure_reason`);
    - cyclic: last failure graph (contains the cycle).

    Returns:
        A pygraph.digraph object.
    """
    # Anything other than solved/unsolved is a failure state.
    if self.status not in (SolverStatus.solved, SolverStatus.unsolved):
        return self.get_fail_graph()
    return self._latest_nonfailed_phase().get_graph()
|
def _fake_enumerateclassnames(self, namespace, **params):
    """Mock server responder for
    :meth:`~pywbem.WBEMConnection.EnumerateClassNames`.

    Enumerates classnames beneath the class named by the 'ClassName'
    parameter, or from the top of the tree when it is None.

    Returns:
        Tuple including the list of classnames.

    Raises:
        CIMError: CIM_ERR_INVALID_NAMESPACE for an invalid namespace;
        CIMError: CIM_ERR_INVALID_CLASS when the 'ClassName' parameter
            names a class that does not exist.
    """
    self._validate_namespace(namespace)
    cname_obj = params.get('ClassName', None)
    if cname_obj:
        assert isinstance(cname_obj, CIMClassName)
        if not self._class_exists(cname_obj.classname, namespace):
            raise CIMError(CIM_ERR_INVALID_CLASS, _format("The class {0!A} defined by 'ClassName' parameter " "does not exist in namespace {1!A}", cname_obj, namespace))
    subclass_names = self._get_subclass_names(cname_obj, namespace, params['DeepInheritance'])
    result = [CIMClassName(name, namespace=namespace, host=self.host) for name in subclass_names]
    return self._make_tuple(result)
|
def _str_desc(self, reader):
    """Return a one-line description of the current GO DAG."""
    release = reader.data_version
    if release is not None:
        release = release.replace("releases/", "")
    desc = "{OBO}: fmt({FMT}) rel({REL}) {N:,} GO Terms".format(OBO=reader.obo_file, FMT=reader.format_version, REL=release, N=len(self))
    if not reader.optobj:
        return desc
    attrs = " ".join(sorted(reader.optobj.optional_attrs))
    return "{D}; optional_attrs({A})".format(D=desc, A=attrs)
|
def generate(self, local_go_targets):
    """Automatically generate a Go target graph for the given local go targets.

    :param iter local_go_targets: The target roots to fill in a target graph for.
    :returns: list of (import_path, address) pairs for all visited targets
    :raises: :class:`GoTargetGenerator.GenerationError` if any missing targets
        cannot be generated.
    """
    # Seed the visited map with the roots; helpers fill in the rest.
    import_map = {tgt.import_path: tgt.address for tgt in local_go_targets}
    with temporary_dir() as gopath:
        for tgt in local_go_targets:
            deps = self._list_deps(gopath, tgt.address)
            self._generate_missing(gopath, tgt.address, deps, import_map)
        return list(import_map.items())
|
def low(data, **kwargs):
    '''
    Execute a single low data call over salt-ssh.

    This function is mostly intended for testing the state system.

    :param dict data: a single low-data chunk, e.g.
        ``{"state": "pkg", "fun": "installed", "name": "vi"}``
    :param kwargs: may include ``extra_filerefs`` to ship additional files

    CLI Example:

    .. code-block:: bash

        salt '*' state.low '{"state": "pkg", "fun": "installed", "name": "vi"}'
    '''
    # __salt__/__opts__/__grains__/__pillar__/__context__ are salt loader
    # dunders injected at module load time.
    st_kwargs = __salt__.kwargs
    __opts__['grains'] = __grains__
    # A "low" call is a single chunk; wrap it so the shared helpers that
    # expect a list of chunks can be reused.
    chunks = [data]
    st_ = salt.client.ssh.state.SSHHighState(__opts__, __pillar__, __salt__, __context__['fileclient'])
    for chunk in chunks:
        # Ensure every chunk carries an __id__ (defaults to its name).
        chunk['__id__'] = chunk['name'] if not chunk.get('__id__') else chunk['__id__']
    err = st_.state.verify_data(data)
    if err:
        # verify_data returns a list of error strings on failure.
        return err
    file_refs = salt.client.ssh.state.lowstate_file_refs(chunks, _merge_extra_filerefs(kwargs.get('extra_filerefs', ''), __opts__.get('extra_filerefs', '')))
    roster = salt.roster.Roster(__opts__, __opts__.get('roster', 'flat'))
    roster_grains = roster.opts['grains']
    # Create the tar containing the state pkg and relevant files.
    trans_tar = salt.client.ssh.state.prep_trans_tar(__context__['fileclient'], chunks, file_refs, __pillar__, st_kwargs['id_'], roster_grains)
    trans_tar_sum = salt.utils.hashutils.get_hash(trans_tar, __opts__['hash_type'])
    # Remote command: unpack and apply the shipped state package, verifying
    # its checksum with the configured hash type.
    cmd = 'state.pkg {0}/salt_state.tgz pkg_sum={1} hash_type={2}'.format(__opts__['thin_dir'], trans_tar_sum, __opts__['hash_type'])
    single = salt.client.ssh.Single(__opts__, cmd, fsclient=__context__['fileclient'], minion_opts=__salt__.minion_opts, **st_kwargs)
    single.shell.send(trans_tar, '{0}/salt_state.tgz'.format(__opts__['thin_dir']))
    stdout, stderr, _ = single.cmd_block()
    # Clean up our tar
    try:
        os.remove(trans_tar)
    except (OSError, IOError):
        pass
    # Read in the JSON data and return the data structure
    try:
        return salt.utils.json.loads(stdout)
    except Exception as e:
        log.error("JSON Render failed for: %s\n%s", stdout, stderr)
        log.error(six.text_type(e))
    # If for some reason the json load fails, return the stdout
    return stdout
|
def dropdb(self, name):
    '''Deletes an **entire database** (i.e. a table), losing all data.'''
    if self.readonly:
        raise s_exc.IsReadOnly()
    # Retry until the drop commits; a full LMDB memory map triggers a
    # remap via _handle_mapfull() and another attempt.
    while True:
        try:
            if not self.dbexists(name):
                return
            dbinfo = self.initdb(name)
            self.dirty = True
            self.xact.drop(dbinfo.db, delete=True)
            self.forcecommit()
            return
        except lmdb.MapFullError:
            self._handle_mapfull()
|
def get_health(self):
    """Summarize health of managed system.

    Returns a dict with an aggregate 'health' value and a 'badreadings'
    list of the sensor readings that caused warning/critical/failed
    assessments.
    """
    summary = {'badreadings': [], 'health': const.Health.Ok}
    oem_readings = []
    try:
        self.oem_init()
        oem_readings = self._oem.get_health(summary)
        for reading in self.get_sensor_data():
            if reading.health != const.Health.Ok:
                # Fold every non-OK reading into the aggregate health.
                summary['health'] |= reading.health
                summary['badreadings'].append(reading)
    except exc.BypassGenericBehavior:
        # OEM layer has fully handled health; skip generic sensor scan.
        pass
    if not summary['badreadings']:
        # Fall back to OEM-provided readings when nothing generic was bad.
        summary['badreadings'] = oem_readings
    return summary
|
def pack(self, value=None):
    r"""Pack the value as a binary representation.

    Example with UBInt8 (a GenericType subclass):

        >>> from pyof.foundation.basic_types import UBInt8
        >>> objectA = UBInt8(1)
        >>> objectB = 5
        >>> objectA.pack()
        b'\x01'
        >>> objectA.pack(objectB)
        b'\x05'

    Args:
        value: If None, pack this instance's own value. If it is an
            instance of the same type, delegate to that object's pack().
            Otherwise pack the given value with this instance's format.

    Returns:
        bytes: The binary representation.

    Raises:
        :exc:`~.exceptions.BadValueException`: If the value does not
            fit the binary format.
    """
    if isinstance(value, type(self)):
        return value.pack()
    if value is None:
        value = self.value
    elif 'value' in dir(value):
        # Enum or bitmask: pack only its underlying 'int' value.
        value = value.value
    try:
        return struct.pack(self._fmt, value)
    except struct.error:
        msg = 'Expected {}, found value "{}" of type {}'.format(type(self).__name__, value, type(value).__name__)
        raise PackException(msg)
|
def parse_bound_data(self, tmin0, tmax0, specimen):
    """Convert Kelvin/Tesla temperature/AF bounds from the MagIC/Redo format
    to the Celsius/milliTesla strings used by the GUI.

    Parameters
    ----------
    tmin0 : input lower bound (Kelvin or Tesla) to convert
    tmax0 : input upper bound (Kelvin or Tesla) to convert
    specimen : the specimen these bounds are for

    Returns
    -------
    (tmin, tmax) : converted bound strings, or None for a bound whose
        input format could not be matched; (None, None) if the specimen
        has no loaded measurement data.
    """
    if specimen not in self.Data:
        print(("no measurement data found loaded for specimen %s and will be ignored" % (specimen)))
        return (None, None)

    def as_celsius(val):
        # Kelvin -> whole-degree Celsius label
        return "%.0fC" % (float(val) - 273)

    def as_millitesla(val):
        # Tesla -> milliTesla label with one decimal
        return "%.1fmT" % (float(val) * 1000)

    unit = self.Data[specimen]['measurement_step_unit']
    if unit == "C":
        # 0 K and 273 K both map to the "0" (room temperature) step.
        tmin = "0" if float(tmin0) in (0, 273) else as_celsius(tmin0)
        tmax = "0" if float(tmax0) in (0, 273) else as_celsius(tmax0)
    elif unit == "mT":
        tmin = "0" if float(tmin0) == 0 else as_millitesla(tmin0)
        tmax = "0" if float(tmax0) == 0 else as_millitesla(tmax0)
    else:
        # Combined experiment (T:AF): decide per bound by matching against
        # the specimen's recorded demagnetization steps.
        steps = self.Data[specimen]['zijdblock_steps']

        def convert(val):
            if float(val) == 0:
                return "0"
            if as_celsius(val) in steps:
                return as_celsius(val)
            if as_millitesla(val) in steps:
                return as_millitesla(val)
            return None

        tmin = convert(tmin0)
        tmax = convert(tmax0)
    return tmin, tmax
|
def get_relation_fields_from_model(model_class):
    """Get related fields (m2m, FK, and reverse FK)."""
    related = []
    all_names = _get_all_field_names(model_class)
    for name in all_names:
        field, model, direct, m2m = _get_field_by_name(model_class, name)
        # _get_all_field_names yields the same field both with and without
        # the "_id" suffix; skip the duplicate.
        if name[-3:] == '_id' and name[:-3] in all_names:
            continue
        if m2m or not direct or _get_remote_field(field):
            field.field_name_override = name
            related.append(field)
    return related
|
def first(args):
    """%prog first N fastqfile(s)
    Get first N reads from file.
    """
    # NOTE: this docstring doubles as the OptionParser usage text below,
    # so its wording is part of the CLI behavior.
    from jcvi.apps.base import need_update
    p = OptionParser(first.__doc__)
    p.set_outfile()
    opts, args = p.parse_args(args)
    if len(args) < 2:
        sys.exit(not p.print_help())
    N = int(args[0])
    # FASTQ records are four lines each.
    nlines = N * 4
    fastqfiles = args[1:]
    fastqfile = fastqfiles[0]
    outfile = opts.outfile
    if not need_update(fastqfiles, outfile):
        logging.debug("File `{0}` exists. Will not overwrite.".format(outfile))
        return
    # NOTE(review): gzip detection uses only the FIRST file's extension but
    # is applied to every file in the loop below -- confirm all inputs are
    # expected to share the same compression.
    gz = fastqfile.endswith(".gz")
    for fastqfile in fastqfiles:
        if gz:
            cmd = "zcat {0} | head -n {1}".format(fastqfile, nlines)
        else:
            cmd = "head -n {0} {1}".format(nlines, fastqfile)
        # Append so all input files accumulate into one output file.
        sh(cmd, outfile=opts.outfile, append=True)
|
def has_column_at_position(self, column_name, pos=0):
    """Check whether ``column_name`` is the index's column at position ``pos``.

    :type column_name: str
    :type pos: int
    :rtype: bool
    """
    column_name = self._trim_quotes(column_name.lower())
    index_columns = [c.lower() for c in self.get_unquoted_columns()]
    # BUG FIX: list.index raises ValueError when the column is absent;
    # a membership predicate should simply answer False instead.
    if column_name not in index_columns:
        return False
    return index_columns.index(column_name) == pos
|
def get_directed_graph_paths(element, arrow_length):
    """Computes paths for a directed graph which include an arrow to
    indicate the directionality of each edge."""
    edgepaths = element._split_edgepaths
    segments = edgepaths.split(datatype='array', dimensions=edgepaths.kdims)
    paths = []
    for seg in segments:
        (x0, y0), (x1, y1) = seg[0], seg[1]
        angle = np.arctan2(y1 - y0, x1 - x0)
        # Arrow head: two short strokes angled +/- pi/8 off the edge direction,
        # drawn backwards from the destination point.
        left = (x1 - np.cos(angle + np.pi / 8) * arrow_length, y1 - np.sin(angle + np.pi / 8) * arrow_length)
        right = (x1 - np.cos(angle - np.pi / 8) * arrow_length, y1 - np.sin(angle - np.pi / 8) * arrow_length)
        # NaN row separates the edge line from the arrow-head strokes.
        paths.append(np.array([(x0, y0), (x1, y1), (np.nan, np.nan), left, (x1, y1), right]))
    return paths
|
def squeeze(self, dim=None):
    """Return a new object with squeezed data.

    Parameters
    ----------
    dim : None or str or tuple of str, optional
        Selects a subset of the length-one dimensions. Selecting a
        dimension of length greater than one raises an error. If None,
        all length-one dimensions are squeezed.

    Returns
    -------
    squeezed : same type as caller
        This object with all (or the selected subset of) length-1
        dimensions removed.

    See Also
    --------
    numpy.squeeze
    """
    squeeze_dims = common.get_squeeze_dims(self, dim)
    indexers = {name: 0 for name in squeeze_dims}
    return self.isel(indexers)
|
def checkpath(path_, verbose=VERYVERBOSE, n=None, info=VERYVERBOSE):
    r"""verbose wrapper around ``os.path.exists``

    Returns:
        true if ``path_`` exists on the filesystem; show only the
        top `n` directories

    Args:
        path_ (str): path string
        verbose (bool): verbosity flag (default = False)
        n (int): (default = None)
        info (bool): (default = False)

    CommandLine:
        python -m utool.util_path --test-checkpath

    Example:
        >>> # DISABLE_DOCTEST
        >>> from utool.util_path import *  # NOQA
        >>> import utool as ut
        >>> path_ = ut.__file__
        >>> verbose = True
        >>> n = None
        >>> info = False
        >>> result = checkpath(path_, verbose, n, info)
        >>> print(result)
        True

    Example:
        >>> # DISABLE_DOCTEST
        >>> from utool.util_path import *  # NOQA
        >>> import utool as ut
        >>> path_ = ut.__file__ + 'foobar'
        >>> verbose = True
        >>> result = checkpath(path_, verbose, n=None, info=True)
        >>> print(result)
        False
    """
    assert isinstance(path_, six.string_types), ('path_=%r is not a string. type(path_) = %r' % (path_, type(path_)))
    path_ = normpath(path_)
    if sys.platform.startswith('win32'):
        # convert back to windows style path if using unix style
        if path_.startswith('\\'):
            dirs = path_.split('\\')
            # A leading empty component plus a single-letter component is
            # treated as a unix-style drive prefix, e.g. "\c\foo" -> "C:\foo".
            if len(dirs) > 1 and len(dirs[0]) == 0 and len(dirs[1]) == 1:
                dirs[1] = dirs[1].upper() + ':'
                path_ = '\\'.join(dirs[1:])
    does_exist = exists(path_)
    if verbose:
        # print_('[utool] checkpath(%r)' % (path_))
        pretty_path = path_ndir_split(path_, n)
        caller_name = util_dbg.get_caller_name(allow_genexpr=False)
        print('[%s] checkpath(%r)' % (caller_name, pretty_path))
        if does_exist:
            path_type = get_path_type(path_)
            # path_type = 'file' if isfile(path_) else 'directory'
            print('[%s] ...(%s) exists' % (caller_name, path_type,))
        else:
            print('[%s] ... does not exist' % (caller_name))
    if not does_exist and info:
        # print('[util_path] ! Does not exist')
        # Help the caller diagnose where the path stops being valid.
        _longest_path = longest_existing_path(path_)
        _longest_path_type = get_path_type(_longest_path)
        print('[util_path] ... The longest existing path is: %r' % _longest_path)
        print('[util_path] ... and has type %r' % (_longest_path_type,))
    return does_exist
|
def pi_zoom(self, viewer, event, msg=True):
    """Zoom and/or rotate the viewer by a pinch gesture.
    (the back end must support gestures)"""
    # Delegate to the shared pinch handler with the gesture's parameters.
    gesture_state = event.state
    return self._pinch_zoom_rotate(viewer, gesture_state, event.rot_deg, event.scale, msg=msg)
|
def _run_hooked_methods(self, hook: str):
    """Run every decorated method registered for the given hook.

    Methods whose callback specs carry a 'when' condition are only run
    if that condition passes.
    """
    for method in self._potentially_hooked_methods:
        for specs in method._hooked:
            if specs['hook'] != hook:
                continue
            # A 'when' clause gates execution on its conditions.
            if specs.get('when') and not self._check_callback_conditions(specs):
                continue
            method()
|
def urldecode(query):
    """Decode a query string in x-www-form-urlencoded format into a sequence
    of two-element tuples.

    Unlike urlparse.parse_qsl(..., strict_parsing=True) urldecode will enforce
    correct formatting of the query string by validation. If validation fails
    a ValueError will be raised. urllib.parse_qsl will only raise errors if
    any of name-value pairs omits the equals sign.
    """
    # Reject any character that is not legal in an urlencoded string.
    if query and not set(query) <= urlencoded:
        error = ("Error trying to decode a non urlencoded string. "
                 "Found invalid characters: %s "
                 "in the string: '%s'. "
                 "Please ensure the request/response body is "
                 "x-www-form-urlencoded.")
        raise ValueError(error % (set(query) - urlencoded, query))
    # Every percent-escape must be '%' followed by exactly two hex digits
    # (correct: %00, %A0, %0A, %FF; invalid: %G0, %5H, %PO).
    if INVALID_HEX_PATTERN.search(query):
        raise ValueError('Invalid hex encoding in query string.')
    # Encode to utf-8 prior to parsing because parse_qsl behaves differently
    # on unicode input under Python 2 vs Python 3.
    if not PY3 and isinstance(query, unicode_type):
        query = query.encode('utf-8')
    # keep_blank_values allows queries such as "c2" (no '=value' part),
    # which the strict_parsing flag of urlparse.parse_qsl would reject.
    params = urlparse.parse_qsl(query, keep_blank_values=True)
    # unicode all the things
    return decode_params_utf8(params)
|
def sensor_values(self):
    """Returns the values of all sensors for this cluster"""
    # Refresh every reading before reporting.
    self.update_instance_sensors(opt="all")
    readings = {
        "light": self.lux,
        "water": self.soil_moisture,
        "humidity": self.humidity,
        "temperature": self.temp,
    }
    return readings
|
def cdc(i):  # pragma: no cover
    """Input:  {
                 (repo_uoa) - repo UOA
                 module_uoa - module UOA
                 data_uoa   - data UOA
                   or
                 cid
               }

    Output: {
                 Output of the 'load' function
            }
    """
    r = cd(i)
    if r['return'] > 0:
        return r
    # Copy the resulting path string to the clipboard, if one was produced.
    clip_text = r.get('string', '')
    if clip_text != '':
        rx = copy_to_clipboard({'string': clip_text})
        if rx['return'] > 0:
            return rx
    return r
|
def from_ssl(self, ca_certs, client_cert, client_key, hosts=default.ELASTICSEARCH_HOSTS, use_ssl=True, verify_certs=True, **kwargs):
    """Initialize an Elasticsearch client over SSL.

    :param ca_certs: optional path to CA bundle. See
        https://urllib3.readthedocs.io/en/latest/security.html#using-certifi-with-urllib3
    :param client_cert: path to the file containing the private key and the
        certificate, or cert only if using client_key
    :param client_key: path to the file containing the private key if using
        separate cert and key files (client_cert will contain only the cert)
    :param hosts: hostname of the node
    :param use_ssl: use ssl for the connection if `True`
    :param verify_certs: whether to verify SSL certificates
    :return: void
    """
    self.client = Elasticsearch(
        hosts=hosts,
        use_ssl=use_ssl,
        verify_certs=verify_certs,
        ca_certs=ca_certs,
        client_cert=client_cert,
        client_key=client_key,
        **kwargs
    )
    logger.info('Initialize SSL Elasticsearch Client: %s.' % self.client)
|
def wait_on_event(event, timeout=None):
    """Waits on a single threading Event, with an optional timeout.

    This exists for compatibility reasons: Python 2 can't reliably wait
    on an event without a timeout and Python 3 doesn't define `maxint`.
    """
    if timeout is not None:
        event.wait(timeout)
        return
    if not six.PY2:
        event.wait()
        return
    # A bug in Python 2's threading lib means a plain .wait() with no
    # timeout ignores signals, so poll with a huge timeout instead.
    while not event.is_set():
        event.wait(sys.maxint)
|
def _page(q, chunk=1000):
    """Quick utility to page a query, 1000 items at a time.

    We need this so we don't OOM (out of memory) ourselves loading
    the world.
    """
    offset = 0
    while True:
        got_any = False
        for item in q.limit(chunk).offset(offset):
            got_any = True
            yield item
        # An empty page means the query is exhausted.
        if not got_any:
            break
        offset += chunk
|
def signed_session(self, session=None):
    """Create token-friendly Requests session, using auto-refresh.

    Used internally when a request is made. If a session object is
    provided, configure it directly; otherwise a new session is created
    and returned.

    :param session: The session to configure for authentication
    :type session: requests.Session
    """
    # Adal does the caching, so refreshing here is cheap.
    self.set_token()
    self._parse_token()
    return super(AADMixin, self).signed_session(session)
|
def from_filename(cls, path_string, origin=MISSING, **kwargs):
    """Read Sass source from a String specifying the path"""
    # Normalize the string to a Path and defer to the path-based constructor.
    return cls.from_path(Path(path_string), origin, **kwargs)
|
def summarize(urls):
    """Calls extract for each of the URLs,
    Returns the list of Extracted instances as summaries,
    the result of the process, and the speed."""
    # NOTE: Python 2 syntax (print statement, "except Exception, e").
    import time
    from summary import Summary
    fails = 0
    # Shorthand: exception -> its class name, used to label failures.
    err = lambda e: e.__class__.__name__
    summaries = []
    start = time.time()
    for url in urls:
        try:
            print "-> %s" % url
            summary = Summary(url)
            summary.extract()
        except KeyboardInterrupt:
            # Allow Ctrl-C to stop early; partial results are still returned.
            break
        except Exception, e:
            fails += 1
            # Failed URLs become a dict placeholder describing the error.
            summary = {'titles': ["[%s]" % err(e)], 'urls': [url], 'descriptions': [str(e)], 'source': url, }
            print "[%s] (%s): %s" % (err(e), e, url)
        summaries.append(summary)
    end = time.time()
    result = fails and "Fails: %s out of %s." % (fails, len(summaries)) or "Success: %s." % len(summaries)
    print result
    duration = end - start
    # NOTE(review): average seconds per URL; divides by len(summaries),
    # which raises ZeroDivisionError if urls is empty -- confirm callers
    # never pass an empty list.
    speed = "%.2f" % (duration / len(summaries))
    return summaries, result, speed
|
def _debug(self, out, print_prefix=True):
    """Print out to stderr, if debugging is enabled."""
    if not self.debug:
        return
    if print_prefix:
        # Prefer an explicit debug_prefix attribute over the class name.
        prefix = getattr(self, 'debug_prefix', self.__class__.__name__)
        sys.stderr.write("%s: " % prefix)
    sys.stderr.write(out)
|
def _get_prog_memory(resources, cores_per_job):
    """Get expected memory usage, in Gb per core, for a program from
    resource specification."""
    out = None
    # A JVM -Xmx option implies the program's memory budget.
    for jvm_opt in resources.get("jvm_opts", []):
        if jvm_opt.startswith("-Xmx"):
            out = _str_memory_to_gb(jvm_opt[4:])
    # An explicit "memory" entry takes precedence over jvm_opts.
    explicit = resources.get("memory")
    if explicit:
        out = _str_memory_to_gb(explicit)
    prog_cores = resources.get("cores")
    # If a single core with memory is requested for the job and we run
    # multiple cores, scale down to avoid overscheduling.
    if out and prog_cores and int(prog_cores) == 1 and cores_per_job > int(prog_cores):
        out = out / float(cores_per_job)
    return out
|
def lnlike(self, X):
    """Use a softened version of the interpolant as a likelihood."""
    x0, x1 = X[0], X[1]
    # Soften by raising the interpolant to the -3.5 power (in log space).
    return -3.5 * np.log(self._interpolant(x0, x1, grid=False))
|
def stdlib(self):
    """A boolean flag. ``True`` if frame is in stdlib.

    :type: bool
    """
    mod = self.module
    # pkg_resources masquerades under sys.prefix but is not stdlib.
    if mod == 'pkg_resources' or mod.startswith('pkg_resources.'):
        return False
    # Anything in site-packages is definitely not stdlib.
    if self.filename.startswith(SITE_PACKAGES_PATHS):
        return False
    return self.filename.startswith(SYS_PREFIX_PATHS)
|
def verify_true(self, expr, msg=None):
    """Soft assert for whether the condition is true

    :params expr: the statement to evaluate
    :params msg: (Optional) msg explaining the difference
    """
    # NOTE: Python 2 syntax ("except AssertionError, e").
    try:
        self.assert_true(expr, msg)
    except AssertionError, e:
        # Soft assertion: record the failure instead of raising, so the
        # test can continue and report all failures at the end.
        if msg:
            m = "%s:\n%s" % (msg, str(e))
        else:
            m = str(e)
        # NOTE(review): 'verification_erorrs' looks misspelled, but the
        # attribute name must match wherever it is declared and consumed
        # elsewhere in this class -- confirm before renaming.
        self.verification_erorrs.append(m)
|
def send_feedback(self, reason, type=None, author_ip=None, author_id=None, author_open_id=None, content_id=None, captcha_id=None, source=None):
    """Sends feedback to Mollom in the case of false negatives or false positives.

    Keyword arguments:
    reason -- Feedback to give. Can be: "approve", "spam", "unwanted".
        "approve"  -- Report a false positive (legitimate content that was incorrectly classified as spam).
        "spam"     -- Report a false negative (spam that was incorrectly classified as ham).
        "unwanted" -- Report content that isn't spam, but is still unwanted on the site (e.g. offensive, profane, etc).
    type -- A string denoting the type of feedback submitted: 'flag' for end users flagging content; 'moderate' for administrative moderation. Defaults to "moderate".
    author_ip -- The IP address of the content author.
    author_id -- The local user ID on the client site of the content author.
    author_open_id -- Open IDs of the content author, separated by whitespace.
    content_id -- Existing content ID.
    captcha_id -- Existing CAPTCHA ID.
    source -- A single word string identifier for the user interface source, tracked along with the feedback.
    """
    url = Template("${rest_root}/feedback").substitute(rest_root=self._rest_root)
    data = {"contentId": content_id, "reason": reason}
    # Only truthy optional fields are forwarded to the API.
    optional = {
        "type": type,
        "authorIp": author_ip,
        "authorId": author_id,
        "authorOpenId": author_open_id,
        "contentId": content_id,
        "captchaId": captcha_id,
        "source": source,
    }
    for field, value in optional.items():
        if value:
            data[field] = value
    self.__post_request(url, data)
|
def add_edge_bearings(G):
    """Calculate the compass bearing from origin node to destination node for
    each edge in the directed graph, then add each bearing as a new edge
    attribute.

    Parameters
    ----------
    G : networkx multidigraph

    Returns
    -------
    G : networkx multidigraph
    """
    for u, v, data in G.edges(keys=False, data=True):
        if u == v:
            # A self-loop has an undefined compass bearing.
            data['bearing'] = np.nan
            continue
        origin = (G.nodes[u]['y'], G.nodes[u]['x'])
        destination = (G.nodes[v]['y'], G.nodes[v]['x'])
        # Round to a thousandth of a degree.
        data['bearing'] = round(get_bearing(origin, destination), 3)
    return G
|
def production_url(path, original):
    """For a production environment (DEBUG=False), replaces original path
    created by Django's {% static %} template tag with relevant path from
    our mapping."""
    mapping = _get_mapping()
    if not mapping:
        # No mapping available: behave like development mode.
        return dev_url(original)
    if path in mapping:
        return original.replace(path, mapping[path])
    return original
|
def _bandpass(self, fc_low=5, fc_high=20):
    """Apply a bandpass filter onto the signal, and save the filtered
    signal."""
    self.fc_low = fc_low
    self.fc_high = fc_high
    # Normalized cutoff frequencies (fraction of Nyquist = fs/2).
    wn = [float(fc_low) * 2 / self.fs, float(fc_high) * 2 / self.fs]
    b, a = signal.butter(2, wn, 'pass')
    self.sig_f = signal.filtfilt(b, a, self.sig[self.sampfrom:self.sampto], axis=0)
    # Save the passband gain (x2 due to double filtering by filtfilt).
    self.filter_gain = get_filter_gain(b, a, np.mean([fc_low, fc_high]), self.fs) * 2
|
def annToRLE(self, ann):
    """Convert an annotation, which can be polygons or uncompressed RLE, to RLE.

    :param dict ann: annotation with 'image_id' and 'segmentation' keys
    :return: the run-length encoding (not a decoded binary mask; use
        annToMask for that)
    """
    t = self.imgs[ann['image_id']]
    h, w = t['height'], t['width']
    segm = ann['segmentation']
    # isinstance instead of type(...) == ... (idiomatic type checks).
    if isinstance(segm, list):
        # Polygon: a single object may consist of multiple parts;
        # merge all parts into one RLE mask.
        rles = maskUtils.frPyObjects(segm, h, w)
        rle = maskUtils.merge(rles)
    elif isinstance(segm['counts'], list):
        # Uncompressed RLE: compress it.
        rle = maskUtils.frPyObjects(segm, h, w)
    else:
        # Already compressed RLE.
        rle = segm
    return rle
|
def cleanup(self):
    """Clean up before quitting"""
    self.pre_exit_trigger = True
    self.logger.info("Shutting down %s, please wait a moment.", self.name)
    # Stop all pending timers first so no new work fires mid-shutdown.
    for timer in threading.enumerate():
        if isinstance(timer, TimerClass):
            timer.cancel()
    self.logger.debug('Timers cancelled')
    for obj in self.objects:
        obj.cleanup()
    self.logger.debug('Sensors etc cleanups done')
    # User services before the worker thread; system services last.
    for service in (s for s in self.services if isinstance(s, AbstractUserService)):
        service.cleanup_system()
    self.logger.debug('User services cleaned up')
    if self.worker_thread.is_alive():
        self.worker_thread.stop()
    self.logger.debug('Worker thread really stopped')
    for service in (s for s in self.services if isinstance(s, AbstractSystemService)):
        service.cleanup_system()
    self.logger.debug('System services cleaned up')
    leftover = [t.name for t in threading.enumerate() if t.is_alive() and not t.daemon]
    if leftover:
        self.logger.info('After cleanup, we have still the following threads ' 'running: %s', ', '.join(leftover))
|
def get_parent_path(index=2):
    # type: (int) -> str
    """Get the caller's parent path to sys.path.

    If the caller is a CLI through stdin, the parent of the current
    working directory is used.
    """
    try:
        base = _caller_path(index)
    except RuntimeError:
        # stdin / interactive caller: fall back to the working directory.
        base = os.getcwd()
    return os.path.abspath(os.path.join(base, os.pardir))
|
def format_parameters(self, params):  # type: (Dict[str, str]) -> None
    """Rebuild ``self.url``'s query string from ``params``.

    Any parameters already present on the URL are merged in (existing
    values win over ``params`` on key collisions). All values are
    assumed to be already URL-quoted.

    :param dict params: A dictionary of parameters.
    """
    existing_query = urlparse(self.url).query
    if existing_query:
        # Strip the old query string; its parameters are merged back below.
        self.url = self.url.partition('?')[0]
        for pair in existing_query.split('&'):
            key, _, value = pair.partition('=')
            params[key] = value
    new_query = '&'.join('{}={}'.format(key, value) for key, value in params.items())
    self.url = self.url + '?' + new_query
|
def loadPng(varNumVol, tplPngSize, strPathPng):
    """Load PNG stimulus files and binarize them.

    Parameters
    ----------
    varNumVol : int
        Number of volumes, i.e. number of time points in all runs.
    tplPngSize : tuple
        Shape of the stimulus image (i.e. png).
    strPathPng : str
        Path prefix of the folder containing the png files; files are
        expected to be named ``<prefix>0.png``, ``<prefix>1.png``, ...

    Returns
    -------
    aryPngData : 3d numpy array, shape [png_x, png_y, n_vols]
        Stack of binarized (0/1) stimulus data.
    """
    print('------Load PNGs')
    # Paths of the png files to load, one per volume.
    lstPngPaths = [strPathPng + str(idxVol) + '.png' for idxVol in range(varNumVol)]
    # Stimulus data in the order aryPngData[x-pixel, y-pixel, PngNumber].
    aryPngData = np.zeros((tplPngSize[0], tplPngSize[1], varNumVol))
    for idxVol, strFile in enumerate(lstPngPaths):
        aryImg = np.array(Image.open(strFile))
        # RGB(A) images carry one value per channel, but the stimuli are
        # black-and-white, so any single channel is sufficient. The
        # original code assigned the raw array directly, which raises a
        # ValueError for (h, w, 3) RGB pngs; keep only the first channel.
        if aryImg.ndim == 3:
            aryImg = aryImg[:, :, 0]
        aryPngData[:, :, idxVol] = aryImg
    # Convert intensity values (0 to 255) to binary ones and zeros:
    aryPngData = (aryPngData > 0).astype(int)
    return aryPngData
|
def _pyproj_inv(self, other, ellipse='WGS84'):
    '''Run pyproj's inverse geodesic computation between two LatLon objects.

    Returns a dict with the initial and reverse headings in degrees and
    the distance in km.
    '''
    geod = pyproj.Geod(ellps=ellipse)
    heading_initial, heading_reverse, distance_m = geod.inv(
        self.lon.decimal_degree, self.lat.decimal_degree,
        other.lon.decimal_degree, other.lat.decimal_degree,
        radians=False)
    distance = distance_m / 1000.0  # metres -> kilometres
    # Reverse heading not well handled for coordinates that are directly south
    if heading_initial == 0.0:
        heading_reverse = 180.0
    return {'heading_initial': heading_initial, 'heading_reverse': heading_reverse, 'distance': distance}
|
def generate_random_missense_variants(num_variants=10, max_search=100000, reference="GRCh37"):
    """Generate a random collection of missense variants by trying random variants repeatedly.

    :param num_variants: number of missense variants to collect.
    :param max_search: maximum number of random variants to try.
    :param reference: genome reference name passed to ``Variant``.
    :return: ``VariantCollection`` of variants having a ``Substitution`` effect.
    """
    variants = []
    for _ in range(max_search):
        bases = ["A", "C", "T", "G"]
        random_ref = choice(bases)
        # Remove the ref so the alt is guaranteed to differ.
        bases.remove(random_ref)
        random_alt = choice(bases)
        random_contig = choice(["1", "2", "3", "4", "5"])
        random_variant = Variant(contig=random_contig, start=randint(1, 1000000), ref=random_ref, alt=random_alt, ensembl=reference)
        try:
            effects = random_variant.effects()
            # Effect iteration may itself raise for invalid coordinates.
            has_substitution = any(isinstance(effect, Substitution) for effect in effects)
        except Exception:
            # Effect prediction is best-effort; skip failures. (Was a bare
            # `except:`, which also swallowed KeyboardInterrupt/SystemExit.)
            continue
        if has_substitution:
            variants.append(random_variant)
        if len(variants) == num_variants:
            break
    return VariantCollection(variants)
|
def _annotation_stmt(self, stmt: Statement, sctx: SchemaContext) -> None:
    """Handle annotation statement."""
    # Ignore the annotation entirely if its if-feature guards fail.
    if not sctx.schema_data.if_features(stmt, sctx.text_mid):
        return
    description = stmt.find1("description")
    type_stmt = stmt.find1("type", required=True)
    annotation = Annotation(
        DataType._resolve_type(type_stmt, sctx),
        description.argument if description else None)
    self.annotations[(stmt.argument, sctx.default_ns)] = annotation
|
def exec(self, command_str, **command_env):
    """Execute the given command string.

    The command is split into tokens (every space that is part of a
    token must be quoted) and dispatched to a matching command object.

    :param command_str: command to execute
    :param command_env: command environment overrides
    :return: WCommandResultProto
    """
    env = self.__vars.copy()
    env.update(command_env)
    tokens = WCommandProto.split_command(command_str)
    selected = self.commands().select(*tokens, **env)
    if selected is None:
        raise WCommandSet.NoCommandFound('No suitable command found: "%s"' % command_str)
    result = selected.exec(*tokens, **env)
    # Persist any variables the command produced for subsequent calls.
    self.__track_vars(result)
    return result
|
def _read_object(self, correlation_id, parameters):
    """Read the configuration file, parameterize its content and convert it into a JSON object.

    :param correlation_id: (optional) transaction id to trace execution through call chain.
    :param parameters: values to parameterize the configuration with.
    :return: a JSON object with configuration.
    :raises ConfigException: if no config file path is set.
    :raises FileException: if the file is missing or cannot be read/parsed.
    """
    path = self.get_path()
    # `is None` instead of `== None`: identity check, immune to odd __eq__.
    if path is None:
        raise ConfigException(correlation_id, "NO_PATH", "Missing config file path")
    if not os.path.isfile(path):
        raise FileException(correlation_id, 'FILE_NOT_FOUND', 'Config file was not found at ' + path)
    try:
        with open(path, 'r') as file:
            config = file.read()
            config = self._parameterize(config, parameters)
            # NOTE(review): yaml.load without an explicit Loader can run
            # arbitrary constructors; prefer yaml.safe_load if config
            # files may come from untrusted sources.
            return yaml.load(config)
    except Exception as ex:
        raise FileException(correlation_id, "READ_FAILED", "Failed reading configuration " + path + ": " + str(ex)).with_details("path", path).with_cause(ex)
|
def get_sample_data(sample_file):
    """Read and return sample data to fill a form with a default sample sequence.

    :param sample_file: path to the FASTA sample file.
    :return: the file's content as a string.
    """
    with open(sample_file) as handle:
        return handle.read()
|
def median_filter(data, size=3, cval=0, res_g=None, sub_blocks=None):
    """Apply a median filter of the given size.

    Parameters
    ----------
    data : 2 or 3 dimensional ndarray or OCLArray of type float32
        input data
    size : scalar, tuple
        the size of the patch to consider
    cval : scalar
        the constant value for out of border access (cf mode="constant")
    res_g : OCLArray
        store result in buffer if given
    sub_blocks :
        perform over subblock tiling (only if data is ndarray)

    Returns
    -------
    filtered image or None (if OCLArray)
    """
    # Pick the GPU kernel factory matching the data dimensionality.
    if data.ndim == 2:
        kernel_factory = _median_filter_gpu_2d
    elif data.ndim == 3:
        kernel_factory = _median_filter_gpu_3d
    else:
        raise ValueError("currently only 2 or 3 dimensional data is supported")
    _filt = make_filter(kernel_factory())
    return _filt(data=data, size=size, cval=cval, res_g=res_g, sub_blocks=sub_blocks)
|
def get(self, instance_name):
    """Get an ObjectRocket instance by name.

    :param str instance_name: The name of the instance to retrieve.
    :returns: A subclass of :py:class:`bases.BaseInstance`, or None if instance does not exist.
    :rtype: :py:class:`bases.BaseInstance`
    """
    url = '{}{}/'.format(self._url, instance_name)
    response = requests.get(url, **self._default_request_kwargs)
    data = self._get_response_data(response)
    return self._concrete_instance(data)
|
def setup_logger(log_level, log_file=None):
    """Set up the root logger with a ColoredFormatter.

    :param log_level: level name, e.g. "debug" or "INFO".
    :param log_file: optional path; when given, log to this file instead
        of the console.
    """
    level = getattr(logging, log_level.upper(), None)
    if not level:
        color_print("Invalid log level: %s" % log_level, "RED")
        sys.exit(1)
    if level >= logging.INFO:
        # hide traceback when log level is INFO/WARNING/ERROR/CRITICAL
        sys.tracebacklimit = 0
    formatter = ColoredFormatter(
        u"%(log_color)s%(bg_white)s%(levelname)-8s%(reset)s %(message)s",
        datefmt=None,
        reset=True,
        log_colors=log_colors_config)
    if log_file:
        handler = logging.FileHandler(log_file, encoding="utf-8")
    else:
        handler = logging.StreamHandler()
    handler.setFormatter(formatter)
    logger.addHandler(handler)
    logger.setLevel(level)
|
def cancelled(self):
    """Return whether the gesture was cancelled rather than ending normally.

    For gesture events that are not of type
    :attr:`~libinput.constant.EventType.GESTURE_SWIPE_END` or
    :attr:`~libinput.constant.EventType.GESTURE_PINCH_END`, this property
    raises :exc:`AttributeError`.

    Returns:
        bool: :obj:`True` indicating that the gesture was cancelled.
    Raises:
        AttributeError
    """
    end_events = (EventType.GESTURE_SWIPE_END, EventType.GESTURE_PINCH_END)
    if self.type not in end_events:
        raise AttributeError(_wrong_prop.format(self.type))
    return self._libinput.libinput_event_gesture_get_cancelled(self._handle)
|
def find_column(self, token):
    """Compute the 1-based column of *token* within its line.

    :param token: a lexer token instance exposing ``lexpos``, the
        token's offset into ``self.input_data``.
    :return: 1-based column number.
    """
    # Find the newline preceding the token (-1 when the token is on the
    # first line); the column is the offset from that newline. This
    # replaces a Python-level backwards scan with C-speed str.rfind and
    # yields identical results, including at lexpos == 0.
    last_newline = self.input_data.rfind('\n', 0, token.lexpos)
    return token.lexpos - last_newline
|
def is_string(val):
    """Determine whether *val* is a string, safely on both Python 2 and 3."""
    # On Python 2 ``basestring`` covers str and unicode; on Python 3 the
    # name does not exist, so fall back to plain ``str``.
    try:
        string_type = basestring
    except NameError:
        string_type = str
    return isinstance(val, string_type)
|
def version_keyword(dist, attr, value):
    """Implement the actual ``version`` setup() keyword.

    When *value* is "PBR", the version is derived from the package name
    read out of setup.cfg and exported via the PBR_VERSION environment
    variable; otherwise it is derived from the distribution metadata.
    """
    if value == "PBR":
        from pbr.util import setup_cfg_to_setup_kwargs
        path = "setup.cfg"
        if not os.path.exists(path):
            raise ValueError("file '%s' does not exist" % os.path.abspath(path))
        parser = ConfigParser()
        parser.read(path)
        config = {section: dict(parser.items(section)) for section in parser.sections()}
        attrs = setup_cfg_to_setup_kwargs(config)
        version = str(Version(attrs["name"]))
        # pbr picks the version up from the environment.
        os.environ["PBR_VERSION"] = version
    else:
        version = str(Version(dist.metadata.get_name()))
    dist.metadata.version = version
|
def xcoord(self):
    """The x coordinate :class:`xarray.Variable`."""
    coords = self.data.coords
    return self.decoder.get_x(self.data, coords=coords)
|
def _CheckAndCreateNewGroup(self, group_name, group_class):
    """Join the trailing group if it matches, otherwise start a new one.

    Checks whether the last queued method is an instance of *group_class*
    with the same name; if so, this method is added to it. Otherwise a
    new group is created, this method is added to it, and the group is
    appended to the call queue.

    Args:
        group_name: the name of the group.
        group_class: the class used to create an instance of this new group.
    """
    tail = self.GetPossibleGroup()
    if isinstance(tail, group_class) and tail.group_name() == group_name:
        # Matching group already at the tail of the queue: just join it.
        tail.AddMethod(self)
        return self
    fresh_group = group_class(group_name)
    fresh_group.AddMethod(self)
    self._call_queue.append(fresh_group)
    return self
|
def update_cnt(uid, post_data):
    '''Update the content by ID.'''
    # Markdown content is HTML-escaped before it is stored.
    escaped_md = tornado.escape.xhtml_escape(post_data['cnt_md'])
    query = TabPostHist.update(
        user_name=post_data['user_name'],
        cnt_md=escaped_md,
        time_update=tools.timestamp(),
    ).where(TabPostHist.uid == uid)
    query.execute()
|
def _set_socket_timeout(cls, sock, timeout=None):
    """Temporarily set a socket timeout in order to respect a timeout provided to .iter_chunks().

    Generator that yields exactly once — NOTE(review): presumably wrapped
    with contextlib.contextmanager at the (unseen) decorator site; confirm.
    A socket.timeout raised while the body runs is translated into
    cls.ProcessStreamTimeout even when no timeout was passed in.
    """
    if timeout is not None:
        # Remember the previous timeout so it can be restored afterwards.
        prev_timeout = sock.gettimeout()
    try:
        if timeout is not None:
            sock.settimeout(timeout)
        yield
    except socket.timeout:
        # Translate the low-level timeout into the class-specific error.
        raise cls.ProcessStreamTimeout("socket read timed out with timeout {}".format(timeout))
    finally:
        if timeout is not None:
            # Restore whatever timeout was configured before entry.
            sock.settimeout(prev_timeout)
|
def match_zipfile_members(zipfile_path: str, pattern: Pattern):
    """Yield member names of a zip archive that match *pattern*."""
    with ZipFile(zipfile_path, mode='r') as archive:
        # Delegate the actual matching to the shared helper.
        yield from match_files(archive.namelist(), pattern)
|
def toggle_grid ( self , event = None , show = None ) :
"toggle grid on top / bottom panels"
|
if show is None :
show = not self . panel . conf . show_grid
for p in ( self . panel , self . panel_bot ) :
p . conf . enable_grid ( show )
|
def convert_halo_to_array_form(self, halo):
    """Converts the :samp:`{halo}` argument to a :samp:`({self}.array_shape.size, 2)`
    shaped array.

    :type halo: :samp:`None`, :obj:`int`, :samp:`self.array_shape.size` length sequence
       of :samp:`int` or :samp:`(self.array_shape.size, 2)` shaped array
       of :samp:`int`
    :param halo: Halo to be converted to :samp:`(len(self.array_shape), 2)` shaped array form.
    :rtype: :obj:`numpy.ndarray`
    :return: A :samp:`(len(self.array_shape), 2)` shaped array of :obj:`numpy.int64` elements.
    """
    # Delegates to the same-named module-level helper, supplying this
    # array's dimensionality as ndim.
    return convert_halo_to_array_form(halo=halo, ndim=len(self.array_shape))
|
def pdf(self):
    """Return the probability density function (pdf) of the distribution.

    Returns
    -------
    function: The probability density function, accepting one positional
        argument per variable.

    Examples
    --------
    >>> from pgmpy.factors.distributions import GaussianDistribution
    >>> dist = GD(variables=['x1', 'x2', 'x3'],
    ...           mean=[1, -3, 4],
    ...           cov=[[4, 2, -2],
    ...                [2, 5, -5],
    ...                [-2, -5, 8]])
    >>> dist.pdf([0, 0, 0])
    0.0014805631279234139
    """
    def _density(*args):
        # Mean/covariance are read lazily so later mutation of the
        # distribution is reflected, exactly as with the original lambda.
        mu = self.mean.reshape(1, len(self.variables))[0]
        return multivariate_normal.pdf(args, mu, self.covariance)
    return _density
|
def sum2diag(A, D, out=None):
    r"""Add values ``D`` to the diagonal of matrix ``A``.

    Args:
        A (array_like): Left-hand side.
        D (array_like or float): Values to add.
        out (:class:`numpy.ndarray`, optional): copy result to.

    Returns:
        :class:`numpy.ndarray`: Resulting matrix.
    """
    A = asarray(A, float)
    D = asarray(D, float)
    if out is None:
        out = copy(A)
    else:
        copyto(out, A)
    # einsum("ii->i", ...) is a writable view of the diagonal.
    diag_view = einsum("ii->i", out)
    diag_view += D
    return out
|
def equals(self, controller):
    """Verify whether *controller* corresponds to the current one."""
    if controller is None:
        return False
    return (self.user == controller.user
            and self.enterprise == controller.enterprise
            and self.url == controller.url)
|
def libvlc_media_save_meta(p_md):
    '''Save the meta previously set.
    @param p_md: the media desriptor.
    @return: true if the write operation was successful.
    '''
    # Reuse the cached ctypes binding when available; build it otherwise.
    f = _Cfunctions.get('libvlc_media_save_meta', None)
    if f is None:
        f = _Cfunction('libvlc_media_save_meta', ((1,),), None, ctypes.c_int, Media)
    return f(p_md)
|
def today(self, symbol):
    """GET /today/:symbol

    curl "https://api.bitfinex.com/v1/today/btcusd"
    {"low":"550.09","high":"572.2398","volume":"7305.33119836"}
    """
    endpoint = self.url_for(PATH_TODAY, (symbol))
    raw = self._get(endpoint)
    # The API returns strings; convert all values to floats.
    return self._convert_to_floats(raw)
|
def remove_duplicates(lst):
    """Emulate what a Python ``set()`` does, but keeping the element's order.

    :param lst: iterable of hashable elements.
    :return: list of unique elements in first-seen order.
    """
    # dict preserves insertion order (guaranteed since Python 3.7), so
    # fromkeys deduplicates in first-seen order — clearer than the old
    # `l not in dset and not dset.add(l)` side-effect trick.
    return list(dict.fromkeys(lst))
|
def writeNumber(self, n):
    """Write a number to the data stream.

    @param n: The number data to be encoded to the AMF0 data stream.
    """
    self.writeType(TYPE_NUMBER)
    # AMF0 numbers are always encoded as IEEE-754 doubles.
    value = float(n)
    self.stream.write_double(value)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.