signature
stringlengths 29
44.1k
| implementation
stringlengths 0
85.2k
|
---|---|
def main():
    """Start the poor_consumer: pull jobs from disque queues and ack/nack them.

    Command line options:
        -h, --help    show usage and exit
        -v            verbose
        --nack=F      probability [0.0-1.0] of nacking (bouncing) a job
        --servers=S   comma-separated list of host:port disque servers
        --queues=Q    comma-separated list of queue names to consume
    """
    try:
        # BUG FIX: the optstring was "h:v", which made -h require an argument;
        # -h is a plain help flag, so it is now "hv".
        opts, args = getopt.getopt(
            sys.argv[1:], "hv", ["help", "nack=", "servers=", "queues="])
    except getopt.GetoptError as err:
        print(str(err))
        usage()
        sys.exit()
    # defaults
    nack = 0.0
    verbose = False
    servers = "localhost:7712,localhost:7711"
    queues = "test"
    for o, a in opts:
        if o == "-v":
            verbose = True
        elif o in ("-h", "--help"):
            usage()
            sys.exit()
        # BUG FIX: `o in ("--nack")` tested substring membership in a plain
        # string (the parentheses did not make a tuple); the one-element
        # tuples below restore the intended exact comparison.
        elif o in ("--nack",):
            nack = float(a)
        elif o in ("--servers",):
            servers = a
        elif o in ("--queues",):
            queues = a
        else:
            assert False, "unhandled option"
    # prepare servers and queues for pydisque
    servers = servers.split(",")
    queues = queues.split(",")
    c = Client(servers)
    c.connect()
    while True:
        jobs = c.get_job(queues)
        for queue_name, job_id, job in jobs:
            rnd = random.random()
            # As this is a test processor, we don't do any validation on
            # the actual job body, so let's just pay attention to ids.
            if rnd >= nack:
                print(">>> received job:", job_id)
                c.ack_job(job_id)
            else:
                print(">>> bouncing job:", job_id)
                c.nack_job(job_id)
|
def eval_string(stri):
    """Evaluate expressions passed as a string via the ``plash eval`` helper."""
    # Tokenize shell-style, then feed one token per line to `plash eval`.
    tokens = shlex.split(stri)
    payload = '\n'.join(tokens).encode()
    return run_write_read(['plash', 'eval'], payload).decode()
|
async def get_offers(connection: Connection) -> dict:
    """Retrieve all pending credential offers for a given connection.

    :param connection: A connection handle.
    :return: Parsed JSON -- a list of dictionary objects representing offers
        from the given connection.
    Example:
        credential = await Credential.create_with_msgid(source_id, connection, msg_id)
        offers = await credential.get_offers(connection)
    """
    # Create the C callback lazily and cache it on the function object so it
    # is only built once per process.
    if not hasattr(Credential.get_offers, "cb"):
        Credential.get_offers.cb = create_cb(
            CFUNCTYPE(None, c_uint32, c_uint32, c_char_p))
    handle = c_uint32(connection.handle)
    data = await do_call('vcx_credential_get_offers',
                         handle,
                         Credential.get_offers.cb)
    return json.loads(data.decode())
|
def dirsplit(path):
    r"""Split a path into all of its components.

    Args:
        path (str): the path to split
    Returns:
        list: components of the path, root-most component first
    """
    components = []
    remain = path
    part = None
    # Repeatedly strip the last path component until either nothing remains
    # or os.path.split stops making progress (part == '' at a root like '/').
    while remain != '' and part != '':
        remain, part = split(remain)
        if part != '':
            components.append(part)
    # A non-empty remainder is the root (e.g. '/' or a drive prefix).
    if remain != '':
        components.append(remain)
    components.reverse()
    return components
|
def write_from_sid_df_pairs(self, country_code, data, scaling_factors=None):
    """Write OHLCV data given as (sid, DataFrame) chunks.

    Parameters
    ----------
    country_code : str
        The ISO 3166 alpha-2 country code for this country.
    data : iterable[tuple[int, pandas.DataFrame]]
        The data chunks to write. Each chunk should be a tuple of
        sid and the data for that asset.
    scaling_factors : dict[str, float], optional
        A dict mapping each OHLCV field to a scaling factor, which
        is applied (as a multiplier) to the values of field to
        efficiently store them as uint32, while maintaining desired
        precision. These factors are written to the file as metadata,
        which is consumed by the reader to adjust back to the original
        float values. Default is None, in which case
        DEFAULT_SCALING_FACTORS is used.
    """
    chunks = list(data)
    if not chunks:
        # Nothing to write: emit one empty frame per OHLCV field.
        empty = pd.DataFrame(
            data=None,
            index=np.array([], dtype='datetime64[ns]'),
            columns=np.array([], dtype='int64'),
        )
        return self.write(
            country_code,
            {f: empty.copy() for f in FIELDS},
            scaling_factors,
        )
    sids, frames = zip(*chunks)
    combined = pd.concat(frames)
    # Repeat each sid once per row of its corresponding frame, then add it
    # to the index so the combined frame is indexed by (date, sid).
    sid_ix = np.repeat(sids, [len(f) for f in frames])
    combined.set_index(sid_ix, append=True, inplace=True)
    field_frames = {field: combined[field].unstack() for field in FIELDS}
    return self.write(country_code, field_frames, scaling_factors)
|
def scores_to_preds(self, threshold, use_probs=True):
    """Turn scores (or probabilities) into binary predictions by thresholding.

    use_probs : boolean, default True
        if True, use probabilities for predictions, else use scores.
    """
    self.threshold = threshold
    # Pick the value array to threshold, failing loudly if it is missing.
    if use_probs:
        if self.probs is None:
            raise DataError("Probabilities are not available to make "
                            "predictions.")
        word, values = "probabilities", self.probs
    else:
        if self.scores is None:
            raise DataError("Scores are not available to make predictions.")
        word, values = "scores", self.scores
    # Warn when the threshold can never split the data.
    if not (np.min(values) <= threshold <= np.max(values)):
        warnings.warn("Threshold {} is outside the range of the "
                      "{}.".format(self.threshold, word))
    if self.preds is not None:
        warnings.warn("Overwriting predictions")
    # Binary 0/1 predictions: 1 where value >= threshold.
    self.preds = (values >= threshold) * 1
|
def encode_string(self, value):
    """Convert ASCII, Latin-1 or UTF-8 byte strings to pure Unicode.

    Values that are not byte strings (including text that is already
    Unicode) are returned unchanged.  Byte strings are decoded as UTF-8
    when possible, falling back to Latin-1, which maps each byte to the
    code point of the same value and therefore can never fail.
    """
    # BUG FIX: the original used the Python-2-only builtins `unicode` and
    # `unichr` and a bare `except:` that swallowed every error (the old
    # comment itself said "really, this should throw an exception").
    if not isinstance(value, bytes):
        return value
    try:
        return value.decode('utf-8')
    except UnicodeDecodeError:
        # Latin-1 reproduces the old unichr(ord(ch)) per-byte fallback.
        return value.decode('latin-1')
|
def _setauto(self, s, length, offset):
    """Set bitstring from a bitstring, file, bool, integer, array, iterable or string.

    :param s: the initialiser; its runtime type selects the conversion used
    :param length: length in bits; only valid for Bits/file initialisers
    :param offset: bit offset into the source; same restriction as length
    """
    # As s can be so many different things it's important to do the checks
    # in the correct order, as some types are also other allowed types.
    # So basestring must be checked before Iterable
    # and bytes/bytearray before Iterable but after basestring!
    if isinstance(s, Bits):
        if length is None:
            length = s.len - offset
        # Share the underlying raw bytes; no copy is made here.
        self._setbytes_unsafe(s._datastore.rawbytes, length, s._offset + offset)
        return
    # NOTE(review): `file` is a Python 2 builtin; this branch (and
    # `basestring` / `collections.Iterable` below) is Python-2-only as
    # written -- confirm against the module's compatibility shims.
    if isinstance(s, file):
        if offset is None:
            offset = 0
        if length is None:
            length = os.path.getsize(s.name) * 8 - offset
        byteoffset, offset = divmod(offset, 8)
        bytelength = (length + byteoffset * 8 + offset + 7) // 8 - byteoffset
        # Memory-map the file instead of reading it into memory.
        m = MmapByteArray(s, bytelength, byteoffset)
        if length + byteoffset * 8 + offset > m.filelength * 8:
            raise CreationError("File is not long enough for specified "
                                "length and offset.")
        self._datastore = ConstByteStore(m, length, offset)
        return
    # For all remaining initialiser types, length/offset are meaningless.
    if length is not None:
        raise CreationError("The length keyword isn't applicable to this initialiser.")
    if offset:
        raise CreationError("The offset keyword isn't applicable to this initialiser.")
    if isinstance(s, basestring):
        bs = self._converttobitstring(s)
        assert bs._offset == 0
        self._setbytes_unsafe(bs._datastore.rawbytes, bs.length, 0)
        return
    if isinstance(s, (bytes, bytearray)):
        self._setbytes_unsafe(bytearray(s), len(s) * 8, 0)
        return
    if isinstance(s, array.array):
        # NOTE(review): array.tostring() was removed in Python 3.9
        # (use tobytes()); kept byte-identical here.
        b = s.tostring()
        self._setbytes_unsafe(bytearray(b), len(b) * 8, 0)
        return
    if isinstance(s, numbers.Integral):
        # Initialise with s zero bits.
        if s < 0:
            msg = "Can't create bitstring of negative length {0}."
            raise CreationError(msg, s)
        data = bytearray((s + 7) // 8)
        self._datastore = ByteStore(data, s, 0)
        return
    if isinstance(s, collections.Iterable):
        # Evaluate each item as True or False and set bits to 1 or 0.
        self._setbin_unsafe(''.join(str(int(bool(x))) for x in s))
        return
    raise TypeError("Cannot initialise bitstring from {0}.".format(type(s)))
|
def nvlist2(thelist, names=None):
    '''Like nvlist but applied one more time to each returned value.

    So, given a list, args, of arguments to a state like this::
        - name: echo test
        - cwd: /
        - require:
          - file: test.sh
    nvlist2(args, ['require']) would yield the tuple,
    (dict_item, 'file', 'test.sh') where dict_item is the single-key
    dictionary of {'file': 'test.sh'}.
    '''
    # First pass selects the (name, value) pairs; the second pass expands
    # each matched value into its own (item, name, value) triples.
    for _, _, matched_value in nvlist(thelist, names):
        for triple in nvlist(matched_value):
            yield triple
|
def join(self, other, attrlist=None, auto_create_indexes=True, **kwargs):
    """Join the objects of one table with the objects of another, based on the given
    matching attributes in the named arguments. The attrlist specifies the attributes to
    be copied from the source tables - if omitted, all attributes will be copied. Entries
    in the attrlist may be single attribute names, or if there are duplicate names in both
    tables, then a C{(table, attributename)} tuple can be given to disambiguate which
    attribute is desired. A C{(table, attributename, alias)} tuple can also be passed, to
    rename an attribute from a source table.

    This method may be called directly, or can be constructed using the L{join_on} method and
    the '+' operator. Using this syntax, the join is specified using C{table.join_on("xyz")}
    to create a JoinTerm containing both table and joining attribute. Multiple JoinTerm
    or tables can be added to construct a compound join expression. When complete, the
    join expression gets executed by calling the resulting join definition,
    using C{join_expression([attrlist])}.

    @param other: other table to join to
    @param attrlist: list of attributes to be copied to the new joined table; if
        none provided, all attributes of both tables will be used (taken from the first
        object in each table)
    @type attrlist: string, or list of strings or C{(table, attribute[, alias])} tuples
        (list may contain both strings and tuples)
    @param kwargs: attributes to join on, given as additional named arguments
        of the form C{table1attr="table2attr"}, or a dict mapping attribute names.
    @returns: a new Table containing the joined data as new DataObjects
    """
    if not kwargs:
        raise TypeError("must specify at least one join attribute as a named argument")
    # Only the first keyword pair is used as the join key.
    thiscol, othercol = next(iter(kwargs.items()))
    retname = ("(%s:%s^%s:%s)" % (self.table_name, thiscol, other.table_name, othercol))
    # make sure both tables contain records to join - if not, just return empty list
    if not (self.obs and other.obs):
        return Table(retname)
    if isinstance(attrlist, basestring):
        # Accept a comma- or whitespace-separated string of attribute names.
        attrlist = re.split(r'[,\s]+', attrlist)
    # expand attrlist to full (table, name, alias) tuples
    thisnames = set(_object_attrnames(self.obs[0]))
    othernames = set(_object_attrnames(other.obs[0]))
    fullcols = []
    if attrlist is not None:
        for col in attrlist:
            if isinstance(col, tuple):
                # assume col contains at least (table, colname); fill in
                # alias if missing, defaulting to the same as colname
                fullcols.append((col + (col[1],))[:3])
            else:
                if col in thisnames:
                    fullcols.append((self, col, col))
                elif col in othernames:
                    fullcols.append((other, col, col))
                else:
                    raise ValueError("join attribute not found: " + col)
    else:
        fullcols = [(self, n, n) for n in thisnames]
        fullcols += [(other, n, n) for n in othernames]
    # Partition the output columns by source table.
    thiscols = list(filter(lambda o: o[0] is self, fullcols))
    othercols = list(filter(lambda o: o[0] is other, fullcols))
    if auto_create_indexes:
        if thiscol not in self._indexes:
            self.create_index(thiscol)
        if othercol not in other._indexes:
            other.create_index(othercol)
    # Both join columns must be indexed by now; otherwise refuse the join.
    if thiscol in self._indexes:
        thiscolindex = self._indexes[thiscol]
    else:
        raise ValueError("indexed attribute required for join: " + thiscol)
    if othercol in other._indexes:
        othercolindex = other._indexes[othercol]
    else:
        raise ValueError("indexed attribute required for join: " + othercol)
    # use table with fewer keys to drive join
    if len(thiscolindex) < len(othercolindex):
        shortindex, longindex = (thiscolindex, othercolindex)
        swap = False
    else:
        shortindex, longindex = (othercolindex, thiscolindex)
        swap = True
    # find matching rows; `swap` keeps the (self-rows, other-rows) order
    matchingrows = list((longindex[key], rows) if swap else (rows, longindex[key])
                        for key, rows in shortindex.items())
    joinrows = []
    for thisrows, otherrows in matchingrows:
        for trow, orow in product(thisrows, otherrows):
            retobj = DataObject()
            do_all(setattr(retobj, a, getattr(trow, c)) for _, c, a in thiscols)
            # attributes copied from `other` must not clobber ones already
            # set from `self`
            do_all(setattr(retobj, a, getattr(orow, c)) for _, c, a in othercols
                   if not hasattr(retobj, a))
            joinrows.append(retobj)
    ret = Table(retname)
    # Re-create an index on every output column whose source column was
    # indexed in its originating table.
    for tbl, collist in zip([self, other], [thiscols, othercols]):
        for _, c, a in collist:
            if c in tbl._indexes:
                if a not in ret._indexes:
                    # no unique indexes in join results
                    ret.create_index(a)
    ret.insert_many(joinrows)
    return ret
|
def cylindrical_window(self, radius, lat0, long0):
    '''Cylindrical projection of a window centered
    at (lat0, long0) with a given radius (km).

    Args:
        radius (float): Radius of the window (km).
        lat0 (float): Latitude at the center (degree).
        long0 (float): Longitude at the center (degree).
    Returns:
        A tuple ``(longll, longtr, latll, lattr)`` with ``longll``
        the longitude of the lower left corner, ``longtr`` the
        longitude of the top right corner, ``latll`` the latitude
        of the lower left corner and ``lattr`` the latitude of the
        top right corner.
    Note:
        All return coordinates are in degree.
    '''
    # Convert the radius in km to radians on the sphere
    # (mean radius 1734.4 km -- the Moon).
    radi = radius * 2 * np.pi / (2 * 1734.4 * np.pi)
    lamb0 = long0 * np.pi / 180.0
    phi0 = lat0 * np.pi / 180.0
    # Lower-left longitude/latitude (cylindrical projection formulas).
    longll = -radi / np.cos(phi0) + lamb0
    latll = np.arcsin((-radi + np.tan(phi0)) * np.cos(phi0))
    if np.isnan(latll):
        # Out of arcsin domain: the window reaches past the south pole.
        latll = -90 * np.pi / 180.0
    # Top-right longitude/latitude.
    longtr = radi / np.cos(phi0) + lamb0
    lattr = np.arcsin((radi + np.tan(phi0)) * np.cos(phi0))
    # BUG FIX: the north-pole case previously had no NaN guard, so windows
    # reaching past the north pole returned NaN for lattr while latll was
    # correctly clamped. Clamp symmetrically to +90 degrees.
    if np.isnan(lattr):
        lattr = 90 * np.pi / 180.0
    return longll * 180 / np.pi, longtr * 180 / np.pi, latll * 180 / np.pi, lattr * 180 / np.pi
|
def get(self, objectType, *args, **coolArgs):
    """Raba Magic inside. This is the function that you use for
    querying pyGeno's DB.

    Usage examples:
        * myGenome.get("Gene", name='TPST2')
        * myGene.get(Protein, id='ENSID...')
        * myGenome.get(Transcript, {'start >': x, 'end <': y})
    """
    # The wrapper check does not depend on the row, so hoist it out of
    # the loop.
    wrap = issubclass(objectType, pyGenoRabaObjectWrapper)
    results = []
    for raw in self._makeLoadQuery(objectType, *args, **coolArgs).iterRun():
        if wrap:
            results.append(objectType(wrapped_object_and_bag=(raw, self.bagKey)))
        else:
            results.append(raw)
    return results
|
def attachPurrlog(self, purrlog, watchdirs=[]):
    """Attaches Purr to the given purrlog directory. Arguments are passed to Purrer object as is.

    :param purrlog: path of the purrlog directory to attach to
    :param watchdirs: directories to watch for new data products
    :return: True on success, False if the purrlog is locked by another
        process or a lock could not be obtained
    NOTE(review): the mutable default ``watchdirs=[]`` is shared between
    calls; it is only iterated here, so it is harmless, but worth fixing.
    """
    # check purrer stack for a Purrer already watching this directory
    dprint(1, "attaching to purrlog", purrlog)
    for i, purrer in enumerate(self.purrer_stack):
        if os.path.samefile(purrer.logdir, purrlog):
            dprint(1, "Purrer object found on stack (#%d),reusing\n", i)
            # found? move to front of stack
            self.purrer_stack.pop(i)
            self.purrer_stack.insert(0, purrer)
            # update purrer with watched directories, in case they have changed
            for dd in (watchdirs or []):
                purrer.addWatchedDirectory(dd, watching=None)
            break
    # no purrer found, make a new one
    else:
        dprint(1, "creating new Purrer object")
        try:
            purrer = Purr.Purrer(purrlog, watchdirs)
        except Purr.Purrer.LockedError as err:
            # check that we could attach, display message if not
            QMessageBox.warning(self, "Catfight!", """<P><NOBR>It appears that another PURR process (%s)</NOBR>
is already attached to <tt>%s</tt>, so we're not allowed to touch it. You should exit the other PURR
process first.</P>""" % (err.args[0], os.path.abspath(purrlog)), QMessageBox.Ok, 0)
            return False
        except Purr.Purrer.LockFailError as err:
            QMessageBox.warning(self, "Failed to obtain lock", """<P><NOBR>PURR was unable to obtain a lock</NOBR>
on directory <tt>%s</tt> (error was "%s"). The most likely cause is insufficient permissions.</P>""" % (os.path.abspath(purrlog), err.args[0]), QMessageBox.Ok, 0)
            return False
        self.purrer_stack.insert(0, purrer)
        # discard end of stack -- keep at most three attached purrers
        self.purrer_stack = self.purrer_stack[:3]
        # attach signals
        self.connect(purrer, SIGNAL("disappearedFile"), self.new_entry_dialog.dropDataProducts)
        self.connect(purrer, SIGNAL("disappearedFile"), self.view_entry_dialog.dropDataProducts)
    # have we changed the current purrer? Update our state then
    # reopen Purr pipes
    self.purrpipes = {}
    for dd, state in purrer.watchedDirectories():
        self.purrpipes[dd] = Purr.Pipe.open(dd)
    if purrer is not self.purrer:
        self.message("Attached to %s" % purrer.logdir, ms=10000)
        dprint(1, "current Purrer changed, updating state")
        # set window title
        path = Kittens.utils.collapseuser(os.path.join(purrer.logdir, ''))
        self.setWindowTitle("PURR - %s" % path)
        # other init
        self.purrer = purrer
        self.new_entry_dialog.hide()
        self.new_entry_dialog.reset()
        dirs = [path for path, state in purrer.watchedDirectories()]
        self.new_entry_dialog.setDefaultDirs(*dirs)
        self.view_entry_dialog.setDefaultDirs(*dirs)
        self.view_entry_dialog.hide()
        self.viewer_dialog.hide()
        self._viewing_ientry = None
        self._setEntries(self.purrer.getLogEntries())
        self._viewer_timestamp = None
        self._updateViewer()
        self._updateNames()
        # update directory widgets
        self.wdirlist.clear()
        for pathname, state in purrer.watchedDirectories():
            self.wdirlist.add(pathname, state)
        # Reset _pounce to false -- this will cause checkPounceStatus() into a rescan
        self._pounce = False
        self._checkPounceStatus()
    return True
|
def _search_url(search_term: str, size: str = '>400*300', format: str = 'jpg') -> str:
    "Return a Google Images Search URL for a given search term."
    # Assemble the URL from its fixed and variable parts.
    prefix = 'https://www.google.com/search?q=' + quote(search_term)
    fixed = '&espv=2&biw=1366&bih=667&site=webhp&source=lnms&tbm=isch'
    suffix = '&sa=X&ei=XosDVaCXD8TasATItgE&ved=0CAcQ_AUoAg'
    return prefix + fixed + _url_params(size, format) + suffix
|
def load_package_template(license, header=False):
    """Load license template distributed with package."""
    template_pattern = 'template-%s-header.txt' if header else 'template-%s.txt'
    content = StringIO()
    # resource_stream yields raw bytes; accumulate decoded utf-8 text.
    with resource_stream(__name__, template_pattern % license) as licfile:
        for raw_line in licfile:
            content.write(raw_line.decode("utf-8"))
    return content
|
def get_template_for_path(path, use_cache=True):
    '''Convenience method that retrieves a template given a direct path to it.'''
    app_path, template_name = os.path.split(path)
    dmp = apps.get_app_config('django_mako_plus')
    loader = dmp.engine.get_template_loader_for_path(app_path, use_cache=use_cache)
    return loader.get_template(template_name)
|
def create_list_stories(list_id_stories, number_of_stories, shuffle, max_threads):
    """Fetch the stories for the first ``number_of_stories`` ids in parallel,
    optionally shuffling the collected results."""
    selected_ids = list_id_stories[:number_of_stories]
    stories = []
    with ThreadPoolExecutor(max_workers=max_threads) as executor:
        futures = {executor.submit(get_story, story_id) for story_id in selected_ids}
        # tqdm shows progress as futures complete, in completion order.
        for future in tqdm(as_completed(futures), desc='Getting results', unit=' news'):
            stories.append(future.result())
    if shuffle:
        random.shuffle(stories)
    return stories
|
def render(msgpack_data, saltenv='base', sls='', **kws):
    '''Accepts a message pack string or a file object, renders said data back to
    a python dict.

    .. note:
        This renderer is NOT intended for use in creating sls files by hand,
        but exists to allow for data backends to serialize the highdata
        structure in an easily transportable way. This is to allow for more
        fluid fileserver backends that rely on pure data sources.
    :rtype: A Python data structure
    '''
    # File-like input: pull the raw payload out first.
    if not isinstance(msgpack_data, six.string_types):
        msgpack_data = msgpack_data.read()
    # Strip an optional shebang line (e.g. "#!msgpack").
    if msgpack_data.startswith('#!'):
        first_newline = msgpack_data.find('\n')
        msgpack_data = msgpack_data[first_newline + 1:]
    if not msgpack_data.strip():
        return {}
    return salt.utils.msgpack.loads(msgpack_data)
|
def fill_empty(self, fixed_values, input):
    """Fill in random values for all empty-valued ItemData elements in an ODM document.

    :param fixed_values: optional dict mapping item OIDs to the value to use
        instead of a scrambled one (may be None)
    :param input: the ODM document as an XML string/bytes
    :return: the modified document serialized back to bytes
    """
    odm_elements = etree.fromstring(input)
    for v in odm_elements.iter(E_ODM.ITEM_DATA.value):
        if v.get(A_ODM.VALUE.value) == "":
            oid = v.get(A_ODM.ITEM_OID.value)
            if fixed_values is not None and oid in fixed_values:
                d = fixed_values[oid]
            else:
                d = self.scramble_itemdata(oid, v.get(A_ODM.VALUE.value))
            v.set(A_ODM.VALUE.value, d)
        else:
            # Remove ItemData that already has a value.
            v.getparent().remove(v)
    # Prune now-empty containers. The order matters and is preserved from the
    # original (innermost first): removing empty ItemGroupData elements can
    # make a FormData empty, which in turn can empty a StudyEventData.
    for container in (E_ODM.ITEM_GROUP_DATA, E_ODM.FORM_DATA, E_ODM.STUDY_EVENT_DATA):
        for v in odm_elements.iter(container.value):
            if len(v) == 0:
                v.getparent().remove(v)
    return etree.tostring(odm_elements)
|
def set_trig_start(self, time, pass_to_command_line=True):
    """Set the trig start time of the analysis node by setting a
    --trig-start-time option to the node when it is executed.

    @param time: trig start time of job.
    @bool pass_to_command_line: add trig-start-time as a variable option.
    """
    if pass_to_command_line:
        self.add_var_opt('trig-start-time', time)
    # Record the value on the node regardless of command-line pass-through.
    self.__trig_start = time
|
def get_mean_and_stddevs(self, sites, rup, dists, imt, stddev_types):
    """See :meth:`superclass method
    <.base.GroundShakingIntensityModel.get_mean_and_stddevs>`
    for spec of input and result values.
    """
    assert all(stddev_type in self.DEFINED_FOR_STANDARD_DEVIATION_TYPES
               for stddev_type in stddev_types)
    # GMPE differentiates strike-slip, reverse and normal ruptures,
    # but combines normal and strike-slip into one category. See page 180.
    is_reverse = (45 <= rup.rake <= 135)
    stddevs = [numpy.zeros_like(sites.vs30) for _ in stddev_types]
    means = numpy.zeros_like(sites.vs30)
    # Rock sites: vs30 above the rock threshold; compute their means and
    # standard deviations with the rock-specific helpers and scatter the
    # results back into the full arrays by index.
    [rocks_i] = (sites.vs30 > self.ROCK_VS30).nonzero()
    if len(rocks_i):
        rrup = dists.rrup.take(rocks_i)
        mean_rock = self._get_mean_rock(rup.mag, rup.rake, rrup, is_reverse, imt)
        means.put(rocks_i, mean_rock)
        for stddev_arr in stddevs:
            stddev_rock = self._get_stddev_rock(rup.mag, imt)
            stddev_arr.put(rocks_i, stddev_rock)
    # Deep-soil sites: the complementary index set.
    [soils_i] = (sites.vs30 <= self.ROCK_VS30).nonzero()
    if len(soils_i):
        rrup = dists.rrup.take(soils_i)
        mean_soil = self._get_mean_deep_soil(rup.mag, rup.rake, rrup, is_reverse, imt)
        means.put(soils_i, mean_soil)
        for stddev_arr in stddevs:
            stddev_soil = self._get_stddev_deep_soil(rup.mag, imt)
            stddev_arr.put(soils_i, stddev_soil)
    return means, stddevs
|
def default_run_conf(self):
    '''Default run configuration (namedtuple).'''
    # Build a namedtuple type from the config keys, then populate it.
    RunConf = namedtuple('default_run_conf',
                         field_names=self._default_run_conf.keys())
    return RunConf(**self._default_run_conf)
|
def bind(self, **fields):
    """Return a new L{Message} with this message's contents plus the
    additional given bindings."""
    # Copy first so the original message is never mutated; new bindings
    # override existing keys.
    merged = self._contents.copy()
    merged.update(fields)
    return Message(merged, self._serializer)
|
def get_mean_and_stddevs(self, sites, rup, dists, imt, stddev_types):
    """See :meth:`superclass method
    <.base.GroundShakingIntensityModel.get_mean_and_stddevs>`
    for spec of input and result values.
    """
    # extracting dictionary of coefficients specific to required
    # intensity measure type.
    C = self.COEFFS[imt]
    # The mean is the sum of four independent terms: magnitude scaling,
    # distance scaling, style-of-faulting, and site amplification.
    mean = (self._get_magnitude_scaling_term(C, rup.mag) +
            self._get_distance_scaling_term(C, rup.mag, dists.rrup) +
            self._get_style_of_faulting_term(C, rup.rake) +
            self._get_site_scaling_term(C, sites.vs30))
    stddevs = self._get_stddevs(imt, rup.mag, len(dists.rrup), stddev_types)
    return mean, stddevs
|
def number_peaks(self, x, n=None):
    """As in tsfresh `number_peaks <https://github.com/blue-yonder/tsfresh/blob/master/tsfresh/feature_extraction/feature_calculators.py#L1003>`_

    Calculates the number of peaks of at least support n in the time series x.
    A peak of support n is defined as a subsequence of x where a value occurs,
    which is bigger than its n neighbours to the left and to the right.

    :param x: the time series to calculate the feature of
    :type x: pandas.Series
    :param n: the support of the peak; defaults to 5 when omitted
    :type n: int
    :return: the value of this feature
    :rtype: float
    """
    if n is None:
        n = 5
    peaks = feature_calculators.number_peaks(x, n)
    # BUG FIX: the debug message was copy-pasted from the agg_linear_trend
    # wrapper and misreported which feature was computed.
    logging.debug("number peaks by tsfresh calculated")
    return peaks
|
def get_version():
    """Loads the current module version from version.py and returns it.

    :returns: module version identifier.
    :rtype: str
    """
    version_file_path = os.path.join('pytextql', 'version.py')
    namespace = {}
    # exec of compiled code keeps py3 compatibility (execfile was removed)
    # and associates any syntax errors with the real file name.
    with open(version_file_path, 'rb') as fin:
        code = compile(fin.read(), version_file_path, 'exec')
        exec(code, {}, namespace)
    return namespace['__version__']
|
def get_template_loaders():
    """Compatibility method to fetch the template loaders.
    Source: https://github.com/django-debug-toolbar/django-debug-toolbar/blob/ece1c2775af108a92a0ef59636266b49e286e916/debug_toolbar/compat.py
    """
    try:
        from django.template.engine import Engine
    except ImportError:
        Engine = None
    if Engine is None:
        # Django < 1.8: no Engine class; fall back to the legacy API.
        from django.template.loader import find_template_loader
        return [find_template_loader(loader_name)
                for loader_name in settings.TEMPLATE_LOADERS]
    try:
        engine = Engine.get_default()
    except ImproperlyConfigured:
        return []
    return engine.template_loaders
|
def _key_period(self, text):
    """Action for '.'"""
    self.insert_text(text)
    if not self.codecompletion_auto:
        return
    # Trigger auto-completion only when the preceding token is not purely
    # digits (so typing "3." does not pop up completions for a float).
    last_obj = self.get_last_obj()
    if last_obj and not last_obj.isdigit():
        self.show_code_completion()
|
def list_clusters(self):
    """List the clusters in this instance.

    For example:

    .. literalinclude:: snippets.py
        :start-after: [START bigtable_list_clusters_on_instance]
        :end-before: [END bigtable_list_clusters_on_instance]

    :rtype: tuple
    :returns:
        (clusters, failed_locations), where 'clusters' is list of
        :class:`google.cloud.bigtable.instance.Cluster`, and
        'failed_locations' is a list of locations which could not
        be resolved.
    """
    resp = self._client.instance_admin_client.list_clusters(self.name)
    # Wrap each protobuf cluster in the high-level Cluster type.
    wrapped = [Cluster.from_pb(cluster_pb, self) for cluster_pb in resp.clusters]
    return wrapped, resp.failed_locations
|
def service_start(name):
    '''Start a "service" on the ssh server

    .. versionadded:: 2015.8.2
    '''
    # Send the start command to the remote shell session.
    out, err = DETAILS['server'].sendline('start ' + name)
    # "scrape" the output and return the right fields as a dict
    return parse(out)
|
def scroll_down(lines=1, file=sys.stdout):
    """Scroll the whole page down a number of lines; new lines are added to
    the top.

    Esc[<lines>T
    """
    # Delegate to the escape-code builder and write to the target stream.
    scroll.down(lines).write(file=file)
|
def bounds(self):
    """The bounds of the random variable.

    Set ``self.i = 0.95`` to return the 95% interval if this is used for
    setting bounds on optimizers/etc. where infinite bounds may not be useful.
    """
    intervals = []
    # One (lower, upper) interval per (sigma, mu) component pair.
    for sigma, mu in zip(self.sigma, self.mu):
        intervals.append(scipy.stats.norm.interval(self.i, loc=mu, scale=sigma))
    return intervals
|
def aloha_to_etree(html_source):
    """Converts HTML5 from Aloha editor output to a lxml etree.

    :param html_source: raw HTML5 string produced by the Aloha editor
    :return: the tidied document after all pipeline transforms are applied
    """
    xml = _tidy2xhtml5(html_source)
    # Apply each transform in pipeline order. The previous enumerate()
    # index was never used, so plain iteration is clearer.
    for transform in ALOHA2HTML_TRANSFORM_PIPELINE:
        xml = transform(xml)
    return xml
|
def fingerprint_from_var(var):
    """Extract a fingerprint from a GPG public key."""
    vsn = gpg_version()
    # NOTE(review): this predicate selects GnuPG 2.0.x (major >= 2, minor
    # < 1), which needs --with-fingerprint and formats output differently.
    # Confirm that 1.x is intentionally treated like modern versions.
    legacy_gpg = vsn[0] >= 2 and vsn[1] < 1
    cmd = flatten([gnupg_bin(), gnupg_home()])
    if legacy_gpg:
        cmd.append("--with-fingerprint")
    output = polite_string(stderr_with_input(cmd, var)).split('\n')
    if not output[0].startswith('pub'):
        raise CryptoritoError('probably an invalid gpg key')
    if legacy_gpg:
        # Legacy format: "Key fingerprint = XXXX XXXX ..."; strip spaces.
        return output[1].split('=')[1].replace(' ', '')
    return output[1].strip()
|
async def iter_lines(self, chunk_size=1024):
    """Return an iterator to yield lines from the raw stream.

    This is achieved by reading chunk of bytes (of size chunk_size) at a
    time from the raw stream, and then yielding lines from there.
    """
    pending = b''
    async for chunk in self.iter_chunks(chunk_size):
        # Keep line endings (splitlines(True)) so we can tell whether the
        # last element is a complete line or a partial one.
        lines = (pending + chunk).splitlines(True)
        # Every element except the last is a complete line; yield it with
        # its line ending stripped (splitlines()[0] removes the terminator).
        for line in lines[:-1]:
            await yield_(line.splitlines()[0])
        # The last element may be a partial line; buffer it for the next chunk.
        pending = lines[-1]
    # Flush any trailing data that did not end with a newline.
    if pending:
        await yield_(pending.splitlines()[0])
|
def VerifyMessageSignature(self, unused_response_comms, packed_message_list,
                           cipher, cipher_verified, api_version,
                           remote_public_key):
    """Verify the message list signature.

    This is the way the messages are verified in the client.

    In the client we also check that the nonce returned by the server is correct
    (the timestamp doubles as a nonce). If the nonce fails we deem the response
    unauthenticated since it might have resulted from a replay attack.

    Args:
      packed_message_list: The PackedMessageList rdfvalue from the server.
      cipher: The cipher belonging to the remote end.
      cipher_verified: If True, the cipher's signature is not verified again.
      api_version: The api version we should use.
      remote_public_key: The public key of the source.

    Returns:
      An rdf_flows.GrrMessage.AuthorizationState.

    Raises:
      DecryptionError: if the message is corrupt.
    """
    # This is not used atm since we only support a single api version (3).
    _ = api_version
    # Default to unauthenticated; only upgrade after a successful check.
    result = rdf_flows.GrrMessage.AuthorizationState.UNAUTHENTICATED
    if cipher_verified or cipher.VerifyCipherSignature(remote_public_key):
        stats_collector_instance.Get().IncrementCounter(
            "grr_authenticated_messages")
        result = rdf_flows.GrrMessage.AuthorizationState.AUTHENTICATED
    # Check for replay attacks. We expect the server to return the same
    # timestamp nonce we sent; a mismatch downgrades the result again.
    if packed_message_list.timestamp != self.timestamp:  # pytype: disable=attribute-error
        result = rdf_flows.GrrMessage.AuthorizationState.UNAUTHENTICATED
    if not cipher.cipher_metadata:
        # Fake the metadata
        cipher.cipher_metadata = rdf_flows.CipherMetadata(
            source=packed_message_list.source)
    return result
|
def reconstructImage(self):
    '''Undo the frequency-domain shift stored in ``self.fshift``, apply the
    inverse 2-D FFT and return the real part of the reconstructed image.'''
    unshifted_spectrum = np.fft.ifftshift(self.fshift)
    spatial = np.fft.ifft2(unshifted_spectrum)
    return spatial.real
|
def check(self, request, secret):
    """Verifies whether or not the request bears an authorization appropriate
    and valid for this version of the signature.

    This verifies every element of the signature, including the timestamp's
    value. Does not alter the request.

    Keyword arguments:
    request -- A request object which can be consumed by this API.
    secret -- The base64-encoded secret key for the HMAC authorization.
    """
    auth_header = request.get_header("Authorization")
    if auth_header == "":
        return False
    parsed = self.parse_auth_headers(auth_header)
    if "signature" not in parsed:
        return False
    raw_timestamp = request.get_header('x-authorization-timestamp')
    if raw_timestamp == '':
        raise KeyError("X-Authorization-Timestamp is required.")
    timestamp = int(float(raw_timestamp))
    if timestamp == 0:
        raise ValueError("X-Authorization-Timestamp must be a valid, non-zero timestamp.")
    # preset_time pins "now" for deterministic testing.
    curr_time = time.time() if self.preset_time is None else self.preset_time
    # Reject timestamps outside a +/- 15 minute window.
    if timestamp > curr_time + 900:
        raise ValueError("X-Authorization-Timestamp is too far in the future.")
    if timestamp < curr_time - 900:
        raise ValueError("X-Authorization-Timestamp is too far in the past.")
    body = request.body
    if body is not None and body != b'':
        declared_hash = request.get_header("x-authorization-content-sha256")
        if declared_hash == '':
            raise KeyError("X-Authorization-Content-SHA256 is required for requests with a request body.")
        actual_hash = base64.b64encode(hashlib.sha256(body).digest()).decode('utf-8')
        if declared_hash != actual_hash:
            raise ValueError("X-Authorization-Content-SHA256 must match the SHA-256 hash of the request body.")
    return parsed["signature"] == self.sign(request, parsed, secret)
|
def energy_error(NAV_CONTROLLER_OUTPUT, VFR_HUD):
    '''return energy error matching APM internals

    This is positive when we are too low or going too slow'''
    # total energy error = kinetic (airspeed) term + weighted potential
    # (altitude) term; alt_error is converted from metres to centimetres.
    altitude_error_cm = NAV_CONTROLLER_OUTPUT.alt_error * 100
    return airspeed_energy_error(NAV_CONTROLLER_OUTPUT, VFR_HUD) + altitude_error_cm * 0.098
|
def _build_named_object_ids(parameters):
    """Builds a list of NamedObjectId."""
    # A bare string is treated as a single parameter name.
    if isinstance(parameters, str):
        parameters = [parameters]
    return [_build_named_object_id(parameter) for parameter in parameters]
|
def profile(request):
    '''Get or set user profile.'''
    serializer_class = registration_settings.PROFILE_SERIALIZER_CLASS
    if request.method not in ('POST', 'PUT', 'PATCH'):
        # Plain GET: just serialize the current user.
        serializer = serializer_class(instance=request.user)
        return Response(serializer.data)
    # Write path: PATCH allows partial updates, POST/PUT require full data.
    serializer = serializer_class(
        instance=request.user,
        data=request.data,
        partial=(request.method == 'PATCH'),
    )
    serializer.is_valid(raise_exception=True)
    serializer.save()
    return Response(serializer.data)
|
def edit(self, description='', files=None):
    """Edit this gist.

    :param str description: (optional), description of the gist
    :param dict files: (optional), files that make up this gist; the
        key(s) should be the file name(s) and the values should be another
        (optional) dictionary with (optional) keys: 'content' and
        'filename' where the former is the content of the file and the
        latter is the new name of the file.
    :returns: bool -- whether the edit was successful
    """
    # NOTE: ``files`` previously defaulted to a shared mutable ``{}``
    # (the classic mutable-default-argument pitfall); ``None`` keeps the
    # same falsy behaviour for callers while avoiding the shared object.
    data = {}
    json = None
    if description:
        data['description'] = description
    if files:
        data['files'] = files
    if data:
        # Only hit the API when there is actually something to change.
        json = self._json(self._patch(self._api, data=dumps(data)), 200)
    if json:
        self._update_(json)
        return True
    return False
|
def velocity_embedding(data, basis=None, vkey='velocity', scale=10, self_transitions=True, use_negative_cosines=True, direct_projection=None, pca_transform=None, retain_scale=False, autoscale=True, all_comps=True, T=None, copy=False):
    """Computes the single cell velocities in the embedding.

    Arguments
    ---------
    data: :class:`~anndata.AnnData`
        Annotated data matrix.
    basis: `str` (default: `'tsne'`)
        Which embedding to use.
    vkey: `str` (default: `'velocity'`)
        Name of velocity estimates to be used.
    scale: `int` (default: 10)
        Scale parameter of gaussian kernel for transition matrix.
    self_transitions: `bool` (default: `True`)
        Whether to allow self transitions, based on the confidences of
        transitioning to neighboring cell.
    use_negative_cosines: `bool` (default: `True`)
        Whether to use not only positive, but also negative cosines and use
        those transitions to the opposite way.
    direct_projection: `bool` (default: `True`)
        Whether to directly project the velocities into PCA space, thus
        skipping the velocity graph.
    pca_transform: `bool` (default: `None`)
        Same as direct_projection (deprecated).
    retain_scale: `bool` (default: `False`)
        Whether to retain scale from high dimensional space in embedding.
    autoscale: `bool` (default: `True`)
        Whether to scale the embedded velocities by a scalar multiplier,
        which simply ensures that the arrows in the embedding are properly
        scaled.
    all_comps: `bool` (default: `True`)
        Whether to compute the velocities on all embedding components or
        just the first two.
    T: `csr_matrix` (default: `None`)
        Allows the user to directly pass a transition matrix.
    copy: `bool` (default: `False`)
        Whether to operate on (and return) a copy of `data`.

    Returns
    -------
    Returns or updates `adata` with the attributes
    velocity_basis: `.obsm`
        coordinates of velocity projection on embedding
    """
    adata = data.copy() if copy else data
    # Default to the most refined embedding computed so far.
    if basis is None:
        keys = [key for key in ['pca', 'tsne', 'umap'] if 'X_' + key in adata.obsm.keys()]
        if len(keys) > 0:
            basis = keys[-1]
        else:
            raise ValueError('No basis specified')
    if 'X_' + basis not in adata.obsm_keys():
        raise ValueError('You need compute the embedding first.')
    logg.info('computing velocity embedding', r=True)
    # `pca_transform` is the deprecated alias of `direct_projection`.
    if pca_transform is None and direct_projection is None:
        pca_transform = True if 'pca' in basis else False
    if 'pca' in basis and (direct_projection or pca_transform):
        # Direct projection: rotate high-dimensional velocities by the PC
        # loadings instead of going through the velocity graph.
        V = adata.layers[vkey]
        PCs = adata.varm['PCs'] if all_comps else adata.varm['PCs'][:, :2]
        if vkey + '_genes' in adata.var.keys():
            # Restrict to the genes actually used for the velocity fit.
            V = V[:, adata.var[vkey + '_genes']]
            PCs = PCs[adata.var[vkey + '_genes']]
        # Drop genes whose velocity is NaN anywhere.
        nans = np.isnan(V.sum(0))
        if np.any(nans):
            V = V[:, ~nans]
            PCs = PCs[~nans]
        X_emb = adata.obsm['X_' + basis]
        V_emb = (V - V.mean(0)).dot(PCs)
    else:
        # Graph-based projection through the cell-to-cell transition matrix.
        X_emb = adata.obsm['X_' + basis] if all_comps else adata.obsm['X_' + basis][:, :2]
        V_emb = np.zeros(X_emb.shape)
        T = transition_matrix(adata, vkey=vkey, scale=scale, self_transitions=self_transitions, use_negative_cosines=use_negative_cosines) if T is None else T
        T.setdiag(0)
        T.eliminate_zeros()
        # Densifying is faster for small datasets.
        densify = adata.n_obs < 1e4
        TA = T.A if densify else None
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            for i in range(adata.n_obs):
                indices = T[i].indices
                # Displacements towards the neighbors, shape (n_neighbors, 2).
                dX = X_emb[indices] - X_emb[i, None]
                if not retain_scale:
                    dX /= norm(dX)[:, None]
                # Zero diff in a steady-state.
                dX[np.isnan(dX)] = 0
                probs = TA[i, indices] if densify else T[i].data
                # Expected displacement minus uniform baseline
                # (probs.mean() == probs.sum() / len(indices)).
                V_emb[i] = probs.dot(dX) - probs.mean() * dX.sum(0)
        if retain_scale:
            # Rescale by the cosine projection onto the expected expression change.
            delta = T.dot(adata.X) - adata.X
            if issparse(delta):
                delta = delta.A
            cos_proj = (adata.layers[vkey] * delta).sum(1) / norm(delta)
            V_emb *= np.clip(cos_proj[:, None] * 10, 0, 1)
    if autoscale:
        V_emb /= (3 * quiver_autoscale(X_emb, V_emb))
    vkey += '_' + basis
    adata.obsm[vkey] = V_emb
    logg.info('    finished', time=True, end=' ' if settings.verbosity > 2 else '\n')
    logg.hint('added\n' '    \'' + vkey + '\', embedded velocity vectors (adata.obsm)')
    return adata if copy else None
|
def to_alu_hlu_map(input_str):
    """Converter for alu hlu map.

    Convert following input into a alu -> hlu map:

    Sample input:

        HLU Number     ALU Number
          0               12
          1               23

    ALU stands for array LUN number, HLU stands for host LUN number.

    :param input_str: raw input from naviseccli (or None)
    :return: alu -> hlu map (empty when input is None or has no pairs)
    """
    ret = {}
    if input_str is None:
        return ret
    # Require at least one whitespace character between the two numbers.
    # The previous pattern r'(\d+)\s*(\d+)' could split a single number,
    # e.g. a stray "12" became hlu=1, alu=2.
    pattern = re.compile(r'(\d+)\s+(\d+)')
    for line in input_str.split('\n'):
        matched = pattern.search(line)
        if matched is None:
            continue
        hlu, alu = matched.groups()
        ret[int(alu)] = int(hlu)
    return ret
|
def series_in_dir(self):
    """Count DICOM files per series (input is dcmdir, not dirpath).

    Tallies the 'SeriesNumber' of every entry in ``self.files_with_info``;
    entries without a 'SeriesNumber' key are grouped under ``None``.

    :return: (counts, bins) where ``bins`` lists the series numbers in
        order of first appearance and ``counts`` the matching file counts.
    """
    # NOTE: the previous version carried a large block of commented-out
    # numpy/histogram code; it was dead and has been removed.
    countsd = {}
    for entry in self.files_with_info:
        sn = entry['SeriesNumber'] if 'SeriesNumber' in entry else None
        countsd[sn] = countsd.get(sn, 0) + 1
    bins = list(countsd)
    counts = list(countsd.values())
    return counts, bins
|
def load(self, spec):
    """Find and return the template associated to a TemplateSpec instance.

    Returns the template as a unicode string.

    Arguments:
      spec: a TemplateSpec instance.
    """
    # An inline template on the spec takes precedence over a file on disk.
    if spec.template is None:
        path = self._find(spec)
        return self.loader.read(path, spec.template_encoding)
    return self.loader.unicode(spec.template, spec.template_encoding)
|
def zip2bytes(compressed):
    """UNZIP DATA: gunzip ``compressed`` (raw bytes or a readable file
    object) and return a readable stream of the decompressed content."""
    # A file-like object can be wrapped directly.
    if hasattr(compressed, "read"):
        return gzip.GzipFile(fileobj=compressed, mode='r')
    stream = BytesIO(compressed)
    archive = gzip.GzipFile(fileobj=stream, mode='r')
    from pyLibrary.env.big_data import safe_size
    return safe_size(archive)
|
def delete_poll(args):
    """Deletes a poll."""
    # Permission and syntax guards first.
    if not args.isadmin:
        return "Nope, not gonna do it."
    if not args.msg:
        return "Syntax: !poll delete <pollnum>"
    if not args.msg.isdigit():
        return "Not A Valid Positive Integer."
    poll = args.session.query(Polls).filter(Polls.accepted == 1, Polls.id == int(args.msg)).first()
    if poll is None:
        return "Poll does not exist."
    if poll.active == 1:
        return "You can't delete an active poll!"
    if poll.deleted == 1:
        return "Poll already deleted."
    # Soft delete: flag only, the row is kept.
    poll.deleted = 1
    return "Poll deleted."
|
def _pick_exit(self, block_address, stmt_idx, target_ips):
    """Include an exit in the final slice.

    :param block_address: Address of the basic block.
    :param stmt_idx: ID of the exit statement.
    :param target_ips: The target address of this exit statement.
    """
    # TODO: Support context-sensitivity
    exit_descriptor = (stmt_idx, target_ips)
    chosen = self.chosen_exits[block_address]
    # De-duplicate: record each exit at most once per block.
    if exit_descriptor not in chosen:
        chosen.append(exit_descriptor)
|
def get_defaults(path):
    '''Reads file for configuration defaults.

    Lines must look like ``key = value``; values may be wrapped in single
    or double quotes, and lines starting with ``#`` are skipped.

    Arguments:
        - path (str) Absolute filepath (usually ~/.licenser)

    Returns:
        - (dict) Defaults for name, email, license, .txt extension
    '''
    if not os.path.isfile(path):
        return {}
    defaults = {}
    with open(path) as f:
        for line in f:
            line = line.strip()
            if '=' not in line or line.startswith('#'):
                continue
            k, v = line.split('=', 1)
            # Bug fix: strip whitespace around the '=' so "name = x" and
            # "name=x" behave the same (keys/values previously kept the
            # surrounding spaces, which also prevented quote stripping).
            k = k.strip()
            v = v.strip().strip('"').strip("'")
            defaults[k] = v
    return defaults
|
def maybe_download(url, filename):
    """Download the data from Yann's website, unless it's already here.

    :param url: base URL; ``filename`` is appended to form the full URL.
    :param filename: name of the file inside ``WORK_DIRECTORY``.
    :return: local path of the (possibly pre-existing) file.
    """
    if not os.path.exists(WORK_DIRECTORY):
        os.mkdir(WORK_DIRECTORY)
    filepath = os.path.join(WORK_DIRECTORY, filename)
    if not os.path.exists(filepath):
        filepath, _ = request.urlretrieve(url + filename, filepath)
        # Bug fix: only report a download when one actually happened.
        # Previously the success message (and the stat call) also ran for
        # files that were already cached locally.
        statinfo = os.stat(filepath)
        print('Successfully downloaded', filename, statinfo.st_size, 'bytes.')
    return filepath
|
def dumpBlock(self, block_name):
    """This method is used at source server and gets the
    information on a single block that is being migrated.
    Try to return in a format to be ready for insert calls.

    :param block_name: exact (wildcard-free) block name to dump.
    :return: dict with keys block, dataset, primds, files,
        block_parent_list, ds_parent_list, file_conf_list,
        file_parent_list, dataset_conf_list and, when available,
        acquisition_era / processing_era; {} when the block is unknown.
    """
    # Wildcards would make the dump ambiguous, so reject them outright.
    if '%' in block_name or '*' in block_name:
        msg = "No wildcard is allowed in block_name for dumpBlock API"
        dbsExceptionHandler('dbsException-invalid-input', msg, self.logger.exception)
    conn = self.dbi.connection()
    try:
        # block name is unique
        block1 = self.blocklist.execute(conn, block_name=block_name)
        block = []
        for b1 in block1:
            if not b1:
                return {}
            else:
                block = b1
        # a block only has one dataset and one primary dataset
        # in order to reduce the number of dao objects, we will not write
        # a special migration one. However, we will have to remove the
        # extras
        # block1 is a generator. When it is empty, it will skip the for loop above. why?
        # we cannot test on b1 to decide if the generator is empty or not.
        # so have to do below:
        if not block:
            return {}
        dataset1 = self.datasetlist.execute(conn, dataset=block["dataset"], dataset_access_type="")
        dataset = []
        for d in dataset1:
            if d:
                dataset = d
                dconfig_list = self.outputCoflist.execute(conn, dataset=dataset['dataset'])
            else:
                return {}
        # get block parentage
        bparent = self.blockparentlist.execute(conn, block['block_name'])
        # get dataset parentage
        dsparent = self.dsparentlist.execute(conn, dataset['dataset'])
        # Trim internal-only keys from each parentage row before returning.
        for p in dsparent:
            del p['parent_dataset_id']
            if 'dataset' in p:
                del p['dataset']
            elif 'this_dataset' in p:
                del p['this_dataset']
            else:
                pass
        # Materialize the file-parentage generator so it can be embedded
        # in the result dict.
        fparent_list = self.fplist.execute(conn, block_id=block['block_id'])
        fparent_list2 = []
        for fp in fparent_list:
            fparent_list2.append(fp)
        fconfig_list = self.outputCoflist.execute(conn, block_id=block['block_id'])
        acqEra = {}
        prsEra = {}
        # Eras are optional; only look them up when the dataset names one.
        if dataset["acquisition_era_name"] not in ("", None):
            acqEra = self.aelist.execute(conn, acquisitionEra=dataset["acquisition_era_name"])[0]
        if dataset["processing_version"] not in ("", None):
            prsEra = self.pelist.execute(conn, processingV=dataset["processing_version"])[0]
        primds = self.primdslist.execute(conn, primary_ds_name=dataset["primary_ds_name"])[0]
        del dataset["primary_ds_name"], dataset['primary_ds_type']
        files = self.filelist.execute(conn, block_name=block_name)
        for f in files:
            # There are a trade off between json sorting and db query.
            # We keep lumi sec in a file, but the file parentage seperate
            # from file
            file_lumi_list = []
            for item in self.fllist.execute(conn, logical_file_name=f['logical_file_name'], migration=True):
                file_lumi_list.append(item)
            f.update(file_lumi_list=file_lumi_list)
            del file_lumi_list
            # YG 09/2015
            del f['branch_hash_id']
        # These keys were only needed to resolve the eras above.
        del dataset["acquisition_era_name"], dataset["processing_version"]
        del block["dataset"]
        result = dict(block=block, dataset=dataset, primds=primds, files=files, block_parent_list=bparent, ds_parent_list=dsparent, file_conf_list=fconfig_list, file_parent_list=fparent_list2, dataset_conf_list=dconfig_list)
        if acqEra:
            result["acquisition_era"] = acqEra
        if prsEra:
            result["processing_era"] = prsEra
        return result
    finally:
        if conn:
            conn.close()
|
def uninit(self):
    """! @brief Uninitialize the flash algo.

    Before further operations are executed, the algo must be reinited. The
    target is left in a state where the algo does not have to be reloaded
    when init() is called.

    @exception FlashFailure
    """
    # Nothing to do when no operation is active.
    if self._active_operation is None:
        return
    if self._is_api_valid('pc_unInit'):
        # Execute the algo's uninit subroutine for the active operation.
        status = self._call_function_and_wait(self.flash_algo['pc_unInit'], r0=self._active_operation.value)
        if status != 0:
            raise FlashFailure('uninit error: %i' % status, result_code=status)
    self._active_operation = None
|
def from_wif_file(path: str) -> SigningKeyType:
    """Return SigningKey instance from Duniter WIF file.

    :param path: Path to WIF file
    """
    with open(path, 'r') as wif_file:
        wif_content = wif_file.read()
    # The Data field carries the base58-encoded key material.
    data_pattern = compile('Data: ([1-9A-HJ-NP-Za-km-z]+)', MULTILINE)
    match = search(data_pattern, wif_content)
    if match is None:
        raise Exception('Error: Bad format WIF v1 file')
    wif_hex = match.groups()[0]
    return SigningKey.from_wif_hex(wif_hex)
|
def put_blob(storage_conn=None, **kwargs):
    '''
    .. versionadded:: 2015.8.0

    Upload a blob.

    storage_conn
        existing storage connection object; created from ``kwargs`` when
        omitted.

    Required kwargs: ``container`` (blob container name), ``name`` (blob
    name) and exactly one of ``blob_path`` (path of a local file to
    upload) or ``blob_content`` (raw blob contents). All remaining
    optional kwargs are forwarded as the corresponding Azure blob
    properties.
    '''
    if not storage_conn:
        storage_conn = get_storage_conn(opts=kwargs)
    # Validate the required arguments up front.
    if 'container' not in kwargs:
        raise SaltSystemExit(code=42, msg='The blob container name must be specified as "container"')
    if 'name' not in kwargs:
        raise SaltSystemExit(code=42, msg='The blob name must be specified as "name"')
    if 'blob_path' not in kwargs and 'blob_content' not in kwargs:
        raise SaltSystemExit(code=42, msg='Either a path to a file needs to be passed in as "blob_path" ' 'or the contents of a blob as "blob_content."')
    # Map our kwarg names onto the SDK's x_ms_* parameter names; absent
    # options are passed as None so the SDK applies its own defaults.
    blob_kwargs = {'container_name': kwargs['container'], 'blob_name': kwargs['name'], 'cache_control': kwargs.get('cache_control', None), 'content_language': kwargs.get('content_language', None), 'content_md5': kwargs.get('content_md5', None), 'x_ms_blob_content_type': kwargs.get('blob_content_type', None), 'x_ms_blob_content_encoding': kwargs.get('blob_content_encoding', None), 'x_ms_blob_content_language': kwargs.get('blob_content_language', None), 'x_ms_blob_content_md5': kwargs.get('blob_content_md5', None), 'x_ms_blob_cache_control': kwargs.get('blob_cache_control', None), 'x_ms_meta_name_values': kwargs.get('meta_name_values', None), 'x_ms_lease_id': kwargs.get('lease_id', None), }
    if 'blob_path' in kwargs:
        data = storage_conn.put_block_blob_from_path(file_path=kwargs['blob_path'], **blob_kwargs)
    elif 'blob_content' in kwargs:
        data = storage_conn.put_block_blob_from_bytes(blob=kwargs['blob_content'], **blob_kwargs)
    return data
|
def create_dataset_synchronous(self, file_url, dataset_type='image', token=None, url=API_CREATE_DATASET):
    """Creates a dataset so you can train models from it.

    :param file_url: string, url to an accessible zip file containing the
        necessary image files and folder structure indicating the labels to
        train. See docs online.
    :param dataset_type: string, one of the dataset types, available options
        Nov 2017 were 'image', 'image-detection' and 'image-multi-label'.
    :returns: requests object
    """
    auth_header = 'Bearer ' + self.check_for_token(token)
    # Multipart body carries the dataset type and the source archive url.
    encoder = MultipartEncoder(fields={'type': dataset_type, 'path': file_url})
    headers = {
        'Authorization': auth_header,
        'Cache-Control': 'no-cache',
        'Content-Type': encoder.content_type,
    }
    return requests.post(url, headers=headers, data=encoder)
|
def desc_for(self, obj: Element, doing_descs: bool) -> str:
    """Return a description for object if it is unique (different than its parent).

    @param obj: object to be described
    @param doing_descs: If false, always return an empty string
    @return: text or empty string
    """
    # No description, or descriptions globally disabled: nothing to emit.
    if not obj.description or not doing_descs:
        return ''
    # Resolve the parent definition, if any.
    if isinstance(obj, SlotDefinition) and obj.is_a:
        parent = self.schema.slots[obj.is_a]
    elif isinstance(obj, ClassDefinition) and obj.is_a:
        parent = self.schema.classes[obj.is_a]
    else:
        parent = None
    # Suppress descriptions merely inherited verbatim from the parent.
    if parent and obj.description == parent.description:
        return ''
    return obj.description
|
def params(self):
    """:return: A dictionary of SSOS query parameters.
    :rtype: dict
    """
    query = dict(
        format=RESPONSE_FORMAT,
        verbose=self.verbose,
        epoch1=str(self.search_start_date),
        epoch2=str(self.search_end_date),
        search=self.orbit_method,
        eunits=self.error_units,
        eellipse=self.error_ellipse,
        extres=self.resolve_extension,
        xyres=self.resolve_position,
        telinst=self.telescope_instrument,
    )
    # 'bynameHorizons' looks targets up by name; every other orbit method
    # submits the observations themselves.
    if self.orbit_method == 'bynameHorizons':
        query['object'] = NEW_LINE.join(str(target_name) for target_name in self.observations)
    else:
        query['obs'] = NEW_LINE.join(str(observation) for observation in self.observations)
    return query
|
def get_scheme(self):
    """When Splunk starts, it looks for all the modular inputs defined by
    its configuration, and tries to run them with the argument --scheme.
    Splunkd expects the modular inputs to print a description of the
    input in XML on stdout. The modular input framework takes care of all
    the details of formatting XML and printing it. The user need only
    override get_scheme and return a new Scheme object.

    :return: scheme, a Scheme object
    """
    # Splunk will display "Github Repository Forks" to users for this input.
    scheme = Scheme("Github Repository Forks")
    scheme.description = "Streams events giving the number of forks of a GitHub repository."
    # If you set external validation to True, without overriding
    # validate_input, the script will accept anything as valid. Generally
    # you only need external validation if there are relationships you must
    # maintain among the parameters, or you need to check that some
    # resource is reachable or valid. Otherwise, Splunk lets you specify a
    # validation string for each argument and runs validation internally
    # using that string (e.g. scheme.validation = "owner==splunk").
    scheme.use_external_validation = True
    scheme.use_single_instance = True

    owner = Argument("owner")
    owner.title = "Owner"
    owner.data_type = Argument.data_type_string
    owner.description = "Github user or organization that created the repository."
    owner.required_on_create = True
    scheme.add_argument(owner)

    repo_name = Argument("repo_name")
    repo_name.title = "Repo Name"
    repo_name.data_type = Argument.data_type_string
    repo_name.description = "Name of the Github repository."
    repo_name.required_on_create = True
    scheme.add_argument(repo_name)

    return scheme
|
def createPerson(self, nickname, vip=_NO_VIP):
    """Create a new L{Person} with the given name in this organizer.

    @type nickname: C{unicode}
    @param nickname: The value for the new person's C{name} attribute.

    @type vip: C{bool}
    @param vip: Value to set the created person's C{vip} attribute to
        (deprecated).

    @raise ValueError: if a person with this name already exists in this
        organizer.

    @rtype: L{Person}
    """
    # Uniqueness check: the query yields any existing person with this
    # name; entering the loop body at all means the name is taken.
    for person in (self.store.query(Person, attributes.AND(Person.name == nickname, Person.organizer == self))):
        raise ValueError("Person with name %r exists already." % (nickname,))
    person = Person(store=self.store, created=extime.Time(), organizer=self, name=nickname)
    # _NO_VIP is a sentinel: only an explicitly passed vip argument
    # triggers the (deprecated) attribute assignment and its warning.
    if vip is not self._NO_VIP:
        warn("Usage of Organizer.createPerson's 'vip' parameter" " is deprecated", category=DeprecationWarning)
        person.vip = vip
    # Notify plugins after the person is fully constructed.
    self._callOnOrganizerPlugins('personCreated', person)
    return person
|
def _load_modules_from_entry_points(self, entry_point_group):
    """Load modules from the entry_points (slower).

    Entry points can be used to add new commands to the CLI.

    Usage:

        entry_points={'softlayer.cli': ['new-cmd = mymodule.new_cmd.cli']}
    """
    entry_points = pkg_resources.iter_entry_points(group=entry_point_group, name=None)
    # Register every discovered entry point under its advertised name.
    for entry_point in entry_points:
        self.commands[entry_point.name] = entry_point
|
def cancel(self, timeperiods, hosts, services):
    """Remove ref in scheduled downtime and raise downtime log entry (cancel).

    :param timeperiods: timeperiods objects, forwarded to downtimes this
        one triggered
    :param hosts: hosts objects to get item ref
    :type hosts: alignak.objects.host.Hosts
    :param services: services objects to get item ref
    :type services: alignak.objects.service.Services
    :return: expiration broks of this downtime and of every downtime it
        triggered (may be empty)
    :rtype: list
    """
    # The downtime references either a host or a service.
    if self.ref in hosts:
        item = hosts[self.ref]
    else:
        item = services[self.ref]
    broks = []
    self.is_in_effect = False
    item.scheduled_downtime_depth -= 1
    # Only when the last overlapping downtime is gone does the item leave
    # the scheduled-downtime state.
    if item.scheduled_downtime_depth == 0:
        item.raise_cancel_downtime_log_entry()
        item.in_scheduled_downtime = False
        # Host expire broks carry the host name; service broks need both
        # host and service names.
        if self.ref in hosts:
            broks.append(self.get_expire_brok(item.get_name()))
        else:
            broks.append(self.get_expire_brok(item.host_name, item.get_name()))
    self.del_automatic_comment(item)
    self.can_be_deleted = True
    item.in_scheduled_downtime_during_last_check = True
    # Nagios does not notify on canceled downtimes
    # res.extend(self.ref.create_notifications('DOWNTIMECANCELLED'))
    # Also cancel other downtimes triggered by me
    for downtime in self.activate_me:
        broks.extend(downtime.cancel(timeperiods, hosts, services))
    return broks
|
def lemmatize(ambiguous_word: str, pos: str = None, neverstem=False, lemmatizer=wnl, stemmer=porter) -> str:
    """Tries to convert a surface word into lemma, and if the lemmatized word
    is not in wordnet then try and convert the surface word into its stem.

    This is to handle the case where users input a surface word as an
    ambiguous word and the surface word is not a lemma.
    """
    # Try to be a little smarter and use the most frequent POS when none given.
    if not pos:
        pos = penn2morphy(pos_tag([ambiguous_word])[0][1], default_to_noun=True)
    lemma = lemmatizer.lemmatize(ambiguous_word, pos=pos)
    # Ensure that the result is actually known to WordNet.
    if wn.synsets(lemma):
        return lemma
    if neverstem:
        return ambiguous_word
    stem = stemmer.stem(ambiguous_word)
    if wn.synsets(stem):
        return stem
    return ambiguous_word
|
def get_asset_mdata():
    """Return default mdata map for Asset.

    Each entry follows the osid mdata layout: an element label and
    instructions (DisplayText dicts), the common required/read_only/
    linked/array flags, and syntax-specific default values. The previous
    version spelled out every entry inline, duplicating the same
    DisplayText boilerplate dozens of times; the private builders below
    produce an identical mapping.
    """
    return {
        'copyright_registration': _asset_string_mdata('copyright registration', plain_default=True),
        'copyright': _asset_string_mdata('copyright'),
        'title': _asset_string_mdata('title'),
        'distribute_verbatim': _asset_boolean_mdata('distribute verbatim'),
        'created_date': _asset_datetime_mdata('created date'),
        'distribute_alterations': _asset_boolean_mdata('distribute alterations'),
        'principal_credit_string': _asset_string_mdata('principal credit string'),
        'published_date': _asset_datetime_mdata('published date'),
        'source': _asset_id_mdata('source'),
        'provider_links': _asset_id_mdata('provider links', array=True, instructions='accepts an osid.id.Id[] object'),
        'public_domain': _asset_boolean_mdata('public domain'),
        'distribute_compositions': _asset_boolean_mdata('distribute compositions'),
        'composition': _asset_id_mdata('composition'),
        'published': _asset_boolean_mdata('published'),
    }


def _asset_display_text(text):
    """Build a DisplayText-style dict with the default type identifiers."""
    return {
        'text': text,
        'languageTypeId': str(DEFAULT_LANGUAGE_TYPE),
        'scriptTypeId': str(DEFAULT_SCRIPT_TYPE),
        'formatTypeId': str(DEFAULT_FORMAT_TYPE),
    }


def _asset_base_mdata(label, instructions):
    """Fields shared by every Asset mdata entry."""
    return {
        'element_label': _asset_display_text(label),
        'instructions': _asset_display_text(instructions),
        'required': False,
        'read_only': False,
        'linked': False,
        'array': False,
    }


def _asset_string_mdata(label, plain_default=False):
    """STRING-syntax entry; ``plain_default`` keeps the legacy bare-string
    default used by 'copyright registration'."""
    mdata = _asset_base_mdata(label, 'enter no more than 256 characters.')
    mdata.update({
        'default_string_values': [''] if plain_default else [_asset_display_text('')],
        'syntax': 'STRING',
        'minimum_string_length': 0,
        'maximum_string_length': 256,
        'string_set': [],
    })
    return mdata


def _asset_boolean_mdata(label):
    """BOOLEAN-syntax entry."""
    mdata = _asset_base_mdata(label, 'enter either true or false.')
    mdata.update({
        'default_boolean_values': [None],
        'syntax': 'BOOLEAN',
    })
    return mdata


def _asset_datetime_mdata(label):
    """DATETIME-syntax entry."""
    mdata = _asset_base_mdata(label, 'enter a valid datetime object.')
    mdata.update({
        'default_date_time_values': [None],
        'syntax': 'DATETIME',
        'date_time_set': [],
    })
    return mdata


def _asset_id_mdata(label, array=False, instructions='accepts an osid.id.Id object'):
    """ID-syntax entry; array-valued entries get an empty default list."""
    mdata = _asset_base_mdata(label, instructions)
    mdata.update({
        'array': array,
        'default_id_values': [] if array else [''],
        'syntax': 'ID',
        'id_set': [],
    })
    return mdata
|
def new_param(name, type, value, start=None, scale=None, unit=None, dataunit=None, comment=None):
    """Construct a LIGO Light Weight XML Param document subtree.

    FIXME: document keyword arguments.
    """
    param = Param()
    param.Name = name
    param.Type = type
    param.pcdata = value
    # FIXME: I have no idea how most of the attributes should be
    # encoded, I don't even know what they're supposed to be.
    # Only set the attributes the caller actually supplied.
    for attr_name, attr_value in (("DataUnit", dataunit), ("Scale", scale),
                                  ("Start", start), ("Unit", unit)):
        if attr_value is not None:
            setattr(param, attr_name, attr_value)
    if comment is not None:
        comment_node = ligolw.Comment()
        comment_node.pcdata = comment
        param.appendChild(comment_node)
    return param
|
def clear(self, name):
    """Clear (reset) a counter specified by its name.

    :param name: a counter name to clear.
    :raises KeyError: if no counter with that name exists.
    """
    # the lock is released even if the name is missing and KeyError escapes
    with self._lock:
        del self._cache[name]
|
def scale_timeseries_unit(tsunit, scaling='density'):
    """Scale the unit of a `TimeSeries` to match that of a `FrequencySeries`.

    Parameters
    ----------
    tsunit : `~astropy.units.UnitBase`
        input unit from `TimeSeries`
    scaling : `str`
        type of frequency series, either 'density' for a PSD, or
        'spectrum' for a power spectrum.

    Returns
    -------
    unit : `~astropy.units.Unit`
        unit to be applied to the resulting `FrequencySeries`.
    """
    # reject unknown scalings before touching any unit machinery
    if scaling not in ('density', 'spectrum'):
        raise ValueError("Unknown scaling: %r" % scaling)
    # a PSD divides by frequency; a power spectrum does not
    baseunit = units.Hertz if scaling == 'density' else units.dimensionless_unscaled
    return tsunit ** 2 / baseunit if tsunit else baseunit ** -1
|
def iter_valid_fields(meta):
    """Yield ``(name, field)`` pairs for every document field the form exposes.

    Field selection honours the form's ``Meta`` options:

    * ``meta.fields`` — when set, only these fields are yielded, in the
      given order (``exclude`` is not applied to an explicit list).
    * ``meta.exclude`` — names to skip; the document's id field is always
      treated as excluded.

    :param meta: form ``Meta`` object carrying ``document`` plus optional
        ``fields``/``exclude`` tuples.
    """
    # fetch field configuration and always add the id_field as exclude
    meta_fields = getattr(meta, 'fields', ())
    meta_exclude = getattr(meta, 'exclude', ())
    meta_exclude += (meta.document._meta.get('id_field'),)
    if meta_fields:
        # walk meta_fields (not the document) to keep their order in the form
        for field_name in meta_fields:
            field = meta.document._fields.get(field_name)
            if field:
                yield (field_name, field)
    else:
        # ``items()`` instead of the Python-2-only ``iteritems()`` so this
        # works identically on Python 2 and Python 3
        for field_name, field in meta.document._fields.items():
            if field_name not in meta_exclude:
                yield (field_name, field)
|
def H_donor_count(mol):
    """Hydrogen bond donor count."""
    mol.require("Valence")
    donors = [atom for _, atom in mol.atoms_iter() if atom.H_donor]
    return len(donors)
|
def create_ec2_role(self, role, bound_ami_id=None, bound_account_id=None, bound_iam_role_arn=None, bound_iam_instance_profile_arn=None, bound_ec2_instance_id=None, bound_region=None, bound_vpc_id=None, bound_subnet_id=None, role_tag=None, ttl=None, max_ttl=None, period=None, policies=None, allow_instance_migration=False, disallow_reauthentication=False, resolve_aws_unique_ids=None, mount_point='aws-ec2'):
    """POST /auth/<mount_point>/role/<role>

    Register an EC2-type role with Vault's AWS auth backend. Every
    ``bound_*`` argument is an optional login constraint; only constraints
    that are not ``None`` are sent to Vault.

    :param role: Name of the role to create.
    :param bound_ami_id: AMI id(s) instances must be launched from.
    :param bound_account_id: AWS account id(s) instances must belong to.
    :param bound_iam_role_arn: IAM role ARN(s) instances must use.
    :param bound_iam_instance_profile_arn: IAM instance profile ARN(s).
    :param bound_ec2_instance_id: EC2 instance id(s) allowed to log in.
    :param bound_region: AWS region(s) instances must run in.
    :param bound_vpc_id: VPC id(s) instances must belong to.
    :param bound_subnet_id: Subnet id(s) instances must belong to.
    :param role_tag: Enables role tags with the given tag key.
    :param ttl: Token TTL; defaults to 0 ("use the backend default").
    :param max_ttl: Maximum token lifetime; defaults to 0 as above.
    :param period: Periodic-token renewal period; defaults to 0 as above.
    :param policies: Policies to set on tokens issued using this role.
    :param allow_instance_migration: Allow re-auth after instance migration.
    :param disallow_reauthentication: Only allow a single login per instance.
    :param resolve_aws_unique_ids: Resolve ARNs to AWS unique ids.
    :param mount_point: The "path" the AWS-EC2 auth backend was mounted on.
    :return: The response of the request.
    :rtype: requests.Response
    """
    params = {
        'role': role,
        'auth_type': 'ec2',
        'disallow_reauthentication': disallow_reauthentication,
        'allow_instance_migration': allow_instance_migration,
        # Vault treats 0 as "use the backend default" for these durations
        'ttl': ttl if ttl is not None else 0,
        'max_ttl': max_ttl if max_ttl is not None else 0,
        'period': period if period is not None else 0,
    }
    optional_params = {
        'bound_ami_id': bound_ami_id,
        'bound_account_id': bound_account_id,
        'bound_iam_role_arn': bound_iam_role_arn,
        # BUG FIX: this constraint was previously sent under the
        # 'bound_iam_instance_profile_arn' key, silently dropping the
        # instance-id restriction (and clobbering the real profile ARN)
        'bound_ec2_instance_id': bound_ec2_instance_id,
        'bound_iam_instance_profile_arn': bound_iam_instance_profile_arn,
        'bound_region': bound_region,
        'bound_vpc_id': bound_vpc_id,
        'bound_subnet_id': bound_subnet_id,
        'role_tag': role_tag,
        'policies': policies,
        'resolve_aws_unique_ids': resolve_aws_unique_ids,
    }
    # only send constraints/options the caller actually supplied
    params.update((key, value) for key, value in optional_params.items() if value is not None)
    return self._adapter.post('/v1/auth/{0}/role/{1}'.format(mount_point, role), json=params)
|
def _default_node_visitor ( self , node ) :
"""Generates a dictionary representation of the given : class : ` CTENode `
` node ` , which consists of the node itself under the key ` ` node ` ` , as
well as structural information under the keys ` ` depth ` ` , ` ` path ` ` ,
` ` ordering ` ` , ` ` leaf ` ` , and ` ` branch ` ` .
: param node : the : class : ` CTENode ` for which to generate the
representation .
: return : a dictionary representation of the structure of the node ."""
|
return { "depth" : getattr ( node , node . _cte_node_depth ) , "path" : [ str ( c ) for c in getattr ( node , node . _cte_node_path ) ] , "ordering" : getattr ( node , node . _cte_node_ordering ) , "leaf" : node . is_leaf ( ) , "branch" : node . is_branch ( ) , "node" : node , }
|
def create_int(help_string=NO_HELP, default=NO_DEFAULT):
    # type: (str, Union[int, NO_DEFAULT_TYPE]) -> int
    """Create an int parameter.

    :param help_string: help text shown for the parameter.
    :param default: default value, or NO_DEFAULT for a required parameter.
    :return: the parameter descriptor (typed as int for the checker).
    """
    # noinspection PyTypeChecker
    param = ParamFunctions(
        help_string=help_string,
        default=default,
        type_name="int",
        function_s2t=convert_string_to_int,
        function_t2s=convert_int_to_string,
        function_s2t_generate_from_default=convert_string_to_int_default,
    )
    return param
|
def _make_scaled_srcmap(self):
    """Make an exposure cube with the same binning as the counts map.

    Rescales every image extension of the gtlike source map by the ratio of
    the ROI exposure map to the gtlike exposure map, then writes the result
    to ``self.files['srcmap']``.

    :raises Exception: if the two exposure maps have different shapes.
    """
    self.logger.info('Computing scaled source map.')
    # use context managers so the FITS files are closed even on error
    # (they were previously left open)
    with fits.open(self.files['bexpmap_roi']) as bexp0, \
            fits.open(self.config['gtlike']['bexpmap']) as bexp1:
        if bexp0[0].data.shape != bexp1[0].data.shape:
            raise Exception('Wrong shape for input exposure map file.')
        bexp_ratio = bexp0[0].data / bexp1[0].data
    self.logger.info('Min/Med/Max exposure correction: %f %f %f' %
                     (np.min(bexp_ratio), np.median(bexp_ratio),
                      np.max(bexp_ratio)))
    with fits.open(self.config['gtlike']['srcmap']) as srcmap:
        for hdu in srcmap[1:]:
            # GTI and EBOUNDS extensions carry no map data to scale
            if hdu.name in ('GTI', 'EBOUNDS'):
                continue
            hdu.data *= bexp_ratio
        srcmap.writeto(self.files['srcmap'], overwrite=True)
|
def aux_dict(self):
    """Get dictionary representation of auxiliary states arrays.

    Returns
    -------
    aux_dict : dict of str to NDArray
        The dictionary that maps name of auxiliary states to NDArrays.

    Raises
    ------
    ValueError : if there are duplicated names in the auxiliary states.
    """
    # lazily build the mapping on first access, then serve the cached copy
    if self._aux_dict is None:
        aux_names = self._symbol.list_auxiliary_states()
        self._aux_dict = Executor._get_dict(aux_names, self.aux_arrays)
    return self._aux_dict
|
def template_global(self, name: Optional[str] = None) -> Callable:
    """Add a template global.

    This is designed to be used as a decorator. An example usage,

    .. code-block:: python

        @app.template_global('name')
        def five():
            return 5

    Arguments:
        name: The global name (defaults to function name).
    """
    def register(func: Callable) -> Callable:
        # register immediately; the wrapped function is returned unchanged
        self.add_template_global(func, name=name)
        return func

    return register
|
def send_invoice(self, chat_id, title, description, payload, provider_token, start_parameter, currency, prices, provider_data=None, photo_url=None, photo_size=None, photo_width=None, photo_height=None, need_name=None, need_phone_number=None, need_email=None, need_shipping_address=None, send_phone_number_to_provider=None, send_email_to_provider=None, is_flexible=None, disable_notification=None, reply_to_message_id=None, reply_markup=None):
    """Use this method to send invoices. On success, the sent Message is returned.

    https://core.telegram.org/bots/api#sendinvoice

    Parameters:

    :param chat_id: Unique identifier for the target private chat (int)
    :param title: Product name, 1-32 characters (str|unicode)
    :param description: Product description, 1-255 characters (str|unicode)
    :param payload: Bot-defined invoice payload, 1-128 bytes; never shown to
                    the user, use it for your internal processes (str|unicode)
    :param provider_token: Payments provider token, obtained via Botfather
    :param start_parameter: Unique deep-linking parameter that can be used to
                            generate this invoice as a start parameter
    :param currency: Three-letter ISO 4217 currency code (str|unicode)
    :param prices: Price breakdown, a list of
                   pytgbot.api_types.sendable.payments.LabeledPrice

    Optional keyword parameters mirror the Bot API fields of the same name:
    provider_data (JSON-encoded provider payload), photo_url/photo_size/
    photo_width/photo_height (product photo), the need_* flags (require the
    user's name/phone/email/shipping address), send_phone_number_to_provider,
    send_email_to_provider, is_flexible (final price depends on shipping),
    disable_notification, reply_to_message_id and reply_markup (an
    InlineKeyboardMarkup whose first button must be a Pay button).

    Returns:

    :return: On success, the sent Message is returned
    :rtype: pytgbot.api_types.receivable.updates.Message
    """
    from pytgbot.api_types.sendable.payments import LabeledPrice
    from pytgbot.api_types.sendable.reply_markup import InlineKeyboardMarkup

    # required arguments: (value, expected type, name reported on failure)
    for value, expected, pname in (
            (chat_id, int, "chat_id"),
            (title, unicode_type, "title"),
            (description, unicode_type, "description"),
            (payload, unicode_type, "payload"),
            (provider_token, unicode_type, "provider_token"),
            (start_parameter, unicode_type, "start_parameter"),
            (currency, unicode_type, "currency"),
            (prices, list, "prices")):
        assert_type_or_raise(value, expected, parameter_name=pname)
    # optional arguments: None is always accepted
    for value, expected, pname in (
            (provider_data, unicode_type, "provider_data"),
            (photo_url, unicode_type, "photo_url"),
            (photo_size, int, "photo_size"),
            (photo_width, int, "photo_width"),
            (photo_height, int, "photo_height"),
            (need_name, bool, "need_name"),
            (need_phone_number, bool, "need_phone_number"),
            (need_email, bool, "need_email"),
            (need_shipping_address, bool, "need_shipping_address"),
            (send_phone_number_to_provider, bool, "send_phone_number_to_provider"),
            (send_email_to_provider, bool, "send_email_to_provider"),
            (is_flexible, bool, "is_flexible"),
            (disable_notification, bool, "disable_notification"),
            (reply_to_message_id, int, "reply_to_message_id"),
            (reply_markup, InlineKeyboardMarkup, "reply_markup")):
        assert_type_or_raise(value, None, expected, parameter_name=pname)
    result = self.do(
        "sendInvoice", chat_id=chat_id, title=title, description=description,
        payload=payload, provider_token=provider_token,
        start_parameter=start_parameter, currency=currency, prices=prices,
        provider_data=provider_data, photo_url=photo_url, photo_size=photo_size,
        photo_width=photo_width, photo_height=photo_height, need_name=need_name,
        need_phone_number=need_phone_number, need_email=need_email,
        need_shipping_address=need_shipping_address,
        send_phone_number_to_provider=send_phone_number_to_provider,
        send_email_to_provider=send_email_to_provider, is_flexible=is_flexible,
        disable_notification=disable_notification,
        reply_to_message_id=reply_to_message_id, reply_markup=reply_markup)
    if not self.return_python_objects:
        return result
    logger.debug("Trying to parse {data}".format(data=repr(result)))
    from pytgbot.api_types.receivable.updates import Message
    try:
        return Message.from_array(result)
    except TgApiParseException:
        logger.debug("Failed parsing as api_type Message", exc_info=True)
    # no valid parsing so far
    raise TgApiParseException("Could not parse result.")
|
def store_layout(self, name, *args):
    """Store the layout with the given name.

    :param name: Layout name.
    :type name: unicode
    :param \\*args: Arguments.
    :type \\*args: \\*
    :return: Method success.
    :rtype: bool
    """
    # refuse to store under a name that was never registered
    if not self.__layouts.get(name):
        raise umbra.exceptions.LayoutExistError(
            "{0} | '{1}' layout isn't registered!".format(self.__class__.__name__, name))
    LOGGER.debug("> Storing layout '{0}'.".format(name))
    self.__current_layout = name
    container = self.__container
    settings = self.__settings
    # persist the window geometry, dock state and central-widget visibility
    settings.set_key("Layouts", "{0}_geometry".format(name), container.saveGeometry())
    settings.set_key("Layouts", "{0}_window_state".format(name), container.saveState())
    settings.set_key("Layouts", "{0}_central_widget".format(name), container.centralWidget().isVisible())
    self.layout_stored.emit(self.__current_layout)
    return True
|
def load(self, filename, set_current=True, add_where='end'):
    """Load *filename*, create an editor instance and return it.

    *Warning* This is loading file, creating editor but not executing
    the source code analysis -- the analysis must be done by the editor
    plugin (in case multiple editorstack instances are handled).

    :param filename: path of the file to open (made absolute below).
    :param set_current: whether the new editor becomes the current one.
    :param add_where: where the new tab is inserted ('end' by default).
    :return: the finfo object describing the newly created editor.
    """
    filename = osp.abspath(to_text_string(filename))
    # signal listeners that a potentially slow operation has started
    self.starting_long_process.emit(_("Loading %s...") % filename)
    text, enc = encoding.read(filename)
    finfo = self.create_new_editor(filename, enc, text, set_current, add_where=add_where)
    index = self.data.index(finfo)
    self._refresh_outlineexplorer(index, update=True)
    self.ending_long_process.emit("")
    # warn about mixed end-of-line characters and normalize them, but only
    # when the widget is visible and the EOL check is enabled
    if self.isVisible() and self.checkeolchars_enabled and sourcecode.has_mixed_eol_chars(text):
        name = osp.basename(filename)
        self.msgbox = QMessageBox(QMessageBox.Warning, self.title, _("<b>%s</b> contains mixed end-of-line " "characters.<br>Spyder will fix this " "automatically.") % name, QMessageBox.Ok, self)
        self.msgbox.exec_()
        self.set_os_eol_chars(index)
    # the source-code analysis has not run yet (see the warning above)
    self.is_analysis_done = False
    return finfo
|
def create_role(self, name, bound_service_account_names, bound_service_account_namespaces, ttl="", max_ttl="", period="", policies=None, mount_point=DEFAULT_MOUNT_POINT):
    """Create a role in the method.

    Registers a role in the auth method. Role types have specific entities
    that can perform login operations against this endpoint. Constraints
    specific to the role type must be set on the role. These are applied to
    the authenticated entities attempting to login.

    Supported methods:
        POST: /auth/{mount_point}/role/{name}. Produces: 204 (empty body)

    :param name: Name of the role.
    :type name: str | unicode
    :param bound_service_account_names: List of service account names able to
        access this role. If set to "*" all names are allowed; this and
        bound_service_account_namespaces can not both be "*".
    :type bound_service_account_names: list | str | unicode
    :param bound_service_account_namespaces: List of namespaces allowed to
        access this role. If set to "*" all namespaces are allowed; this and
        bound_service_account_names can not both be "*".
    :type bound_service_account_namespaces: list | str | unicode
    :param ttl: The TTL period of tokens issued using this role in seconds.
    :type ttl: str | unicode
    :param max_ttl: The maximum allowed lifetime of tokens issued in seconds
        using this role.
    :type max_ttl: str | unicode
    :param period: If set, the token generated using this role should never
        expire, but must be renewed within this duration; each renewal resets
        the token's TTL to this value.
    :type period: str | unicode
    :param policies: Policies to be set on tokens issued using this role.
    :type policies: list | str | unicode
    :param mount_point: The "path" the auth method was mounted on.
    :type mount_point: str | unicode
    :return: The response of the request.
    :rtype: requests.Response
    """
    list_of_strings_params = {
        'bound_service_account_names': bound_service_account_names,
        'bound_service_account_namespaces': bound_service_account_namespaces,
        'policies': policies,
    }
    for param_name, param_argument in list_of_strings_params.items():
        validate_list_of_strings_param(param_name=param_name, param_argument=param_argument)
    # Vault rejects a wildcard on both service-account constraints at once
    if bound_service_account_names in ("*", ["*"]) and bound_service_account_namespaces in ("*", ["*"]):
        # BUG FIX: the message previously referenced a nonexistent
        # `bind_service_account_names` argument
        error_msg = ('unsupported combination of `bound_service_account_names` and '
                     '`bound_service_account_namespaces` arguments. Both of them can not be set to `*`')
        raise exceptions.ParamValidationError(error_msg)
    params = {
        'bound_service_account_names': comma_delimited_to_list(bound_service_account_names),
        'bound_service_account_namespaces': comma_delimited_to_list(bound_service_account_namespaces),
        'ttl': ttl,
        'max_ttl': max_ttl,
        'period': period,
        'policies': comma_delimited_to_list(policies),
    }
    api_path = '/v1/auth/{mount_point}/role/{name}'.format(mount_point=mount_point, name=name)
    return self._adapter.post(url=api_path, json=params)
|
def apply_grad_zmat_tensor(grad_C, construction_table, cart_dist):
    """Apply the gradient for transformation to Zmatrix space onto cart_dist.

    Args:
        grad_C (:class:`numpy.ndarray`): A ``(3, n, n, 3)`` array.
            The mathematical details of the index layout is explained in
            :meth:`~chemcoord.Cartesian.get_grad_zmat()`.
        construction_table (pandas.DataFrame): Explained in
            :meth:`~chemcoord.Cartesian.get_construction_table()`.
        cart_dist (:class:`~chemcoord.Cartesian`):
            Distortions in cartesian space.

    Returns:
        :class:`Zmat`: Distortions in Zmatrix space.
    """
    if (construction_table.index != cart_dist.index).any():
        message = "construction_table and cart_dist must use the same index"
        raise ValueError(message)
    # (3, n) array of the cartesian displacement components
    X_dist = cart_dist.loc[:, ['x', 'y', 'z']].values.T
    # contract the gradient with the displacement field; transpose to (n, 3)
    C_dist = np.tensordot(grad_C, X_dist, axes=([3, 2], [0, 1])).T
    if C_dist.dtype == np.dtype('i8'):
        # promote integer results to float before the rad->deg conversion
        C_dist = C_dist.astype('f8')
    try:
        # columns 1 and 2 become 'angle' and 'dihedral' below: convert to degrees
        C_dist[:, [1, 2]] = np.rad2deg(C_dist[:, [1, 2]])
    except AttributeError:
        # the numpy ufunc fails on symbolic (sympy) entries; fall back to sympy.deg
        C_dist[:, [1, 2]] = sympy.deg(C_dist[:, [1, 2]])
    # imported here to avoid a circular import with the Zmat module
    from chemcoord.internal_coordinates.zmat_class_main import Zmat
    cols = ['atom', 'b', 'bond', 'a', 'angle', 'd', 'dihedral']
    dtypes = ['O', 'i8', 'f8', 'i8', 'f8', 'i8', 'f8']
    # build the Zmat frame, then cast each column to its proper dtype
    new = pd.DataFrame(data=np.zeros((len(construction_table), 7)), index=cart_dist.index, columns=cols, dtype='f8')
    new = new.astype(dict(zip(cols, dtypes)))
    new.loc[:, ['b', 'a', 'd']] = construction_table
    new.loc[:, 'atom'] = cart_dist.loc[:, 'atom']
    new.loc[:, ['bond', 'angle', 'dihedral']] = C_dist
    return Zmat(new, _metadata={'last_valid_cartesian': cart_dist})
|
def clean_all_trash_pages_from_all_spaces(confluence):
    """Retrieve all space keys and hand each space to the trash cleaner.

    :param confluence: Confluence client used to enumerate spaces.
    :return: 0 on completion.
    """
    batch_size = 50
    batch_index = 0
    while True:
        spaces = confluence.get_all_spaces(start=batch_index * batch_size, limit=batch_size)
        # an empty (or missing) batch means every space has been processed
        if not spaces:
            return 0
        batch_index += 1
        for space in spaces:
            print("Start review the space with key = " + space['key'])
            clean_pages_from_space(confluence=confluence, space_key=space['key'])
|
def _update(self, commit=False):
    """Force an update of this rating (useful for when Vote objects are removed)."""
    votes = Vote.objects.filter(
        content_type=self.get_content_type(),
        object_id=self.instance.pk,
        key=self.field.key,
    )
    # recompute the aggregate from the surviving votes
    total_score = sum(v.score for v in votes)
    vote_count = len(votes)
    score, created = Score.objects.get_or_create(
        content_type=self.get_content_type(),
        object_id=self.instance.pk,
        key=self.field.key,
        defaults=dict(score=total_score, votes=vote_count),
    )
    if not created:
        score.score = total_score
        score.votes = vote_count
        score.save()
    self.score = total_score
    self.votes = vote_count
    if commit:
        self.instance.save()
|
def is_color_supported():
    """Return True when the current terminal environment supports color."""
    # adapted from sphinx.util.console
    stdout = sys.stdout
    if not hasattr(stdout, 'isatty'):
        return False
    # a non-tty only counts when the TERMINAL-COLOR override is present
    if not stdout.isatty() and 'TERMINAL-COLOR' not in os.environ:
        return False
    if sys.platform == 'win32':  # pragma: no cover
        # Windows needs colorama to translate ANSI escapes
        try:
            import colorama
            colorama.init()
            return True
        except ImportError:
            return False
    if 'COLORTERM' in os.environ:
        return True
    term = os.environ.get('TERM', 'dumb').lower()
    if 'color' in term:
        return True
    return term in ('xterm', 'linux')
|
def add_contents(self, dest, contents):
    """Add file contents to the archive under ``dest``.

    If ``dest`` is a path, it will be added compressed and world-readable
    (user-writeable). You may also pass a :py:class:`~zipfile.ZipInfo` for
    custom behavior.
    """
    assert not self._closed, "Archive closed"
    # a plain path gets wrapped in a default ZipInfo (see zinfo for caveats)
    info = dest if isinstance(dest, zipfile.ZipInfo) else zinfo(dest)
    # ensure we apply the configured compression
    info.compress_type = self.zip_compression
    # mark host OS as Linux for all archives
    info.create_system = 3
    self._zip_file.writestr(info, contents)
|
def to_dict(self):
    """to_dict: puts data in format CC expects

    Args: None
    Returns: dict of channel data
    """
    # drop failed downloads (entries that are falsy or have no filename)
    successful_files = [f.to_dict() for f in self.files if f and f.filename]
    data = {
        "title": self.title,
        "language": self.language,
        "description": self.description,
        "node_id": self.get_node_id().hex,
        "content_id": self.get_content_id().hex,
        "source_domain": self.domain_ns.hex,
        "source_id": self.source_id,
        "author": self.author,
        "aggregator": self.aggregator,
        "provider": self.provider,
        "files": successful_files,
        "tags": self.tags,
        "kind": self.kind,
        "license": self.license.license_id,
        "license_description": self.license.description,
        "copyright_holder": self.license.copyright_holder,
        "questions": [question.to_dict() for question in self.questions],
        "extra_fields": json.dumps(self.extra_fields),
        "role": self.role,
    }
    return data
|
def _insert_additionals(self, fmtos, seen=None):
    """Insert additional formatoptions into `fmtos`.

    This method inserts those formatoptions into `fmtos` that are required
    because one of the following criteria is fulfilled:

    1. The :attr:`replot` attribute is True
    2. Any formatoption with START priority is in `fmtos`
    3. A dependency of one formatoption is in `fmtos`

    Parameters
    ----------
    fmtos : list
        The list of formatoptions that shall be updated
    seen : set
        The formatoption keys that shall not be included. If None, all
        formatoptions in `fmtos` are used

    Returns
    -------
    fmtos
        The initial `fmtos` plus further formatoptions

    Notes
    -----
    `fmtos` and `seen` are modified in place (except that any formatoption
    in the initial `fmtos` has :attr:`~Formatoption.requires_clearing`
    attribute set to True)
    """
    def get_dependencies(fmto):
        # recursively collect this formatoption's dependency keys,
        # following the attributes on self that hold the named fmtos
        if fmto is None:
            return []
        return fmto.dependencies + list(chain(*map(lambda key: get_dependencies(getattr(self, key, None)), fmto.dependencies)))
    seen = seen or {fmto.key for fmto in fmtos}
    keys = {fmto.key for fmto in fmtos}
    self.replot = self.replot or any(fmto.requires_replot for fmto in fmtos)
    # criteria 1 and 2: a replot (or any START-priority fmto) pulls in every
    # data-dependent formatoption not already scheduled
    if self.replot or any(fmto.priority >= START for fmto in fmtos):
        self.replot = True
        self.plot_data = self.data
        new_fmtos = dict((f.key, f) for f in self._fmtos if ((f not in fmtos and is_data_dependent(f, self.data))))
        seen.update(new_fmtos)
        keys.update(new_fmtos)
        fmtos += list(new_fmtos.values())
    # insert the formatoptions that have to be updated if the plot is
    # changed
    if any(fmto.priority >= BEFOREPLOTTING for fmto in fmtos):
        new_fmtos = dict((f.key, f) for f in self._fmtos if ((f not in fmtos and f.update_after_plot)))
        fmtos += list(new_fmtos.values())
    # criterion 3: pull in any remaining fmto whose (transitive) dependencies
    # intersect the keys already scheduled for update
    for fmto in set(self._fmtos).difference(fmtos):
        all_dependencies = get_dependencies(fmto)
        if keys.intersection(all_dependencies):
            fmtos.append(fmto)
    # if anything demands a full clear, update every formatoption instead
    if any(fmto.requires_clearing for fmto in fmtos):
        self.cleared = True
        return list(self._fmtos)
    return fmtos
|
def s(self, data, errors='strict'):
    """Decode value using correct Python 2/3 method.

    This method is intended to replace the :py:meth:`~tcex.tcex.TcEx.to_string`
    method with better logic to handle poorly encoded unicode data in Python2
    and still work in Python3.

    Args:
        data (any): Data to be validated and (de)encoded
        errors (string): What method to use when dealing with errors.

    Returns:
        (string): Return decoded data
    """
    try:
        if data is None or isinstance(data, (int, list, dict)):
            pass
            # Do nothing with these types
        elif isinstance(data, unicode):  # Py2-only name; raises NameError on Py3 (caught below)
            try:
                data.decode('utf-8')
            except UnicodeEncodeError:  # 2to3 converts unicode to str
                # poorly encoded Py2 unicode: round-trip through utf-8 with the
                # requested error handling
                data = str(data.encode('utf-8').strip(), errors=errors)
                self.log.warning(u'Encoding poorly encoded string ({})'.format(data))
            except AttributeError:
                pass
                # Python 3 can't decode a str
        else:
            # bytes-like input: decode it to text
            data = str(data, 'utf-8', errors=errors)
            # 2to3 converts unicode to str
    except NameError:
        pass
        # Can't decode str in Python 3
    return data
|
def decode_dict_keys_to_str(src):
    '''Convert top level keys from bytes to strings if possible.

    This is necessary because Python 3 makes a distinction
    between these types.
    '''
    # only Python 3 dicts need (or support) this normalization
    if not six.PY3 or not isinstance(src, dict):
        return src

    def _key_to_str(key):
        # leave non-bytes keys — and undecodable bytes — untouched
        if isinstance(key, bytes):
            try:
                return key.decode()
            except UnicodeError:
                return key
        return key

    return {_key_to_str(key): value for key, value in six.iteritems(src)}
|
def search(self, Queue=None, order=None, raw_query=None, Format='l', **kwargs):
    """Search arbitrary needles in given fields and queue.

    Example::

        >>> tracker = Rt('http://tracker.example.com/REST/1.0/', 'rt-username', 'top-secret')
        >>> tracker.login()
        >>> tickets = tracker.search(CF_Domain='example.com', Subject__like='warning')
        >>> tickets = tracker.search(Queue='General', order='Status', raw_query="id='1'+OR+id='2'+OR+id='3'")

    :keyword Queue: Queue where to search. If you wish to search across
                    all of your queues, pass the ALL_QUEUES object as the
                    argument.
    :keyword order: Name of field sorting result list, for descending
                    order put - before the field name. E.g. -Created
                    will put the newest tickets at the beginning
    :keyword raw_query: A raw query to provide to RT if you know what
                        you are doing. You may still pass Queue and order
                        kwargs, so use these instead of including them in
                        the raw query. You can refer to the RT query builder.
                        If passing raw_query, all other **kwargs will be ignored.
    :keyword Format: Format of the query:

                     - i: only `id' fields are populated
                     - s: only `id' and `subject' fields are populated
                     - l: multi-line format, all fields are populated
    :keyword kwargs: Other arguments possible to set if not passing raw_query:

                     Requestors, Subject, Cc, AdminCc, Owner, Status,
                     Priority, InitialPriority, FinalPriority,
                     TimeEstimated, Starts, Due, Text,... (according to RT
                     fields)

                     Custom fields CF.{<CustomFieldName>} could be set
                     with keywords CF_CustomFieldName.

                     To alter lookup operators you can append one of the
                     following endings to each keyword:

                     __exact    for operator = (default)
                     __notexact for operator !=
                     __gt       for operator >
                     __lt       for operator <
                     __like     for operator LIKE
                     __notlike  for operator NOT LIKE

                     Setting values to keywords constrain search
                     result to the tickets satisfying all of them.

    :returns: List of matching tickets. Each ticket is the same dictionary
              as in :py:meth:`~Rt.get_ticket`.
    :raises UnexpectedMessageFormat: Unexpected format of returned message.
    :raises InvalidQueryError: If raw query is malformed.
    """
    get_params = {}
    query = []
    url = 'search/ticket'
    if Queue is not ALL_QUEUES:
        query.append("Queue=\'{}\'".format(Queue or self.default_queue))
    if not raw_query:
        operators_map = {
            'gt': '>',
            'lt': '<',
            'exact': '=',
            'notexact': '!=',
            'like': ' LIKE ',
            'notlike': ' NOT LIKE ',
        }
        for key, value in iteritems(kwargs):
            op = '='
            key_parts = key.split('__')
            if len(key_parts) > 1:
                # Keyword carries a lookup-operator suffix (e.g. Subject__like);
                # unknown suffixes fall back to '='.
                key = '__'.join(key_parts[:-1])
                op = operators_map.get(key_parts[-1], '=')
            if key[:3] != 'CF_':
                query.append("{}{}\'{}\'".format(key, op, value))
            else:
                # Custom fields are addressed as 'CF.{Name}' in TicketSQL.
                query.append("'CF.{{{}}}'{}\'{}\'".format(key[3:], op, value))
    else:
        query.append(raw_query)
    get_params['query'] = ' AND '.join('(' + part + ')' for part in query)
    if order:
        get_params['orderby'] = order
    get_params['format'] = Format

    msg = self.__request(url, get_params=get_params)
    lines = msg.split('\n')
    if len(lines) > 2:
        if self.__get_status_code(lines[0]) != 200 and lines[2].startswith('Invalid query: '):
            raise InvalidQueryError(lines[2])
        if lines[2].startswith('No matching results.'):
            return []

    if Format == 'l':
        msgs = map(lambda x: x.split('\n'), msg.split('\n--\n'))
        items = []
        for msg in msgs:
            pairs = {}
            req_matching = [i for i, m in enumerate(msg)
                            if self.RE_PATTERNS['requestors_pattern'].match(m)]
            req_id = req_matching[0] if req_matching else None
            # BUGFIX: compare against None -- `if not req_id` wrongly raised
            # when the Requestors line was the first line (index 0).
            if req_id is None:
                raise UnexpectedMessageFormat('Missing line starting with `Requestors:`.')
            # Headers before the Requestors line.
            for i in range(req_id):
                if ': ' in msg[i]:
                    header, content = self.split_header(msg[i])
                    pairs[header.strip()] = content.strip()
            # Requestors may span several lines; continuation lines are
            # indented by 12 spaces.
            requestors = [msg[req_id][12:]]
            req_id += 1
            while (req_id < len(msg)) and (msg[req_id][:12] == ' ' * 12):
                requestors.append(msg[req_id][12:])
                req_id += 1
            pairs['Requestors'] = self.__normalize_list(requestors)
            # Headers after the Requestors block.
            for i in range(req_id, len(msg)):
                if ': ' in msg[i]:
                    header, content = self.split_header(msg[i])
                    pairs[header.strip()] = content.strip()
            if pairs:
                items.append(pairs)
            if 'Cc' in pairs:
                pairs['Cc'] = self.__normalize_list(pairs['Cc'])
            if 'AdminCc' in pairs:
                pairs['AdminCc'] = self.__normalize_list(pairs['AdminCc'])
            # BUGFIX: original read `'id' not in pairs and not
            # pairs['id'].startswitch('ticket/')` -- the `and` short-circuit
            # raised KeyError for a missing id, skipped validation for a
            # present one, and `startswitch` is a typo for `startswith`.
            if 'id' not in pairs or not pairs['id'].startswith('ticket/'):
                raise UnexpectedMessageFormat('Response from RT didn\'t contain a valid ticket_id')
            else:
                pairs['numerical_id'] = pairs['id'].split('ticket/')[1]
        return items
    elif Format == 's':
        items = []
        msgs = lines[2:]
        for msg in msgs:
            if "" == msg:  # Ignore blank line at the end
                continue
            ticket_id, subject = self.split_header(msg)
            items.append({'id': 'ticket/' + ticket_id, 'numerical_id': ticket_id, 'Subject': subject})
        return items
    elif Format == 'i':
        items = []
        msgs = lines[2:]
        for msg in msgs:
            if "" == msg:  # Ignore blank line at the end
                continue
            _, ticket_id = msg.split('/', 1)
            items.append({'id': 'ticket/' + ticket_id, 'numerical_id': ticket_id})
        return items
|
def ekf1_pos(EKF1):
    '''calculate EKF position when EKF disabled'''
    # Home position is latched once in a module-level global and reused for
    # every subsequent EKF1 message.
    global ekf_home
    from . import mavutil
    self = mavutil.mavfile_global
    if ekf_home is None:
        # Require a GPS message with Status == 3 before establishing home
        # (presumably a 3D fix -- TODO confirm against the GPS log format).
        if not 'GPS' in self.messages or self.messages['GPS'].Status != 3:
            return None
        ekf_home = self.messages['GPS']
        # Shift the latched home back by the current EKF offset so that
        # re-applying (PE, PN) below reproduces the vehicle position.
        # NOTE(review): gps_offset appears to take (east, north) offsets in
        # that order -- confirm against its definition.
        (ekf_home.Lat, ekf_home.Lng) = gps_offset(ekf_home.Lat, ekf_home.Lng, -EKF1.PE, -EKF1.PN)
    # Apply the EKF-reported north/east position offsets relative to home.
    (lat, lon) = gps_offset(ekf_home.Lat, ekf_home.Lng, EKF1.PE, EKF1.PN)
    return (lat, lon)
|
def init(*, output_dir=FS_DEFAULT_OUTPUT_DIR, dry_run=False, **kwargs):
    """Set up output directory.

    :param output_dir: (str, optional) Output dir for holding temporary files
    :param dry_run: (bool, optional) When True, log actions without touching the filesystem
    :param kwargs: arbitrary keyword arguments (ignored)
    """
    # Record configuration in module-level globals.
    global _output_dir, _dry_run
    _output_dir = output_dir
    _dry_run = dry_run
    # Type checks
    utils.chkstr(_output_dir, 'output_dir')
    # log the thing
    log.msg("Output directory will be: {output_dir}".format(output_dir=_output_dir))
    # Nothing further to do when the directory is already present.
    if os.path.exists(_output_dir):
        return
    log.msg_warn("Output path '{out_dir}' does not exist, creating it ...".format(out_dir=_output_dir))
    if _dry_run:
        return
    # create the actual root output directory
    os.makedirs(_output_dir)
    # set folder permissions to 0770
    os.chmod(_output_dir, stat.S_IRWXU | stat.S_IRWXG)
|
def _set_optimizer_param ( optimizer , param_group , param_name , value ) :
"""Set a parameter on an all or a specific parameter group of an
optimizer instance . To select all param groups , use ` ` param _ group = ' all ' ` ` ."""
|
if param_group == 'all' :
groups = optimizer . param_groups
else :
groups = [ optimizer . param_groups [ int ( param_group ) ] ]
for group in groups :
group [ param_name ] = value
|
def filter_create(self, phrase, context, irreversible=False, whole_word=True, expires_in=None):
    """Creates a new keyword filter. `phrase` is the phrase that should be
    filtered out, `context` specifies from where to filter the keywords.
    Valid contexts are 'home', 'notifications', 'public' and 'thread'.

    Set `irreversible` to True if you want the filter to just delete statuses
    server side. This works only for the 'home' and 'notifications' contexts.

    Set `whole_word` to False if you want to allow filter matches to
    start or end within a word, not only at word boundaries.

    Set `expires_in` to specify for how many seconds the filter should be
    kept around.

    Returns the `filter dict`_ of the newly created filter.
    """
    # NOTE: locals() is captured as the request payload, so this call must
    # stay FIRST -- any local variable introduced before it would leak into
    # the API parameters.
    params = self.__generate_params(locals())
    # Reject unknown contexts before hitting the API.
    for context_val in context:
        if not context_val in ['home', 'notifications', 'public', 'thread']:
            raise MastodonIllegalArgumentError('Invalid filter context.')
    return self.__api_request('POST', '/api/v1/filters', params)
|
def execOnSubArrays(arrs, fn, splitX, splitY):
    """execute a function (on one or multiple arrays)
    only on sub sections

    works only on 2d arrays at the moment

    >>> a1 = np.ones((1000,1000))
    >>> a2 = np.ones((1000,1000))
    >>> out = execOnSubArrays((a1, a2), lambda sa1, sa2: sa1 + sa2, splitX=10, splitY=10)
    """
    # Accept a single array as well as a sequence of arrays.
    if not isinstance(arrs, (tuple, list)):
        arrs = (arrs,)
    s0, s1 = arrs[0].shape
    # Nominal tile sizes; the last tile in each direction absorbs the remainder.
    ss0 = s0 // splitX
    px, py = 0, 0
    out = None
    for ix in range(splitX):
        if ix == splitX - 1:
            ss0 = s0 - px
        # BUGFIX: reset the tile width for every row. Previously ss1 kept
        # the remainder width from the last column of the preceding row, so
        # rows after the first were tiled inconsistently when s1 % splitY != 0.
        ss1 = s1 // splitY
        for iy in range(splitY):
            if iy == splitY - 1:
                ss1 = s1 - py
            # current sub arrays:
            sarrs = [a[px:px + ss0, py:py + ss1] for a in arrs]
            sub = fn(*tuple(sarrs))
            if out is None:
                # Allocate output lazily so its dtype matches fn's result.
                out = np.empty(shape=(s0, s1), dtype=sub.dtype)
            out[px:px + ss0, py:py + ss1] = sub
            py += ss1
        py = 0
        px += ss0
    return out
|
def import_stringified_func(funcstring):
    """Import a string that represents a module and function, e.g. {module}.{funcname}.

    Given a function f, import_stringified_func(stringify_func(f)) will return the same function.

    :param funcstring: String to try to import
    :return: callable
    """
    assert isinstance(funcstring, str)
    # Everything up to the last dot is the module path; the rest is the
    # attribute name within that module.
    module_path, attr_name = funcstring.rsplit('.', 1)
    module = importlib.import_module(module_path)
    return getattr(module, attr_name)
|
def delete(self, num_iid, properties, session, item_price=None, item_num=None, lang=None):
    '''taobao.item.sku.delete -- delete a SKU.

    Deletes one SKU record; the SKU to remove is matched by its
    *properties* string.

    :param num_iid: numeric id of the parent item
    :param properties: property string identifying the SKU to delete
    :param session: authorized TOP session
    :param item_price: optional price update sent with the request
    :param item_num: optional quantity update sent with the request
    :param lang: optional response language
    :return: self, refreshed from the 'sku' payload of the API response
    '''
    request = TOPRequest('taobao.item.sku.delete')
    request['num_iid'] = num_iid
    request['properties'] = properties
    # Optional parameters are only sent when explicitly provided
    # (identity check keeps falsy values like 0 working).
    if item_num is not None:
        request['item_num'] = item_num
    if item_price is not None:
        request['item_price'] = item_price
    if lang is not None:
        request['lang'] = lang
    self.create(self.execute(request, session)['sku'])
    return self
|
def extract_odd_numbers(numbers):
    """Return the odd numbers from a list of integers, preserving order.

    Examples:
        >>> extract_odd_numbers([1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
        [1, 3, 5, 7, 9]
        >>> extract_odd_numbers([10, 20, 45, 67, 84, 93])
        [45, 67, 93]
        >>> extract_odd_numbers([5, 7, 9, 8, 6, 4, 3])
        [5, 7, 9, 3]
    """
    # A comprehension keeps exactly the elements with a non-zero remainder mod 2.
    return [value for value in numbers if value % 2 != 0]
|
def _get_mass_by_index ( self , index ) :
"""where index can either by an integer or a list of integers ( returns some of masses )"""
|
if hasattr ( index , '__iter__' ) :
return sum ( [ self . masses [ i ] for i in index ] )
else :
return self . masses [ index ]
|
def k_depth(d, depth, _counter=1):
    """Iterate keys on specific depth.

    depth has to be greater equal than 0.

    Usage reference see :meth:`DictTree.kv_depth() <DictTree.kv_depth>`
    """
    if depth == 0:
        # Depth zero is the root itself; its "key" is the stored root name.
        yield d[_meta]["_rootname"]
        return
    if _counter == depth:
        # Reached the requested depth: emit this node's own keys.
        for node_key in DictTree.k(d):
            yield node_key
        return
    # Not deep enough yet: descend into every child with the counter advanced.
    for child in DictTree.v(d):
        for node_key in DictTree.k_depth(child, depth, _counter + 1):
            yield node_key
|
def get_users_info(self, usernames):
    """Look up profile data for a batch of usernames.

    :param usernames: a list of usernames
    :return: a dict, in the form {username: val}, where val is either None if
        the user cannot be found, or a tuple (realname, email)
    """
    # Start with every requested user marked as not found, then overwrite
    # the entries the database actually knows about.
    result = {name: None for name in usernames}
    for record in self._database.users.find({"username": {"$in": usernames}}):
        result[record["username"]] = (record["realname"], record["email"])
    return result
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.