def to_api(in_dict, int_keys=None, date_keys=None, bool_keys=None):
    """Extends a given object for API Production."""
    # Cast all int_keys to int()
    if int_keys:
        for in_key in int_keys:
            if (in_key in in_dict) and (in_dict.get(in_key, None) is not None):
                in_dict[in_key] = int(in_dict[in_key])
    # Cast all date_keys to datetime.isoformat
    if date_keys:
        for in_key in date_keys:
            if (in_key in in_dict) and (in_dict.get(in_key, None) is not None):
                _from = in_dict[in_key]
                if isinstance(_from, basestring):
                    dtime = parse_datetime(_from)
                elif isinstance(_from, datetime):
                    dtime = _from
                in_dict[in_key] = dtime.isoformat()
            elif (in_key in in_dict) and in_dict.get(in_key, None) is None:
                del in_dict[in_key]
    # Remove all Nones
    for k, v in in_dict.items():
        if v is None:
            del in_dict[k]
    return in_dict

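# Usage sketch (not part of the snippet above): assumes to_api is importable from
# its module; the snippet targets Python 2 (note the `basestring` check), and the
# payload below is a hypothetical example showing int casting and None stripping.
payload = {"page": "3", "limit": None, "name": "widget"}
cleaned = to_api(payload, int_keys=["page"])
# cleaned == {"page": 3, "name": "widget"}
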
def _processor(self):
    """Application processor to setup session for every request"""
    self.store.cleanup(self._config.timeout)
    self._load()

def string_to_locale(value, strict=True):
    """Return an instance ``Locale`` corresponding to the string
    representation of a locale.

    @param value: a string representation of a locale, i.e., an ISO 639-3
        alpha-3 code (or alpha-2 code), optionally followed by a dash
        character ``-`` and an ISO 3166-1 alpha-2 code.
    @param strict: indicate whether the string representation of a locale
        has to be strictly compliant with RFC 4646, or whether a Java-style
        locale (character ``_`` instead of ``-``) is accepted.
    @return: an instance ``Locale``.
    """
    try:
        return None if is_undefined(value) else Locale.from_string(value, strict=strict)
    except Locale.MalformedLocaleException, exception:
        if strict:
            raise exception

def execute(self, conn, child_block_name='', child_lfn_list=[], transaction=False):
    sql = ''
    binds = {}
    child_ds_name = ''
    child_where = ''
    if child_block_name:
        child_ds_name = child_block_name.split('#')[0]
        parent_where = " where d.dataset = :child_ds_name ))"
        binds = {"child_ds_name": child_ds_name}
    else:
        dbsExceptionHandler('dbsException-invalid-input', "Missing child block_name for listFileParentsByLumi. ")
    if not child_lfn_list:  # most use cases
        child_where = " where b.block_name = :child_block_name )"
        binds.update({"child_block_name": child_block_name})
        sql = """
        with
        parents as
        (
        """ + self.parent_sql + parent_where + """),
        """ + """
        children as
        (
        """ + self.child_sql + child_where + """)
        select distinct cid, pid from children c
        inner join parents p on c.R = p.R and c.L = p.L
        """
    else:  # not common
        child_where = """ where b.block_name = :child_block_name
        and f.logical_file_name in (SELECT TOKEN FROM TOKEN_GENERATOR) ))
        """
        lfn_generator, bind = create_token_generator(child_lfn_list)
        binds.update(bind)
        sql = lfn_generator + """
        with
        parents as
        (
        """ + self.parent_sql + parent_where + """),
        """ + """
        children as
        (
        """ + self.child_sql + child_where + """)
        select distinct cid, pid from children c
        inner join parents p on c.R = p.R and c.L = p.L
        """
    print(sql)
    r = self.dbi.processData(sql, binds, conn, transaction=transaction)
    # print(self.format(r))
    return self.format(r)
    """cursors = self.dbi.processData(sql, binds, conn, transaction=transaction, returnCursor=True)
    for i in cursors:
        d = self.formatCursor(i, size=100)
        if isinstance(d, list) or isinstance(d, GeneratorType):
            for elem in d:
                yield elem
        elif d:
            yield d"""

def ploidy(args):
    """%prog ploidy seqids layout

    Build a figure that calls graphics.karyotype to illustrate the high ploidy
    of B. napus genome."""
    p = OptionParser(ploidy.__doc__)
    opts, args, iopts = p.set_image_options(args, figsize="8x7")
    if len(args) != 2:
        sys.exit(not p.print_help())
    seqidsfile, klayout = args
    fig = plt.figure(1, (iopts.w, iopts.h))
    root = fig.add_axes([0, 0, 1, 1])
    Karyotype(fig, root, seqidsfile, klayout)
    fc = "darkslategrey"
    radius = .012
    ot = -.05  # use this to adjust vertical position of the left panel
    TextCircle(root, .1, .9 + ot, r'$\gamma$', radius=radius, fc=fc)
    root.text(.1, .88 + ot, r"$\times3$", ha="center", va="top", color=fc)
    TextCircle(root, .08, .79 + ot, r'$\alpha$', radius=radius, fc=fc)
    TextCircle(root, .12, .79 + ot, r'$\beta$', radius=radius, fc=fc)
    root.text(.1, .77 + ot, r"$\times3\times2\times2$", ha="center", va="top", color=fc)
    root.text(.1, .67 + ot, r"Brassica triplication", ha="center", va="top", color=fc, size=11)
    root.text(.1, .65 + ot, r"$\times3\times2\times2\times3$", ha="center", va="top", color=fc)
    root.text(.1, .42 + ot, r"Allo-tetraploidy", ha="center", va="top", color=fc, size=11)
    root.text(.1, .4 + ot, r"$\times3\times2\times2\times3\times2$", ha="center", va="top", color=fc)
    bb = dict(boxstyle="round,pad=.5", fc="w", ec="0.5", alpha=0.5)
    root.text(.5, .2 + ot, r"\noindent\textit{Brassica napus}\\" "(A$\mathsf{_n}$C$\mathsf{_n}$ genome)", ha="center", size=16, color="k", bbox=bb)
    root.set_xlim(0, 1)
    root.set_ylim(0, 1)
    root.set_axis_off()
    pf = "napus"
    image_name = pf + "." + iopts.format
    savefig(image_name, dpi=iopts.dpi, iopts=iopts)

def wite_to_json(self, dir_path="", file_name=""):
    """Write the performance data to a JSON file."""
    # Collect the data
    data = {"plot_data": self.record_thread.profile_data, "method_exec_info": self.method_exec_info, "search_file": self.search_file, "source_file": self.source_file}
    # Write it to file
    file_path = os.path.join(dir_path, file_name)
    if not os.path.exists(dir_path):
        os.makedirs(dir_path)
    json.dump(data, open(file_path, "w+"), indent=4)

def UpsertUserDefinedFunction(self, collection_link, udf, options=None):
    """Upserts a user defined function in a collection.

    :param str collection_link:
        The link to the collection.
    :param str udf:
    :param dict options:
        The request options for the request.
    :return:
        The upserted UDF.
    :rtype:
        dict"""
    if options is None:
        options = {}
    collection_id, path, udf = self._GetContainerIdWithPathForUDF(collection_link, udf)
    return self.Upsert(udf, path, 'udfs', collection_id, None, options)

def check_strict(self, name, original, loc, tokens):
    """Check that syntax meets --strict requirements."""
    internal_assert(len(tokens) == 1, "invalid " + name + " tokens", tokens)
    if self.strict:
        raise self.make_err(CoconutStyleError, "found " + name, original, loc)
    else:
        return tokens[0]

def build_tree_from_json(parent_node, sourcetree):
    """Recursively parse nodes in the list `sourcetree` and add them as children
    to the `parent_node`. Usually called with `parent_node` being a `ChannelNode`."""
    EXPECTED_NODE_TYPES = [TOPIC_NODE, VIDEO_NODE, AUDIO_NODE, EXERCISE_NODE, DOCUMENT_NODE, HTML5_NODE]
    for source_node in sourcetree:
        kind = source_node['kind']
        if kind not in EXPECTED_NODE_TYPES:
            LOGGER.critical('Unexpected node kind found: ' + kind)
            raise NotImplementedError('Unexpected node kind found in json data.')
        if kind == TOPIC_NODE:
            # no role for topics (computed dynamically from descendants)
            child_node = nodes.TopicNode(source_id=source_node.get('source_id', None), title=source_node['title'], description=source_node.get('description'), author=source_node.get('author'), aggregator=source_node.get('aggregator'), provider=source_node.get('provider'), language=source_node.get('language'), thumbnail=source_node.get('thumbnail'))
            parent_node.add_child(child_node)
            source_tree_children = source_node.get('children', [])
            build_tree_from_json(child_node, source_tree_children)
        elif kind == VIDEO_NODE:
            # derive_thumbnail is a video-specific option
            child_node = nodes.VideoNode(source_id=source_node['source_id'], title=source_node['title'], description=source_node.get('description'), license=get_license(**source_node['license']), author=source_node.get('author'), aggregator=source_node.get('aggregator'), provider=source_node.get('provider'), role=source_node.get('role', roles.LEARNER), language=source_node.get('language'), derive_thumbnail=source_node.get('derive_thumbnail', True), thumbnail=source_node.get('thumbnail'))
            add_files(child_node, source_node.get('files') or [])
            parent_node.add_child(child_node)
        elif kind == AUDIO_NODE:
            child_node = nodes.AudioNode(source_id=source_node['source_id'], title=source_node['title'], description=source_node.get('description'), license=get_license(**source_node['license']), author=source_node.get('author'), aggregator=source_node.get('aggregator'), provider=source_node.get('provider'), role=source_node.get('role', roles.LEARNER), language=source_node.get('language'), thumbnail=source_node.get('thumbnail'))
            add_files(child_node, source_node.get('files') or [])
            parent_node.add_child(child_node)
        elif kind == EXERCISE_NODE:
            child_node = nodes.ExerciseNode(source_id=source_node['source_id'], title=source_node['title'], description=source_node.get('description'), license=get_license(**source_node['license']), author=source_node.get('author'), aggregator=source_node.get('aggregator'), provider=source_node.get('provider'), role=source_node.get('role', roles.LEARNER), language=source_node.get('language'), thumbnail=source_node.get('thumbnail'), exercise_data=source_node.get('exercise_data'), questions=[])
            add_questions(child_node, source_node.get('questions') or [])
            parent_node.add_child(child_node)
        elif kind == DOCUMENT_NODE:
            child_node = nodes.DocumentNode(source_id=source_node['source_id'], title=source_node['title'], description=source_node.get('description'), license=get_license(**source_node['license']), author=source_node.get('author'), aggregator=source_node.get('aggregator'), provider=source_node.get('provider'), role=source_node.get('role', roles.LEARNER), language=source_node.get('language'), thumbnail=source_node.get('thumbnail'))
            add_files(child_node, source_node.get('files') or [])
            parent_node.add_child(child_node)
        elif kind == HTML5_NODE:
            child_node = nodes.HTML5AppNode(source_id=source_node['source_id'], title=source_node['title'], description=source_node.get('description'), license=get_license(**source_node['license']), author=source_node.get('author'), aggregator=source_node.get('aggregator'), provider=source_node.get('provider'), role=source_node.get('role', roles.LEARNER), language=source_node.get('language'), thumbnail=source_node.get('thumbnail'))
            add_files(child_node, source_node.get('files') or [])
            parent_node.add_child(child_node)
        else:
            LOGGER.critical('Encountered an unknown kind: ' + str(source_node))
            continue
    return parent_node

def sub(table, field, pattern, repl, count=0, flags=0):
    """Convenience function to convert values under the given field using a
    regular expression substitution. See also :func:`re.sub`."""
    prog = re.compile(pattern, flags)
    conv = lambda v: prog.sub(repl, v, count=count)
    return convert(table, field, conv)

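# Usage sketch (not part of the snippet above): assumes a petl-style table API
# where `convert` and `sub` come from the same module and a table is a sequence
# of rows with a header row; the data below is made up for illustration.
table = [['id', 'color'], [1, 'dark-red'], [2, 'light-red']]
table2 = sub(table, 'color', r'-', ' ')
# iterating table2 yields [1, 'dark red'] and [2, 'light red']
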
def association_generator(self, file, skipheader=False, outfile=None) -> Dict:
    """Returns a generator that yields successive associations from file

    Yields
    ------
    association"""
    file = self._ensure_file(file)
    for line in file:
        parsed_result = self.parse_line(line)
        self.report.report_parsed_result(parsed_result, outfile, self.config.filtered_evidence_file, self.config.filter_out_evidence)
        for association in parsed_result.associations:
            # yield association if we don't care if it's a header or if it's definitely a real gaf line
            if not skipheader or "header" not in association:
                yield association
    logging.info(self.report.short_summary())
    file.close()

def POST(self):  # pylint: disable=arguments-differ
    """Display main course list page"""
    if not self.app.welcome_page:
        raise web.seeother("/courselist")
    return self.show_page(self.app.welcome_page)

def validate_flavor_data(self, expected, actual):
    """Validate flavor data.

    Validate a list of actual flavors vs a list of expected flavors."""
    self.log.debug('Validating flavor data...')
    self.log.debug('actual: {}'.format(repr(actual)))
    act = [a.name for a in actual]
    return self._validate_list_data(expected, act)

def manage_api_keys():
    """Page for viewing and creating API keys."""
    build = g.build
    create_form = forms.CreateApiKeyForm()
    if create_form.validate_on_submit():
        api_key = models.ApiKey()
        create_form.populate_obj(api_key)
        api_key.id = utils.human_uuid()
        api_key.secret = utils.password_uuid()
        save_admin_log(build, created_api_key=True, message=api_key.id)
        db.session.add(api_key)
        db.session.commit()
        logging.info('Created API key=%r for build_id=%r', api_key.id, build.id)
        return redirect(url_for('manage_api_keys', build_id=build.id))
    create_form.build_id.data = build.id
    api_key_query = (models.ApiKey.query.filter_by(build_id=build.id).order_by(models.ApiKey.created.desc()).limit(1000))
    revoke_form_list = []
    for api_key in api_key_query:
        form = forms.RevokeApiKeyForm()
        form.id.data = api_key.id
        form.build_id.data = build.id
        form.revoke.data = True
        revoke_form_list.append((api_key, form))
    return render_template('view_api_keys.html', build=build, create_form=create_form, revoke_form_list=revoke_form_list)

def put_nowait(self, item):
    """Put an item into the queue without blocking.

    If no free slot is immediately available, raise QueueFull."""
    self._parent._check_closing()
    with self._parent._sync_mutex:
        if self._parent._maxsize > 0:
            if self._parent._qsize() >= self._parent._maxsize:
                raise AsyncQueueFull
        self._parent._put_internal(item)
        self._parent._notify_async_not_empty(threadsafe=False)
        self._parent._notify_sync_not_empty()

def _is_physical_entity(pe):
    """Return True if the element is a physical entity"""
    val = isinstance(pe, _bp('PhysicalEntity')) or isinstance(pe, _bpimpl('PhysicalEntity'))
    return val

def average_percentage_of_new_providers(providers_info, providers_count):
    """Return the average percentage of new providers
    per segment and the average percentage of all projects."""
    segments_percentages = {}
    all_projects_percentages = []
    providers_count = providers_count.to_dict()[0]
    for _, items in providers_info.groupby('PRONAC'):
        cnpj_array = items['nrCNPJCPF'].unique()
        new_providers = 0
        for cnpj in cnpj_array:
            cnpj_count = providers_count.get(cnpj, 0)
            if cnpj_count <= 1:
                new_providers += 1
        segment_id = items.iloc[0]['idSegmento']
        new_providers_percent = new_providers / cnpj_array.size
        segments_percentages.setdefault(segment_id, [])
        segments_percentages[segment_id].append(new_providers_percent)
        all_projects_percentages.append(new_providers_percent)
    segments_average_percentage = {}
    for segment_id, percentages in segments_percentages.items():
        mean = np.mean(percentages)
        segments_average_percentage[segment_id] = mean
    return pd.DataFrame.from_dict({'segments_average_percentage': segments_average_percentage, 'all_projects_average': np.mean(all_projects_percentages)})

def as_phononwebsite(self):
    """Return a dictionary with the phononwebsite format:
    http://henriquemiranda.github.io/phononwebsite"""
    d = {}
    # define the lattice
    d["lattice"] = self.structure.lattice._matrix.tolist()
    # define atoms
    atom_pos_car = []
    atom_pos_red = []
    atom_types = []
    for site in self.structure.sites:
        atom_pos_car.append(site.coords.tolist())
        atom_pos_red.append(site.frac_coords.tolist())
        atom_types.append(site.species_string)
    # default for now
    d["repetitions"] = get_reasonable_repetitions(len(atom_pos_car))
    d["natoms"] = len(atom_pos_car)
    d["atom_pos_car"] = atom_pos_car
    d["atom_pos_red"] = atom_pos_red
    d["atom_types"] = atom_types
    d["atom_numbers"] = self.structure.atomic_numbers
    d["formula"] = self.structure.formula
    d["name"] = self.structure.formula
    # get qpoints
    qpoints = []
    for q in self.qpoints:
        qpoints.append(list(q.frac_coords))
    d["qpoints"] = qpoints
    # get labels
    hsq_dict = collections.OrderedDict()
    for nq, q in enumerate(self.qpoints):
        if q.label is not None:
            hsq_dict[nq] = q.label
    # get distances
    dist = 0
    nqstart = 0
    distances = [dist]
    line_breaks = []
    for nq in range(1, len(qpoints)):
        q1 = np.array(qpoints[nq])
        q2 = np.array(qpoints[nq - 1])
        # detect jumps
        if (nq in hsq_dict) and (nq - 1 in hsq_dict):
            if hsq_dict[nq] != hsq_dict[nq - 1]:
                hsq_dict[nq - 1] += "|" + hsq_dict[nq]
            del hsq_dict[nq]
            line_breaks.append((nqstart, nq))
            nqstart = nq
        else:
            dist += np.linalg.norm(q1 - q2)
        distances.append(dist)
    line_breaks.append((nqstart, len(qpoints)))
    d["distances"] = distances
    d["line_breaks"] = line_breaks
    d["highsym_qpts"] = list(hsq_dict.items())
    # eigenvalues
    thz2cm1 = 33.35641
    bands = self.bands.copy() * thz2cm1
    d["eigenvalues"] = bands.T.tolist()
    # eigenvectors
    eigenvectors = self.eigendisplacements.copy()
    eigenvectors /= np.linalg.norm(eigenvectors[0, 0])
    eigenvectors = eigenvectors.swapaxes(0, 1)
    eigenvectors = np.array([eigenvectors.real, eigenvectors.imag])
    eigenvectors = np.rollaxis(eigenvectors, 0, 5)
    d["vectors"] = eigenvectors.tolist()
    return d

def get(self, key: Text, locale: Optional[Text]) -> List[Tuple[Text, ...]]:
    """Get a single set of intents."""
    locale = self.choose_locale(locale)
    return self.dict[locale][key]

def _url_for_email(endpoint, base_url=None, **kw):
    """Create an external url_for by using a custom base_url different from the domain we
    are on

    :param endpoint:
    :param base_url:
    :param kw:
    :return:"""
    base_url = base_url or config("MAIL_EXTERNAL_BASE_URL")
    _external = True if not base_url else False
    url = url_for(endpoint, _external=_external, **kw)
    if base_url and not _external:
        url = "%s/%s" % (base_url.strip("/"), url.lstrip("/"))
    return url

def copy(self, with_time=True, ignore_standard_types=False):
    """Returns a deep copy of the Network object with all components and
    time-dependent data.

    Returns
    -------
    network : pypsa.Network

    Parameters
    ----------
    with_time : boolean, default True
        Copy snapshots and time-varying network.component_names_t data too.
    ignore_standard_types : boolean, default False
        Ignore the PyPSA standard types.

    Examples
    --------
    >>> network_copy = network.copy()"""
    override_components, override_component_attrs = self._retrieve_overridden_components()
    network = self.__class__(ignore_standard_types=ignore_standard_types, override_components=override_components, override_component_attrs=override_component_attrs)
    for component in self.iterate_components(["Bus", "Carrier"] + sorted(self.all_components - {"Bus", "Carrier"})):
        df = component.df
        # drop the standard types to avoid them being read in twice
        if not ignore_standard_types and component.name in self.standard_type_components:
            df = component.df.drop(network.components[component.name]["standard_types"].index)
        import_components_from_dataframe(network, df, component.name)
    if with_time:
        network.set_snapshots(self.snapshots)
        for component in self.iterate_components():
            pnl = getattr(network, component.list_name + "_t")
            for k in iterkeys(component.pnl):
                pnl[k] = component.pnl[k].copy()
    # catch all remaining attributes of network
    for attr in ["name", "srid"]:
        setattr(network, attr, getattr(self, attr))
    network.snapshot_weightings = self.snapshot_weightings.copy()
    return network

def subscribe_user_to_discussion(recID, uid):
    """Subscribe a user to a discussion, so that she receives by email
    all new comments for this record.

    :param recID: record ID corresponding to the discussion we want to
                  subscribe the user
    :param uid: user id"""
    query = """INSERT INTO "cmtSUBSCRIPTION" (id_bibrec, id_user, creation_time)
               VALUES (%s, %s, %s)"""
    params = (recID, uid, convert_datestruct_to_datetext(time.localtime()))
    try:
        run_sql(query, params)
    except:
        return 0
    return 1

def start_processor(self):
    """**Purpose**: Method to start the wfp process. The wfp function
    is not to be accessed directly. The function is started in a separate
    process using this method."""
    if not self._wfp_process:
        try:
            self._prof.prof('creating wfp process', uid=self._uid)
            self._wfp_process = Process(target=self._wfp, name='wfprocessor')
            self._enqueue_thread = None
            self._dequeue_thread = None
            self._enqueue_thread_terminate = threading.Event()
            self._dequeue_thread_terminate = threading.Event()
            self._wfp_terminate = Event()
            self._logger.info('Starting WFprocessor process')
            self._prof.prof('starting wfp process', uid=self._uid)
            self._wfp_process.start()
            return True
        except Exception, ex:
            self._logger.exception('WFprocessor not started')
            self.terminate_processor()
            raise
    else:
        self._logger.warn('Wfp process already running, attempted to restart!')

def SaveName_Conv(Mod=None, Cls=None, Type=None, Name=None, Deg=None, Exp=None, Diag=None, shot=None, version=None, usr=None, include=None):
    """Return a default name for saving the object

    Includes key info for fast identification of the object from file name
    Used on object creation by :class:`~tofu.pathfile.ID`
    It is recommended to use this default name."""
    Modstr = ID._dModes[Mod] if Mod is not None else None
    include = ID._defInclude if include is None else include
    if Cls is not None and Type is not None and 'Type' in include:
        Clsstr = Cls + Type
    else:
        Clsstr = Cls
    Dict = {'Mod': Modstr, 'Cls': Clsstr, 'Name': Name}
    for ii in include:
        if not ii in ['Mod', 'Cls', 'Type', 'Name']:
            Dict[ii] = None
        if ii == 'Deg' and Deg is not None:
            Dict[ii] = ID._dPref[ii] + '{0:02.0f}'.format(Deg)
        elif ii == 'shot' and shot is not None:
            Dict[ii] = ID._dPref[ii] + '{0:05.0f}'.format(shot)
        elif not (ii in ['Mod', 'Cls', 'Type', 'Name'] or eval(ii + ' is None')):
            Dict[ii] = ID._dPref[ii] + eval(ii)
    if 'Data' in Cls:
        Order = ['Mod', 'Cls', 'Exp', 'Deg', 'Diag', 'shot', 'Name', 'version', 'usr']
    else:
        Order = ['Mod', 'Cls', 'Exp', 'Deg', 'Diag', 'Name', 'shot', 'version', 'usr']
    SVN = ""
    for ii in range(0, len(Order)):
        if Order[ii] in include and Dict[Order[ii]] is not None:
            SVN += '_' + Dict[Order[ii]]
    SVN = SVN.replace('__', '_')
    if SVN[0] == '_':
        SVN = SVN[1:]
    return SVN

def relop_code(self, relop, operands_type):
    """Returns code for relational operator

    relop - relational operator
    operands_type - int or unsigned"""
    code = self.RELATIONAL_DICT[relop]
    offset = 0 if operands_type == SharedData.TYPES.INT else len(SharedData.RELATIONAL_OPERATORS)
    return code + offset

async def stderr(self) -> AsyncGenerator[str, None]:
    """Asynchronous generator for lines from subprocess stderr."""
    await self.wait_running()
    async for line in self._subprocess.stderr:  # type: ignore
        yield line

def nx_gen_node_values(G, key, nodes, default=util_const.NoParam):
    """Generates attribute values of specific nodes"""
    node_dict = nx_node_dict(G)
    if default is util_const.NoParam:
        return (node_dict[n][key] for n in nodes)
    else:
        return (node_dict[n].get(key, default) for n in nodes)

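# Usage sketch (not part of the snippet above): assumes nx_gen_node_values,
# nx_node_dict and util_const are importable from the same module, and that
# nx_node_dict(G) returns the networkx node-attribute mapping (G.nodes).
import networkx as nx

G = nx.Graph()
G.add_node('a', weight=1.0)
G.add_node('b')
list(nx_gen_node_values(G, 'weight', ['a', 'b'], default=0.0))
# -> [1.0, 0.0]
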
def stopReceivingBoxes(self, reason):
    """Stop observing log events."""
    AMP.stopReceivingBoxes(self, reason)
    log.removeObserver(self._emit)

def searchusers(self, pattern):
    """Return a bugzilla User for the given list of patterns

    :arg pattern: List of patterns to match against.
    :returns: List of User records"""
    return [User(self, **rawuser) for rawuser in self._getusers(match=pattern).get('users', [])]

def get_html_output(self):
    """Return line generator."""
    def html_splitlines(lines):
        # this cool function was taken from trac.
        # http://projects.edgewall.com/trac/
        open_tag_re = re.compile(r'<(\w+)(\s.*)?[^/]?>')
        close_tag_re = re.compile(r'</(\w+)>')
        open_tags = []
        for line in lines:
            for tag in open_tags:
                line = tag.group(0) + line
            open_tags = []
            for tag in open_tag_re.finditer(line):
                open_tags.append(tag)
            open_tags.reverse()
            for ctag in close_tag_re.finditer(line):
                for otag in open_tags:
                    if otag.group(1) == ctag.group(1):
                        open_tags.remove(otag)
                        break
            for tag in open_tags:
                line += '</%s>' % tag.group(1)
            yield line

    if self.error:
        return escape(self.raw).splitlines()
    return list(html_splitlines(self.out.getvalue().splitlines()))

def create(self, name, volume, description=None, force=False):
    """Adds exception handling to the default create() call."""
    try:
        snap = super(CloudBlockStorageSnapshotManager, self).create(name=name, volume=volume, description=description, force=force)
    except exc.BadRequest as e:
        msg = str(e)
        if "Invalid volume: must be available" in msg:
            # The volume for the snapshot was attached.
            raise exc.VolumeNotAvailable("Cannot create a snapshot from an "
                    "attached volume. Detach the volume before trying "
                    "again, or pass 'force=True' to the create_snapshot() "
                    "call.")
        else:
            # Some other error
            raise
    except exc.ClientException as e:
        if e.code == 409:
            if "Request conflicts with in-progress" in str(e):
                txt = ("The volume is current creating a snapshot. You "
                        "must wait until that completes before attempting "
                        "to create an additional snapshot.")
                raise exc.VolumeNotAvailable(txt)
            else:
                raise
        else:
            raise
    return snap

def _get_template_texts(source_list=None, template='jinja', defaults=None, context=None, **kwargs):
    '''
    Iterate a list of sources and process them as templates.
    Returns a list of 'chunks' containing the rendered templates.
    '''
    ret = {'name': '_get_template_texts', 'changes': {}, 'result': True, 'comment': '', 'data': []}
    if source_list is None:
        return _error(ret, '_get_template_texts called with empty source_list')
    txtl = []
    for (source, source_hash) in source_list:
        context_dict = defaults if defaults else {}
        if context:
            context_dict = salt.utils.dictupdate.merge(context_dict, context)
        rndrd_templ_fn = __salt__['cp.get_template'](source, '', template=template, saltenv=__env__, context=context_dict, **kwargs)
        log.debug('cp.get_template returned %s (Called with: %s)', rndrd_templ_fn, source)
        if rndrd_templ_fn:
            tmplines = None
            with salt.utils.files.fopen(rndrd_templ_fn, 'rb') as fp_:
                tmplines = fp_.read()
                tmplines = salt.utils.stringutils.to_unicode(tmplines)
                tmplines = tmplines.splitlines(True)
            if not tmplines:
                msg = 'Failed to read rendered template file {0} ({1})'.format(rndrd_templ_fn, source)
                log.debug(msg)
                ret['name'] = source
                return _error(ret, msg)
            txtl.append(''.join(tmplines))
        else:
            msg = 'Failed to load template file {0}'.format(source)
            log.debug(msg)
            ret['name'] = source
            return _error(ret, msg)
    ret['data'] = txtl
    return ret

def get_success_url(self):
    """Returns the success URL to redirect the user to."""
    return reverse('forum_conversation:topic', kwargs={'forum_slug': self.object.forum.slug, 'forum_pk': self.object.forum.pk, 'slug': self.object.slug, 'pk': self.object.pk})

def regex_replace(arg, pattern, replacement):
    """Replaces match found by regex with replacement string.
    Replacement string can also be a regex

    Parameters
    ----------
    pattern : string (regular expression string)
    replacement : string (can be regular expression string)

    Examples
    --------
    >>> import ibis
    >>> table = ibis.table([('strings', 'string')])
    >>> result = table.strings.replace('(b+)', r'<\1>')  # 'aaabbbaa' becomes 'aaa<bbb>aaa'  # noqa: E501

    Returns
    -------
    modified : string"""
    return ops.RegexReplace(arg, pattern, replacement).to_expr()

def prompt_cfg(self, msg, sec, name, ispass=False):
    """Prompt for a config value, optionally saving it to the user-level
    cfg. Only runs if we are in an interactive mode.

    @param msg:    Message to display to user.
    @param sec:    Section of config to add to.
    @param name:   Config item name.
    @param ispass: If True, hide the input from the terminal.
                   Default: False.
    @type msg:    string
    @type sec:    string
    @type name:   string
    @type ispass: boolean
    @return: the value entered by the user
    @rtype:  string"""
    shutit_global.shutit_global_object.yield_to_draw()
    cfgstr = '[%s]/%s' % (sec, name)
    config_parser = self.config_parser
    usercfg = os.path.join(self.host['shutit_path'], 'config')
    self.log('\nPROMPTING FOR CONFIG: %s' % (cfgstr,), transient=True, level=logging.INFO)
    self.log('\n' + msg + '\n', transient=True, level=logging.INFO)
    if not shutit_global.shutit_global_object.determine_interactive():
        self.fail('ShutIt is not in a terminal so cannot prompt for values.', throw_exception=False)  # pragma: no cover
    if config_parser.has_option(sec, name):
        whereset = config_parser.whereset(sec, name)
        if usercfg == whereset:
            self.fail(cfgstr + ' has already been set in the user config, edit ' + usercfg + ' directly to change it', throw_exception=False)  # pragma: no cover
        for subcp, filename, _ in reversed(config_parser.layers):
            # Is the config file loaded after the user config file?
            if filename == whereset:
                self.fail(cfgstr + ' is being set in ' + filename + ', unable to override on a user config level', throw_exception=False)  # pragma: no cover
            elif filename == usercfg:
                break
    else:
        # The item is not currently set so we're fine to do so
        pass
    if ispass:
        val = getpass.getpass('>> ')
    else:
        val = shutit_util.util_raw_input(prompt='>> ')
    is_excluded = (config_parser.has_option('save_exclude', sec) and name in config_parser.get('save_exclude', sec).split())
    # TODO: ideally we would remember the prompted config item for this invocation of shutit
    if not is_excluded:
        usercp = [subcp for subcp, filename, _ in config_parser.layers if filename == usercfg][0]
        if shutit_util.util_raw_input(prompt=shutit_util.colorise('32', 'Do you want to save this to your user settings? y/n: '), default='y') == 'y':
            sec_toset, name_toset, val_toset = sec, name, val
        else:
            # Never save it
            if config_parser.has_option('save_exclude', sec):
                excluded = config_parser.get('save_exclude', sec).split()
            else:
                excluded = []
            excluded.append(name)
            excluded = ' '.join(excluded)
            sec_toset, name_toset, val_toset = 'save_exclude', sec, excluded
        if not usercp.has_section(sec_toset):
            usercp.add_section(sec_toset)
        usercp.set(sec_toset, name_toset, val_toset)
        usercp.write(open(usercfg, 'w'))
        config_parser.reload()
    return val

def _pys2code(self, line):
    """Updates code in pys code_array"""
    row, col, tab, code = self._split_tidy(line, maxsplit=3)
    key = self._get_key(row, col, tab)
    self.code_array.dict_grid[key] = unicode(code, encoding='utf-8')

def cartesian_to_spherical(cartesian):
    """Convert cartesian to spherical coordinates passed as (N, 3) shaped arrays."""
    xyz = cartesian
    xy = xyz[:, 0] ** 2 + xyz[:, 1] ** 2
    r = np.sqrt(xy + xyz[:, 2] ** 2)
    # for elevation angle defined from Z-axis down
    phi = np.arctan2(np.sqrt(xy), xyz[:, 2])
    # ptsnew[:, 4] = np.arctan2(xyz[:, 2], np.sqrt(xy))  # for elevation angle defined from XY-plane up
    theta = np.arctan2(xyz[:, 1], xyz[:, 0])
    return np.array([r, theta, phi]).T

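# Usage sketch (not part of the snippet above): assumes the function is in scope
# and numpy is imported as np; shows the (r, theta, phi) convention it returns.
import numpy as np

pts = np.array([[0.0, 0.0, 1.0],   # on the +z axis -> r=1, theta=0, phi=0
                [1.0, 0.0, 0.0]])  # on the +x axis -> r=1, theta=0, phi=pi/2
cartesian_to_spherical(pts)
# array([[1.        , 0.        , 0.        ],
#        [1.        , 0.        , 1.57079633]])
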
def ystep(self):
    r"""Minimise Augmented Lagrangian with respect to :math:`\mathbf{y}`."""
    self.Y = np.asarray(sp.prox_l1l2(self.AX + self.U, (self.lmbda / self.rho) * self.wl1, self.mu / self.rho, axis=-1), dtype=self.dtype)
    GenericBPDN.ystep(self)

def _next_page(self):
    """Fetch the next page of the query."""
    if self._last_page_seen:
        raise StopIteration
    new, self._last_page_seen = self.conn.query_multiple(self.object_type, self._next_page_index, self.url_params, self.query_params)
    self._next_page_index += 1
    if len(new) == 0:
        # don't bother with next page if nothing was returned
        self._last_page_seen = True
    else:
        self._results += new

def draw_connected_scoped_label(context, color, name_size, handle_pos, port_side, port_side_size, draw_connection_to_port=False):
    """Draw label of scoped variable

    This method draws the label of a scoped variable connected to a data port. This is represented by drawing a bigger
    label where the top part is filled and the bottom part isn't.

    :param context: Draw Context
    :param Gdk.Color color: Color to draw the label in (border and background fill color)
    :param name_size: Size of the name labels (scoped variable and port name) combined
    :param handle_pos: Position of port which label is connected to
    :param port_side: Side on which the label should be drawn
    :param port_side_size: Size of port (to have a relative size)
    :param draw_connection_to_port: Whether there should be a line connecting the label to the port
    :return: Rotation Angle (to rotate names accordingly), X-Position of name labels start point, Y-Position of name
        labels start point"""
    c = context.cairo
    c.set_line_width(port_side_size * .03)
    c.set_source_rgb(*color.to_floats())
    rot_angle = .0
    move_x = 0.
    move_y = 0.
    if port_side is SnappedSide.RIGHT:
        move_x = handle_pos.x + 2 * port_side_size
        move_y = handle_pos.y - name_size[1] / 2.
        c.move_to(move_x + name_size[0], move_y + name_size[1] / 2.)
        c.line_to(move_x + name_size[0], move_y)
        c.line_to(move_x, move_y)
        c.line_to(handle_pos.x + port_side_size, handle_pos.y)
        c.fill_preserve()
        c.stroke()
        if draw_connection_to_port:
            c.line_to(handle_pos.x + port_side_size / 2., handle_pos.y)
            c.line_to(handle_pos.x + port_side_size, handle_pos.y)
        else:
            c.move_to(handle_pos.x + port_side_size, handle_pos.y)
        c.line_to(move_x, move_y + name_size[1])
        c.line_to(move_x + name_size[0], move_y + name_size[1])
        c.line_to(move_x + name_size[0], move_y + name_size[1] / 2.)
    elif port_side is SnappedSide.BOTTOM:
        move_x = handle_pos.x + name_size[1] / 2.
        move_y = handle_pos.y + 2 * port_side_size
        rot_angle = pi / 2.
        c.move_to(move_x - name_size[1] / 2., move_y + name_size[0])
        c.line_to(move_x, move_y + name_size[0])
        c.line_to(move_x, move_y)
        c.line_to(handle_pos.x, move_y - port_side_size)
        c.fill_preserve()
        c.stroke()
        if draw_connection_to_port:
            c.line_to(handle_pos.x, handle_pos.y + port_side_size / 2.)
            c.line_to(handle_pos.x, move_y - port_side_size)
        else:
            c.move_to(handle_pos.x, move_y - port_side_size)
        c.line_to(move_x - name_size[1], move_y)
        c.line_to(move_x - name_size[1], move_y + name_size[0])
        c.line_to(move_x - name_size[1] / 2., move_y + name_size[0])
    elif port_side is SnappedSide.LEFT:
        move_x = handle_pos.x - 2 * port_side_size - name_size[0]
        move_y = handle_pos.y - name_size[1] / 2.
        c.move_to(move_x, move_y + name_size[1] / 2.)
        c.line_to(move_x, move_y)
        c.line_to(move_x + name_size[0], move_y)
        c.line_to(handle_pos.x - port_side_size, move_y + name_size[1] / 2.)
        c.fill_preserve()
        c.stroke()
        if draw_connection_to_port:
            c.line_to(handle_pos.x - port_side_size / 2., handle_pos.y)
            c.line_to(handle_pos.x - port_side_size, handle_pos.y)
        else:
            c.move_to(handle_pos.x - port_side_size, move_y + name_size[1] / 2.)
        c.line_to(move_x + name_size[0], move_y + name_size[1])
        c.line_to(move_x, move_y + name_size[1])
        c.line_to(move_x, move_y + name_size[1] / 2.)
    elif port_side is SnappedSide.TOP:
        move_x = handle_pos.x - name_size[1] / 2.
        move_y = handle_pos.y - 2 * port_side_size
        rot_angle = -pi / 2.
        c.move_to(move_x + name_size[1] / 2., move_y - name_size[0])
        c.line_to(move_x, move_y - name_size[0])
        c.line_to(move_x, move_y)
        c.line_to(handle_pos.x, move_y + port_side_size)
        c.fill_preserve()
        c.stroke()
        if draw_connection_to_port:
            c.line_to(handle_pos.x, handle_pos.y - port_side_size / 2.)
            c.line_to(handle_pos.x, move_y + port_side_size)
        else:
            c.move_to(handle_pos.x, move_y + port_side_size)
        c.line_to(move_x + name_size[1], move_y)
        c.line_to(move_x + name_size[1], move_y - name_size[0])
        c.line_to(move_x + name_size[1] / 2., move_y - name_size[0])
    c.stroke()
    return rot_angle, move_x, move_y

def from_gff3(path, attributes=None, region=None, score_fill=-1, phase_fill=-1, attributes_fill='.', dtype=None):
    """Read a feature table from a GFF3 format file.

    Parameters
    ----------
    path : string
        File path.
    attributes : list of strings, optional
        List of columns to extract from the "attributes" field.
    region : string, optional
        Genome region to extract. If given, file must be position
        sorted, bgzipped and tabix indexed. Tabix must also be installed
        and on the system path.
    score_fill : int, optional
        Value to use where score field has a missing value.
    phase_fill : int, optional
        Value to use where phase field has a missing value.
    attributes_fill : object or list of objects, optional
        Value(s) to use where attribute field(s) have a missing value.
    dtype : numpy dtype, optional
        Manually specify a dtype.

    Returns
    -------
    ft : FeatureTable"""
    a = gff3_to_recarray(path, attributes=attributes, region=region, score_fill=score_fill, phase_fill=phase_fill, attributes_fill=attributes_fill, dtype=dtype)
    if a is None:
        return None
    else:
        return FeatureTable(a, copy=False)

def set_condition(self, value):
    """Setter for 'condition' field.

    :param value - a new value of 'condition' field. Required field. Must be a String."""
    if value is None or not isinstance(value, str):
        raise TypeError("Condition is required and must be set to a String")
    else:
        self.__condition = value

def set_mmap(self, mmap):
    """Enable/Disable use of a mapped file to simulate router memory.
    By default, a mapped file is used. This is a bit slower, but requires less memory.

    :param mmap: activate/deactivate mmap (boolean)"""
    if mmap:
        flag = 1
    else:
        flag = 0
    yield from self._hypervisor.send('vm set_ram_mmap "{name}" {mmap}'.format(name=self._name, mmap=flag))
    if mmap:
        log.info('Router "{name}" [{id}]: mmap enabled'.format(name=self._name, id=self._id))
    else:
        log.info('Router "{name}" [{id}]: mmap disabled'.format(name=self._name, id=self._id))
    self._mmap = mmap

def id_to_fqname(self, uuid, type=None):
    """Return fq_name and type for uuid

    If `type` is provided check that uuid is actually
    a resource of type `type`. Raise HttpError if it's
    not the case.

    :param uuid: resource uuid
    :type uuid: UUIDv4 str
    :param type: resource type
    :type type: str

    :rtype: dict {'type': str, 'fq_name': FQName}
    :raises HttpError: uuid not found"""
    data = {"uuid": uuid}
    result = self.post_json(self.make_url("/id-to-fqname"), data)
    result['fq_name'] = FQName(result['fq_name'])
    if type is not None and not result['type'].replace('_', '-') == type:
        raise HttpError('uuid %s not found for type %s' % (uuid, type), http_status=404)
    return result

def save(self, filename, dataset_number=None, force=False, overwrite=True, extension="h5", ensure_step_table=None):
    """Save the data structure to cellpy-format.

    Args:
        filename: (str) the name you want to give the file
        dataset_number: (int) if you have several datasets, chose the one
            you want (probably leave this untouched)
        force: (bool) save a file even if the summary is not made yet
            (not recommended)
        overwrite: (bool) save the new version of the file even if old one
            exists.
        extension: (str) filename extension.
        ensure_step_table: (bool) make step-table if missing.

    Returns: Nothing at all."""
    if ensure_step_table is None:
        ensure_step_table = self.ensure_step_table
    dataset_number = self._validate_dataset_number(dataset_number)
    if dataset_number is None:
        self.logger.info("Saving test failed!")
        self._report_empty_dataset()
        return
    test = self.get_dataset(dataset_number)
    dfsummary_made = test.dfsummary_made
    if not dfsummary_made and not force:
        self.logger.info("You should not save datasets without making a summary first!")
        self.logger.info("If you really want to do it, use save with force=True")
        return
    step_table_made = test.step_table_made
    if not step_table_made and not force and not ensure_step_table:
        self.logger.info("You should not save datasets without making a step-table first!")
        self.logger.info("If you really want to do it, use save with force=True")
        return
    if not os.path.splitext(filename)[-1]:
        outfile_all = filename + "." + extension
    else:
        outfile_all = filename
    if os.path.isfile(outfile_all):
        self.logger.debug("Outfile exists")
        if overwrite:
            self.logger.debug("overwrite = True")
            os.remove(outfile_all)
        else:
            self.logger.info("save (hdf5): file exist - did not save", end=' ')
            self.logger.info(outfile_all)
            return
    if ensure_step_table:
        self.logger.debug("ensure_step_table is on")
        if not test.step_table_made:
            self.logger.debug("save: creating step table")
            self.make_step_table(dataset_number=dataset_number)
    # This method can probably be updated using pandas transpose trick
    self.logger.debug("trying to make infotable")
    infotbl, fidtbl = self._create_infotable(dataset_number=dataset_number)
    root = prms._cellpyfile_root
    self.logger.debug("trying to save to hdf5")
    txt = "\nHDF5 file: %s" % outfile_all
    self.logger.debug(txt)
    warnings.simplefilter("ignore", PerformanceWarning)
    try:
        store = pd.HDFStore(outfile_all, complib=prms._cellpyfile_complib, complevel=prms._cellpyfile_complevel)
        self.logger.debug("trying to put dfdata")
        self.logger.debug(" - lets set Data_Point as index")
        hdr_data_point = self.headers_normal.data_point_txt
        test.dfdata = test.dfdata.set_index(hdr_data_point, drop=False)
        store.put(root + "/dfdata", test.dfdata, format=prms._cellpyfile_dfdata_format)
        self.logger.debug(" dfdata -> hdf5 OK")
        self.logger.debug("trying to put dfsummary")
        store.put(root + "/dfsummary", test.dfsummary, format=prms._cellpyfile_dfsummary_format)
        self.logger.debug(" dfsummary -> hdf5 OK")
        self.logger.debug("trying to put infotbl")
        store.put(root + "/info", infotbl, format=prms._cellpyfile_infotable_format)
        self.logger.debug(" infotable -> hdf5 OK")
        self.logger.debug("trying to put fidtable")
        store.put(root + "/fidtable", fidtbl, format=prms._cellpyfile_fidtable_format)
        self.logger.debug(" fidtable -> hdf5 OK")
        self.logger.debug("trying to put step_table")
        try:
            store.put(root + "/step_table", test.step_table, format=prms._cellpyfile_stepdata_format)
            self.logger.debug(" step_table -> hdf5 OK")
        except TypeError:
            test = self._fix_dtype_step_table(test)
            store.put(root + "/step_table", test.step_table, format=prms._cellpyfile_stepdata_format)
            self.logger.debug(" fixed step_table -> hdf5 OK")
        # creating indexes
        # hdr_data_point = self.headers_normal.data_point_txt
        # hdr_cycle_steptable = self.headers_step_table.cycle
        # hdr_cycle_normal = self.headers_normal.cycle_index_txt
        # store.create_table_index(root + "/dfdata", columns=[hdr_data_point],
        #                          optlevel=9, kind='full')
    finally:
        store.close()
    self.logger.debug(" all -> hdf5 OK")
    warnings.simplefilter("default", PerformanceWarning)

def terminate(self):
    """Terminate all the :attr:`initialized_providers`."""
    logger.debug('Terminating initialized providers')
    for name in list(self.initialized_providers):
        del self[name]

def update_model_snapshot(self, job_id, snapshot_id, body, params=None):
    """`<http://www.elastic.co/guide/en/elasticsearch/reference/current/ml-update-snapshot.html>`_

    :arg job_id: The ID of the job to fetch
    :arg snapshot_id: The ID of the snapshot to update
    :arg body: The model snapshot properties to update"""
    for param in (job_id, snapshot_id, body):
        if param in SKIP_IN_PATH:
            raise ValueError("Empty value passed for a required argument.")
    return self.transport.perform_request("POST", _make_path("_ml", "anomaly_detectors", job_id, "model_snapshots", snapshot_id, "_update"), params=params, body=body)

def on_install(self, editor):
    """Extends :meth:`pyqode.core.api.Mode.on_install` method to set the
    editor instance as the parent widget.

    .. warning:: Don't forget to call **super** if you override this
        method!

    :param editor: editor instance
    :type editor: pyqode.core.api.CodeEdit"""
    Mode.on_install(self, editor)
    self.setParent(editor)
    self.setPalette(QtWidgets.QApplication.instance().palette())
    self.setFont(QtWidgets.QApplication.instance().font())
    self.editor.panels.refresh()
    self._background_brush = QtGui.QBrush(QtGui.QColor(self.palette().window().color()))
    self._foreground_pen = QtGui.QPen(QtGui.QColor(self.palette().windowText().color()))

def reward(self):
    """Returns a tuple of sum of raw and processed rewards."""
    raw_rewards, processed_rewards = 0, 0
    for ts in self.time_steps:
        # NOTE: raw_reward and processed_reward are None for the first time-step.
        if ts.raw_reward is not None:
            raw_rewards += ts.raw_reward
        if ts.processed_reward is not None:
            processed_rewards += ts.processed_reward
    return raw_rewards, processed_rewards

def match(self, item):
    """Return ``True`` if the expected matchers are matched in any order,
    otherwise ``False``."""
    if not self._unused_matchers:
        raise RuntimeError('Matcher exhausted, no more matchers to use')
    for matcher in self._unused_matchers:
        if matcher(item):
            self._used_matchers.append(matcher)
            break
    if not self._unused_matchers:
        # All patterns have been matched
        return True
    return False

def loadSharedResource(self, pchResourceName, pchBuffer, unBufferLen):
    """Loads the specified resource into the provided buffer if large enough.
    Returns the size in bytes of the buffer required to hold the specified resource."""
    fn = self.function_table.loadSharedResource
    result = fn(pchResourceName, pchBuffer, unBufferLen)
    return result

def control(self, key):
    """Send a control command."""
    if not self.connection:
        raise exceptions.ConnectionClosed()
    payload = b"\x00\x00\x00" + self._serialize_string(key)
    packet = b"\x00\x00\x00" + self._serialize_string(payload, True)
    logging.info("Sending control command: %s", key)
    self.connection.send(packet)
    self._read_response()
    time.sleep(self._key_interval)

def make_optimal_phenotype_grid(environment, phenotypes):
    """Takes an EnvironmentFile object and a 2d array of phenotypes and returns
    a 2d array in which each location contains an index representing the
    distance between the phenotype in that location and the optimal phenotype
    for that location.

    This is achieved by using the task list in the EnvironmentFile to convert
    the phenotypes to sets of tasks, and comparing them to the sets of
    resources in the environment. So if the environment file that you created
    the EnvironmentFile object from for some reason doesn't contain all of the
    tasks, or doesn't contain them in the right order this won't work. If this
    is the environment file that you used for the run of Avida that generated
    this data, you should be fine."""
    world_size = environment.size
    phenotypes = deepcopy(phenotypes)
    for i in range(world_size[1]):
        for j in range(world_size[0]):
            for k in range(len(phenotypes[i][j])):
                phenotype = phenotype_to_res_set(phenotypes[i][j][k], environment.tasks)
                diff = len(environment[i][j].symmetric_difference(phenotype))
                phenotypes[i][j][k] = diff
    return phenotypes

def masked(a, b):
    """Return a numpy array with values from a where elements in b are
    not False. Populate with numpy.nan where b is False. When plotting,
    those elements look like missing, which can be a desired result."""
    if np.any([a.dtype.kind.startswith(c) for c in ['i', 'u', 'f', 'c']]):
        n = np.array([np.nan for i in range(len(a))])
    else:
        n = np.array([None for i in range(len(a))])
    # a = a.astype(object)
    return np.where(b, a, n)

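# Usage sketch (not part of the snippet above): assumes numpy is imported as np
# and masked() is in scope; positions where the mask is False come back as NaN.
import numpy as np

values = np.array([1.0, 2.0, 3.0, 4.0])
keep = np.array([True, False, True, False])
masked(values, keep)
# array([ 1., nan,  3., nan])
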
def subscribe_to_address_webhook(callback_url, subscription_address, event='tx-confirmation', confirmations=0, confidence=0.00, coin_symbol='btc', api_key=None):
    '''
    Subscribe to transaction webhooks on a given address.
    Webhooks for transaction broadcast and each confirmation (up to 6).

    Returns the blockcypher ID of the subscription
    '''
    assert is_valid_coin_symbol(coin_symbol)
    assert is_valid_address_for_coinsymbol(subscription_address, coin_symbol)
    assert api_key, 'api_key required'
    url = make_url(coin_symbol, 'hooks')
    params = {'token': api_key}
    data = {'event': event, 'url': callback_url, 'address': subscription_address}
    if event == 'tx-confirmation' and confirmations:
        data['confirmations'] = confirmations
    elif event == 'tx-confidence' and confidence:
        data['confidence'] = confidence
    r = requests.post(url, json=data, params=params, verify=True, timeout=TIMEOUT_IN_SECONDS)
    response_dict = get_valid_json(r)
    return response_dict['id']

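# Call sketch (not part of the snippet above): the token, address and callback
# URL below are placeholders you would replace with your own values.
hook_id = subscribe_to_address_webhook(
    callback_url='https://example.com/blockcypher/hook',
    subscription_address='1DEP8i3QJCsomn4YoWdAo84KwxrGBT3mPt',
    event='tx-confirmation',
    confirmations=2,
    coin_symbol='btc',
    api_key='YOUR_BLOCKCYPHER_TOKEN',
)
# hook_id is the blockcypher ID of the new subscription
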
def load(self, dtype_conversion=None):
    """Load the data table and corresponding validation schema.

    Parameters
    ----------
    dtype_conversion : dict
        Column names as keys and corresponding type for loading the data.
        Please take a look at the `pandas documentation
        <https://pandas.pydata.org/pandas-docs/stable/io.html#specifying-column-data-types>`__
        for detailed explanations."""
    if dtype_conversion is None:
        dtype_conversion = {"growth": str}
    super(GrowthExperiment, self).load(dtype_conversion=dtype_conversion)
    self.data["growth"] = self.data["growth"].isin(self.TRUTHY)

def transact(self, contract_method: ContractFunction):
    """A wrapper around to_be_called.transact() that waits until the transaction succeeds."""
    txhash = contract_method.transact(self.transaction)
    LOG.debug(f'Sending txHash={encode_hex(txhash)}')
    (receipt, _) = check_successful_tx(web3=self.web3, txid=txhash, timeout=self.wait)
    return receipt

def removeChildren(self, child_ids):
    """Remove children from current workitem

    :param child_ids: a :class:`list` contains the children
        workitem id/number (integer or equivalent string)"""
    if not hasattr(child_ids, "__iter__"):
        error_msg = "Input parameter 'child_ids' is not iterable"
        self.log.error(error_msg)
        raise exception.BadValue(error_msg)
    self.log.debug("Try to remove children <Workitem %s> from current <Workitem %s>", child_ids, self)
    self._removeChildren(child_ids)
    self.log.info("Successfully remove children <Workitem %s> from current <Workitem %s>", child_ids, self)

def _get_event_source_status(awsclient, evt_source, lambda_arn):
    """Given an event_source dictionary, create the object and get the event source status."""
    event_source_obj = _get_event_source_obj(awsclient, evt_source)
    return event_source_obj.status(lambda_arn)

def insert(self, crc, toc):
    """Save a new cache to file"""
    if self._rw_cache:
        try:
            filename = '%s/%08X.json' % (self._rw_cache, crc)
            cache = open(filename, 'w')
            cache.write(json.dumps(toc, indent=2, default=self._encoder))
            cache.close()
            logger.info('Saved cache to [%s]', filename)
            self._cache_files += [filename]
        except Exception as exp:
            logger.warning('Could not save cache to file [%s]: %s', filename, str(exp))
    else:
        logger.warning('Could not save cache, no writable directory')

def reference(self, referencing_path=None):
    """How to reference this address in a BUILD file.

    :API: public"""
    if referencing_path is not None and self._spec_path == referencing_path:
        return self.relative_spec
    elif os.path.basename(self._spec_path) != self._target_name:
        return self.spec
    else:
        return self._spec_path

def parse_command_line():
    """Parses the command line and returns a ``Namespace`` object
    containing options and their values.

    :return:
        A ``Namespace`` object containing options and their values."""
    import argparse
    parser = argparse.ArgumentParser(description=__doc__.split("\n")[0])
    parser.add_argument('-v', '--version', action='version', version='%(prog)s ' + __version__, help="Show version number and exit.")
    parser.add_argument('input_filename', metavar='INPUT_FILE', type=str, help='Path of the input file to be preprocessed')
    parser.add_argument('-q', '--quiet', dest='should_be_quiet', action='store_true', default=False, help="Disables verbose logging")
    parser.add_argument('-L', '--log-level', '--logging-level', dest='logging_level', choices=['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL', 'NONE'], default='INFO', help="Logging level.")
    parser.add_argument('-o', '--output', metavar="OUTPUT_FILE", dest='output_filename', default=None, help='Output file name (default STDOUT)')
    parser.add_argument('-f', '--force', dest='should_force_overwrite', action='store_true', default=False, help='Force overwrite existing output file.')
    parser.add_argument('-D', '--define', metavar="EXPR", dest='definitions', action='append', help="""\
Define a variable for preprocessing. <define>
can simply be a variable name (in which case it
will be true) or it can be of the form
<var>=<val>. An attempt will be made to convert
<val> to an integer so -D 'FOO=0' will create a
false value.""")
    parser.add_argument('-I', '--include', metavar="DIR_PATH", dest='include_paths', action='append', default=['.'], help='Add a directory to the include path for #include directives.')
    parser.add_argument('-k', '--keep-lines', dest='should_keep_lines', action='store_true', default=False, help='''\
Emit empty lines for preprocessor statement
lines and skipped output lines. This allows line
numbers to stay constant.''')
    parser.add_argument('-s', '--substitute', dest='should_substitute', action='store_true', default=False, help='''\
Substitute #defines into emitted lines.
(Disabled by default to avoid polluting strings)''')
    parser.add_argument('--default-content-type', metavar="CONTENT_TYPE", dest='default_content_type', default=None, help='If the content type of the file cannot be determined this will be used. (Default: an error is raised)')
    parser.add_argument('-c', '--content-types-path', '--content-types-config', metavar="PATH", dest='content_types_config_files', default=[], action='append', help="""\
Specify a path to a content.types file to assist
with file type determination. Use the -p or -P flags
to display content types as read by pepe.""")
    parser.add_argument('-p', '--print-content-types', dest='should_print_content_types', action='store_true', default=False, help='Display content types and exit.')
    parser.add_argument('-P', '--print-content-types-config', dest='should_print_content_types_config', action='store_true', default=False, help='Display content types configuration and exit.')
    return parser.parse_args()

def lub ( self , other ) :
"""Return the least upper bound for given intervals .
: param other : AbstractInterval instance"""
|
return self . __class__ ( [ max ( self . lower , other . lower ) , max ( self . upper , other . upper ) , ] , lower_inc = self . lower_inc if self < other else other . lower_inc , upper_inc = self . upper_inc if self > other else other . upper_inc , )
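A minimal usage sketch, assuming a concrete interval class in the style of the intervals library (the IntInterval name and constructor form are assumptions):
>>> a = IntInterval([1, 4])    # hypothetical concrete subclass
>>> b = IntInterval([2, 6])
>>> a.lub(b)                   # least upper bound: [max(1, 2), max(4, 6)] == [2, 6]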
|
def create_dir ( self , directory_path , perm_bits = PERM_DEF ) :
"""Create ` directory _ path ` , and all the parent directories .
Helper method to set up your test faster .
Args :
directory _ path : The full directory path to create .
perm _ bits : The permission bits as set by ` chmod ` .
Returns :
The newly created FakeDirectory object .
Raises :
OSError : if the directory already exists ."""
|
directory_path = self . make_string_path ( directory_path )
directory_path = self . absnormpath ( directory_path )
self . _auto_mount_drive_if_needed ( directory_path )
if self . exists ( directory_path , check_link = True ) :
self . raise_os_error ( errno . EEXIST , directory_path )
path_components = self . _path_components ( directory_path )
current_dir = self . root
new_dirs = [ ]
for component in path_components :
directory = self . _directory_content ( current_dir , component ) [ 1 ]
if not directory :
new_dir = FakeDirectory ( component , filesystem = self )
new_dirs . append ( new_dir )
current_dir . add_entry ( new_dir )
current_dir = new_dir
else :
if S_ISLNK ( directory . st_mode ) :
directory = self . resolve ( directory . contents )
current_dir = directory
if directory . st_mode & S_IFDIR != S_IFDIR :
self . raise_os_error ( errno . ENOTDIR , current_dir . path )
# set the permission after creating the directories
# to allow directory creation inside a read - only directory
for new_dir in new_dirs :
new_dir . st_mode = S_IFDIR | perm_bits
self . _last_ino += 1
current_dir . st_ino = self . _last_ino
return current_dir
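A minimal usage sketch, assuming this is the pyfakefs-style FakeFilesystem API (the import path is an assumption):
>>> from pyfakefs import fake_filesystem
>>> fs = fake_filesystem.FakeFilesystem()
>>> new_dir = fs.create_dir('/foo/bar/baz')   # creates /foo, /foo/bar and /foo/bar/baz
>>> fs.exists('/foo/bar')
True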
|
def roundness ( self , value ) :
"""Set the roundness of the vowel .
: param str value : the value to be set"""
|
if ( value is not None ) and ( value not in DG_V_ROUNDNESS ) :
raise ValueError ( "Unrecognized value for roundness: '%s'" % value )
self . __roundness = value
|
def parsewarn ( self , msg , line = None ) :
"""Emit parse warning ."""
|
if line is None :
line = self . sline
self . dowarn ( 'warning: ' + msg + ' on line {}' . format ( line ) )
|
def media ( self ) :
"""Access the media
: returns : twilio . rest . fax . v1 . fax . fax _ media . FaxMediaList
: rtype : twilio . rest . fax . v1 . fax . fax _ media . FaxMediaList"""
|
if self . _media is None :
self . _media = FaxMediaList ( self . _version , fax_sid = self . _solution [ 'sid' ] , )
return self . _media
|
def create_color_stops ( breaks , colors = 'RdYlGn' , color_ramps = color_ramps ) :
"""Convert a list of breaks into color stops using colors from colorBrewer
or a custom list of color values in RGB , RGBA , HSL , CSS text , or HEX format .
See www . colorbrewer2 . org for a list of color options to pass"""
|
num_breaks = len ( breaks )
stops = [ ]
if isinstance ( colors , list ) : # Check if colors contain a list of color values
if len ( colors ) == 0 or len ( colors ) != num_breaks :
raise ValueError ( 'custom color list must be of same length as breaks list' )
for color in colors : # Check if color is valid string
try :
Colour ( color )
except Exception :
raise ValueError ( 'The color code {color} is in the wrong format' . format ( color = color ) )
for i , b in enumerate ( breaks ) :
stops . append ( [ b , colors [ i ] ] )
else :
if colors not in color_ramps . keys ( ) :
raise ValueError ( 'color does not exist in colorBrewer!' )
else :
try :
ramp = color_ramps [ colors ] [ num_breaks ]
except KeyError :
raise ValueError ( "Color ramp {} does not have a {} breaks" . format ( colors , num_breaks ) )
for i , b in enumerate ( breaks ) :
stops . append ( [ b , ramp [ i ] ] )
return stops
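A minimal usage sketch (the ramp name must exist in the color_ramps mapping; 'YlGnBu' is an assumption):
>>> create_color_stops([0, 10, 20], colors='YlGnBu')
# -> [[0, <colour>], [10, <colour>], [20, <colour>]] with colours taken from the 3-break ramp
>>> create_color_stops([0, 10], colors=['#ff0000', '#00ff00'])   # custom colour list, one per break
[[0, '#ff0000'], [10, '#00ff00']]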
|
def wait_for_event ( self , event , timeout = 10 ) :
"""Block waiting for the given event . Returns the event params .
: param event : The event to handle .
: param timeout : The maximum time to wait before raising : exc : ` . TimeoutError ` .
: return : The event params ."""
|
return self . __handler . wait_for_event ( event , timeout = timeout )
|
def _compile_mapping ( self , schema , invalid_msg = None ) :
"""Create validator for given mapping ."""
|
invalid_msg = invalid_msg or 'mapping value'
# Keys that may be required
all_required_keys = set ( key for key in schema if key is not Extra and ( ( self . required and not isinstance ( key , ( Optional , Remove ) ) ) or isinstance ( key , Required ) ) )
# Keys that may have defaults
all_default_keys = set ( key for key in schema if isinstance ( key , Required ) or isinstance ( key , Optional ) )
_compiled_schema = { }
for skey , svalue in iteritems ( schema ) :
new_key = self . _compile ( skey )
new_value = self . _compile ( svalue )
_compiled_schema [ skey ] = ( new_key , new_value )
candidates = list ( _iterate_mapping_candidates ( _compiled_schema ) )
# After we have the list of candidates in the correct order , we want to apply some optimization so that each
# key in the data being validated will be matched against the relevant schema keys only .
# No point in matching against different keys
additional_candidates = [ ]
candidates_by_key = { }
for skey , ( ckey , cvalue ) in candidates :
if type ( skey ) in primitive_types :
candidates_by_key . setdefault ( skey , [ ] ) . append ( ( skey , ( ckey , cvalue ) ) )
elif isinstance ( skey , Marker ) and type ( skey . schema ) in primitive_types :
candidates_by_key . setdefault ( skey . schema , [ ] ) . append ( ( skey , ( ckey , cvalue ) ) )
else : # These are wildcards such as ' int ' , ' str ' , ' Remove ' and others which should be applied to all keys
additional_candidates . append ( ( skey , ( ckey , cvalue ) ) )
def validate_mapping ( path , iterable , out ) :
required_keys = all_required_keys . copy ( )
# Build a map of all provided key - value pairs .
# The type ( out ) is used to retain ordering in case a ordered
# map type is provided as input .
key_value_map = type ( out ) ( )
for key , value in iterable :
key_value_map [ key ] = value
# Insert default values for non - existing keys .
for key in all_default_keys :
if not isinstance ( key . default , Undefined ) and key . schema not in key_value_map : # A default value has been specified for this missing
# key , insert it .
key_value_map [ key . schema ] = key . default ( )
error = None
errors = [ ]
for key , value in key_value_map . items ( ) :
key_path = path + [ key ]
remove_key = False
# Optimization . Validate against the matching key first , then fallback to the rest
relevant_candidates = itertools . chain ( candidates_by_key . get ( key , [ ] ) , additional_candidates )
# compare each given key / value against all compiled key / values
# schema key , ( compiled key , compiled value )
for skey , ( ckey , cvalue ) in relevant_candidates :
try :
new_key = ckey ( key_path , key )
except er . Invalid as e :
if len ( e . path ) > len ( key_path ) :
raise
if not error or len ( e . path ) > len ( error . path ) :
error = e
continue
# Backtracking is not performed once a key is selected , so if
# the value is invalid we immediately throw an exception .
exception_errors = [ ]
# check if the key is marked for removal
is_remove = new_key is Remove
try :
cval = cvalue ( key_path , value )
# include if it ' s not marked for removal
if not is_remove :
out [ new_key ] = cval
else :
remove_key = True
continue
except er . MultipleInvalid as e :
exception_errors . extend ( e . errors )
except er . Invalid as e :
exception_errors . append ( e )
if exception_errors :
if is_remove or remove_key :
continue
for err in exception_errors :
if len ( err . path ) <= len ( key_path ) :
err . error_type = invalid_msg
errors . append ( err )
# If there is a validation error for a required
# key , this means that the key was provided .
# Discard the required key so it does not
# create an additional , noisy exception .
required_keys . discard ( skey )
break
# Key and value okay , mark as found in case it was
# a Required ( ) field .
required_keys . discard ( skey )
break
else :
if remove_key : # remove key
continue
elif self . extra == ALLOW_EXTRA :
out [ key ] = value
elif self . extra != REMOVE_EXTRA :
errors . append ( er . Invalid ( 'extra keys not allowed' , key_path ) )
# else REMOVE _ EXTRA : ignore the key so it ' s removed from output
# for any required keys left that weren ' t found and don ' t have defaults :
for key in required_keys :
msg = key . msg if hasattr ( key , 'msg' ) and key . msg else 'required key not provided'
errors . append ( er . RequiredFieldInvalid ( msg , path + [ key ] ) )
if errors :
raise er . MultipleInvalid ( errors )
return out
return validate_mapping
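From the caller's side, the compiled mapping validator is what a voluptuous Schema applies to dict input; a short sketch of the observable behaviour (the example schema and values are hypothetical):
>>> from voluptuous import Schema, Required, Optional
>>> schema = Schema({Required('name'): str, Optional('age', default=0): int})
>>> schema({'name': 'bob'})         # default inserted for the missing Optional key
{'name': 'bob', 'age': 0}
>>> schema({})                      # missing Required key -> raises MultipleInvalid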
|
def functions ( self ) :
"""Returns a generator of all standalone functions in the file , in textual
order .
> > > file = FileDoc ( ' module . js ' , read _ file ( ' examples / module . js ' ) )
> > > list ( file . functions ) [ 0 ] . name
' the _ first _ function '
> > > list ( file . functions ) [ 3 ] . name
' not _ auto _ discovered '"""
|
def is_function ( comment ) :
return isinstance ( comment , FunctionDoc ) and not comment . member
return self . _filtered_iter ( is_function )
|
def file_ns_handler ( importer , path_item , packageName , module ) :
"""Compute an ns - package subpath for a filesystem or zipfile importer"""
|
subpath = os . path . join ( path_item , packageName . split ( '.' ) [ - 1 ] )
normalized = _normalize_cached ( subpath )
for item in module . __path__ :
if _normalize_cached ( item ) == normalized :
break
else : # Only return the path if it ' s not already there
return subpath
|
def combine_related ( self , return_toplevel = True , poolsize = None , size_cutoff = 100 ) :
"""Connect related statements based on their refinement relationships .
This function takes as a starting point the unique statements ( with
duplicates removed ) and returns a modified flat list of statements
containing only those statements which do not represent a refinement of
other existing statements . In other words , the more general versions of
a given statement do not appear at the top level , but instead are
listed in the ` supports ` field of the top - level statements .
If : py : attr : ` unique _ stmts ` has not been initialized with the
de - duplicated statements , : py : meth : ` combine _ duplicates ` is called
internally .
After this function is called the attribute : py : attr : ` related _ stmts ` is
set as a side - effect .
The procedure for combining statements in this way involves a series
of steps :
1 . The statements are grouped by type ( e . g . , Phosphorylation ) and
each type is iterated over independently .
2 . Statements of the same type are then grouped according to their
Agents ' entity hierarchy component identifiers . For instance ,
ERK , MAPK1 and MAPK3 are all in the same connected component in the
entity hierarchy and therefore all Statements of the same type
referencing these entities will be grouped . This grouping assures
that relations are only possible within Statement groups and
not among groups . For two Statements to be in the same group at
this step , the Statements must be the same type and the Agents at
each position in the Agent lists must either be in the same
hierarchy component , or if they are not in the hierarchy , must have
identical entity _ matches _ keys . Statements with None in one of the
Agent list positions are collected separately at this stage .
3 . Statements with None at either the first or second position are
iterated over . For a statement with a None as the first Agent ,
the second Agent is examined ; then the Statement with None is
added to all Statement groups with a corresponding component or
entity _ matches _ key in the second position . The same procedure is
performed for Statements with None at the second Agent position .
4 . The statements within each group are then compared ; if one
statement represents a refinement of the other ( as defined by the
` refinement _ of ( ) ` method implemented for the Statement ) , then the
more refined statement is added to the ` supports ` field of the more
general statement , and the more general statement is added to the
` supported _ by ` field of the more refined statement .
5 . A new flat list of statements is created that contains only those
statements that have no ` supports ` entries ( statements containing
such entries are not eliminated , because they will be retrievable
from the ` supported _ by ` fields of other statements ) . This list
is returned to the caller .
On multi - core machines , the algorithm can be parallelized by setting
the poolsize argument to the desired number of worker processes .
This feature is only available in Python 3.4 and above .
. . note : : Subfamily relationships must be consistent across arguments
For now , we require that merges can only occur if the * isa *
relationships are all in the * same direction for all the agents * in
a Statement . For example , the two statement groups : ` RAF _ family - >
MEK1 ` and ` BRAF - > MEK _ family ` would not be merged , since BRAF
* isa * RAF _ family , but MEK _ family is not a MEK1 . In the future this
restriction could be revisited .
Parameters
return _ toplevel : Optional [ bool ]
If True only the top level statements are returned .
If False , all statements are returned . Default : True
poolsize : Optional [ int ]
The number of worker processes to use to parallelize the
comparisons performed by the function . If None ( default ) , no
parallelization is performed . NOTE : Parallelization is only
available on Python 3.4 and above .
size _ cutoff : Optional [ int ]
Groups with size _ cutoff or more statements are sent to worker
processes , while smaller groups are compared in the parent process .
Default value is 100 . Not relevant when parallelization is not
used .
Returns
list of : py : class : ` indra . statement . Statement `
The returned list contains Statements representing the more
concrete / refined versions of the Statements involving particular
entities . The attribute : py : attr : ` related _ stmts ` is also set to
this list . However , if return _ toplevel is False then all
statements are returned , irrespective of level of specificity .
In this case the relationships between statements can
be accessed via the supports / supported _ by attributes .
Examples
A more general statement with no information about a Phosphorylation
site is identified as supporting a more specific statement :
> > > from indra . preassembler . hierarchy _ manager import hierarchies
> > > braf = Agent ( ' BRAF ' )
> > > map2k1 = Agent ( ' MAP2K1 ' )
> > > st1 = Phosphorylation ( braf , map2k1)
> > > st2 = Phosphorylation ( braf , map2k1 , residue = ' S ' )
> > > pa = Preassembler ( hierarchies , [ st1 , st2 ] )
> > > combined _ stmts = pa . combine _ related ( ) # doctest : + ELLIPSIS
> > > combined _ stmts
[ Phosphorylation ( BRAF ( ) , MAP2K1 ( ) , S ) ]
> > > combined _ stmts [ 0 ] . supported _ by
[ Phosphorylation ( BRAF ( ) , MAP2K1 ( ) ) ]
> > > combined _ stmts [ 0 ] . supported _ by [ 0 ] . supports
[ Phosphorylation ( BRAF ( ) , MAP2K1 ( ) , S ) ]"""
|
if self . related_stmts is not None :
if return_toplevel :
return self . related_stmts
else :
assert self . unique_stmts is not None
return self . unique_stmts
# Call combine _ duplicates , which lazily initializes self . unique _ stmts
unique_stmts = self . combine_duplicates ( )
# Generate the index map , linking related statements .
idx_map = self . _generate_id_maps ( unique_stmts , poolsize , size_cutoff )
# Now iterate over all indices and set supports / supported by
for ix1 , ix2 in idx_map :
unique_stmts [ ix1 ] . supported_by . append ( unique_stmts [ ix2 ] )
unique_stmts [ ix2 ] . supports . append ( unique_stmts [ ix1 ] )
# Get the top level statements
self . related_stmts = [ st for st in unique_stmts if not st . supports ]
logger . debug ( '%d top level' % len ( self . related_stmts ) )
if return_toplevel :
return self . related_stmts
else :
return unique_stmts
|
def _preprocess_scan_params ( self , xml_params ) :
"""Processes the scan parameters ."""
|
params = { }
for param in xml_params :
params [ param . tag ] = param . text or ''
# Set default values .
for key in self . scanner_params :
if key not in params :
params [ key ] = self . get_scanner_param_default ( key )
if self . get_scanner_param_type ( key ) == 'selection' :
params [ key ] = params [ key ] . split ( '|' ) [ 0 ]
# Validate values .
for key in params :
param_type = self . get_scanner_param_type ( key )
if not param_type :
continue
if param_type in [ 'integer' , 'boolean' ] :
try :
params [ key ] = int ( params [ key ] )
except ValueError :
raise OSPDError ( 'Invalid %s value' % key , 'start_scan' )
if param_type == 'boolean' :
if params [ key ] not in [ 0 , 1 ] :
raise OSPDError ( 'Invalid %s value' % key , 'start_scan' )
elif param_type == 'selection' :
selection = self . get_scanner_param_default ( key ) . split ( '|' )
if params [ key ] not in selection :
raise OSPDError ( 'Invalid %s value' % key , 'start_scan' )
if self . get_scanner_param_mandatory ( key ) and params [ key ] == '' :
raise OSPDError ( 'Mandatory %s value is missing' % key , 'start_scan' )
return params
|
def _op_generic_pack_StoU_saturation ( self , args , src_size , dst_size ) :
"""Generic pack with unsigned saturation .
Split args into chunks of src _ size signed bits and pack them into unsigned saturated chunks of dst _ size bits .
Then chunks are concatenated resulting in a BV of len ( args ) * dst _ size / / src _ size * len ( args [ 0 ] ) bits ."""
|
if src_size <= 0 or dst_size <= 0 :
raise SimOperationError ( "Can't pack from or to zero or negative size in %s" % self . name )
result = None
max_value = claripy . BVV ( - 1 , dst_size ) . zero_extend ( src_size - dst_size )
# max value for unsigned saturation
min_value = claripy . BVV ( 0 , src_size )
# min unsigned value always 0
for v in args :
for src_value in v . chop ( src_size ) :
dst_value = self . _op_generic_StoU_saturation ( src_value , min_value , max_value )
dst_value = dst_value . zero_extend ( dst_size - src_size )
if result is None :
result = dst_value
else :
result = self . _op_concat ( ( result , dst_value ) )
return result
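For intuition, a plain-integer sketch of the unsigned-saturation step applied to each chunk (independent of claripy; the helper name is ours):

def saturate_unsigned(value, dst_bits):
    # clamp a signed source value into the unsigned range [0, 2**dst_bits - 1]
    hi = (1 << dst_bits) - 1
    return 0 if value < 0 else (hi if value > hi else value)

# e.g. packing 16-bit signed values into 8-bit unsigned chunks:
# saturate_unsigned(-5, 8) -> 0, saturate_unsigned(300, 8) -> 255, saturate_unsigned(42, 8) -> 42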
|
def _prepare_version ( self ) :
"""Setup the application version"""
|
if config . VERSION not in self . _config :
self . _config [ config . VERSION ] = __version__
|
def cleanup ( self ) :
'''remove sockets on shutdown'''
|
log . debug ( 'ConCache cleaning up' )
if os . path . exists ( self . cache_sock ) :
os . remove ( self . cache_sock )
if os . path . exists ( self . update_sock ) :
os . remove ( self . update_sock )
if os . path . exists ( self . upd_t_sock ) :
os . remove ( self . upd_t_sock )
|
def function ( x , ax , ay ) :
'''general square root function'''
|
with np . errstate ( invalid = 'ignore' ) :
return ay * ( x - ax ) ** 0.5
|
def _dict_from_lines ( lines , key_nums , sep = None ) :
"""Helper function to parse formatted text structured like :
value1 value2 . . . sep key1 , key2 . . .
key _ nums is a list giving the number of keys for each line . 0 if line should be skipped .
sep is a string denoting the character that separates the keys from the value ( None if
no separator is present ) .
Returns :
dict { key1 : value1 , key2 : value2 , . . . }
Raises :
ValueError if parsing fails ."""
|
if is_string ( lines ) :
lines = [ lines ]
if not isinstance ( key_nums , collections . abc . Iterable ) :
key_nums = [ key_nums ]
if len ( lines ) != len ( key_nums ) :
err_msg = "lines = %s\n key_num = %s" % ( str ( lines ) , str ( key_nums ) )
raise ValueError ( err_msg )
kwargs = Namespace ( )
for ( i , nk ) in enumerate ( key_nums ) :
if nk == 0 :
continue
line = lines [ i ]
tokens = [ t . strip ( ) for t in line . split ( ) ]
values , keys = tokens [ : nk ] , "" . join ( tokens [ nk : ] )
# Sanitize keys : In some case we might get strings in the form : foo [ , bar ]
keys . replace ( "[" , "" ) . replace ( "]" , "" )
keys = keys . split ( "," )
if sep is not None :
check = keys [ 0 ] [ 0 ]
if check != sep :
raise ValueError ( "Expecting separator %s, got %s" % ( sep , check ) )
keys [ 0 ] = keys [ 0 ] [ 1 : ]
if len ( values ) != len ( keys ) :
msg = "line: %s\n len(keys) != len(value)\nkeys: %s\n values: %s" % ( line , keys , values )
raise ValueError ( msg )
kwargs . update ( zip ( keys , values ) )
return kwargs
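A minimal usage sketch for a single line in the "values sep keys" format described above (the values, key names and separator are hypothetical; the returned Namespace is assumed to support dict-style item access):
>>> d = _dict_from_lines("1.0 0.01 # ecut,tsmear", key_nums=[2], sep="#")
>>> d['ecut'], d['tsmear']
('1.0', '0.01')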
|
def connect_to_endpoints_nowait ( self , * endpoints : ConnectionConfig ) -> None :
"""Connect to the given endpoints as soon as they become available but do not block ."""
|
self . _throw_if_already_connected ( * endpoints )
for endpoint in endpoints :
asyncio . ensure_future ( self . _await_connect_to_endpoint ( endpoint ) )
|
def handle_bail ( self , bail ) :
"""Handle a bail line ."""
|
self . _add_error ( _ ( "Bailed: {reason}" ) . format ( reason = bail . reason ) )
|
def _local_pauli_eig_meas ( op , idx ) :
"""Generate gate sequence to measure in the eigenbasis of a Pauli operator , assuming
we are only able to measure in the Z eigenbasis . ( Note : The unitary operations of this
Program are essentially the Hermitian conjugates of those in : py : func : ` _ one _ q _ pauli _ prep ` )"""
|
if op == 'X' :
return Program ( RY ( - pi / 2 , idx ) )
elif op == 'Y' :
return Program ( RX ( pi / 2 , idx ) )
elif op == 'Z' :
return Program ( )
raise ValueError ( f'Unknown operation {op}' )
|
def format_all ( format_string , env ) :
"""Format the input string using each possible combination of lists
in the provided environment . Yields each formatted string ."""
|
prepared_env = parse_pattern ( format_string , env , lambda x , y : [ FormatWrapper ( x , z ) for z in y ] )
# Generate each possible combination , format the string with it and yield
# the resulting string :
for field_values in product ( * prepared_env . itervalues ( ) ) :
format_env = dict ( izip ( prepared_env . iterkeys ( ) , field_values ) )
yield format_string . format ( ** format_env )
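An illustrative sketch of the expansion (the exact behaviour depends on parse_pattern and FormatWrapper, which are not shown here; the environment below is hypothetical, and the generator is Python 2 style to match the itervalues/izip calls above):
>>> env = {'name': ['alice', 'bob'], 'ext': ['txt']}
>>> list(format_all('{name}.{ext}', env))
['alice.txt', 'bob.txt']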
|
def parse_user ( raw ) :
"""Parse nick ( ! user ( @ host ) ? ) ? structure ."""
|
nick = raw
user = None
host = None
# Attempt to extract host .
if protocol . HOST_SEPARATOR in raw :
raw , host = raw . split ( protocol . HOST_SEPARATOR )
# Attempt to extract user .
if protocol . USER_SEPARATOR in raw :
nick , user = raw . split ( protocol . USER_SEPARATOR )
return nick , user , host
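A usage sketch, assuming the protocol separators are the usual IRC '!' (user) and '@' (host):
>>> parse_user('nick!user@example.com')
('nick', 'user', 'example.com')
>>> parse_user('nick')
('nick', None, None)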
|
def sniff_iface ( f ) :
"""Ensure decorated function is called with a value for iface .
If no iface provided , inject net iface inferred from unit private address ."""
|
def iface_sniffer ( * args , ** kwargs ) :
if not kwargs . get ( 'iface' , None ) :
kwargs [ 'iface' ] = get_iface_from_addr ( unit_get ( 'private-address' ) )
return f ( * args , ** kwargs )
return iface_sniffer
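A sketch of how the decorator is applied (the decorated function is hypothetical):

@sniff_iface
def configure_vlan(vlan_id, iface=None):
    # when called without iface, the decorator fills it in from the
    # network interface that owns the unit's private-address
    ...

configure_vlan(42)                   # iface injected automatically
configure_vlan(42, iface='eth1')     # explicit value passed through untouched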
|
def request_param_update ( self , complete_name ) :
"""Request an update of the value for the supplied parameter ."""
|
self . param_updater . request_param_update ( self . toc . get_element_id ( complete_name ) )
|
def legislators ( request , abbr ) :
'''Context :
- metadata
- chamber
- chamber _ title
- chamber _ select _ template
- chamber _ select _ collection
- chamber _ select _ chambers
- show _ chamber _ column
- abbr
- legislators
- sort _ order
- sort _ key
- legislator _ table
- nav _ active
Templates :
- billy / web / public / legislators . html
- billy / web / public / chamber _ select _ form . html
- billy / web / public / legislator _ table . html'''
|
try :
meta = Metadata . get_object ( abbr )
except DoesNotExist :
raise Http404
spec = { 'active' : True , 'district' : { '$exists' : True } }
chambers = dict ( ( k , v [ 'name' ] ) for k , v in meta [ 'chambers' ] . items ( ) )
chamber = request . GET . get ( 'chamber' , 'both' )
if chamber in chambers :
spec [ 'chamber' ] = chamber
chamber_title = meta [ 'chambers' ] [ chamber ] [ 'title' ] + 's'
else :
chamber = 'both'
chamber_title = 'Legislators'
fields = mongo_fields ( 'leg_id' , 'full_name' , 'photo_url' , 'district' , 'party' , 'first_name' , 'last_name' , 'chamber' , billy_settings . LEVEL_FIELD )
sort_key = 'district'
sort_order = 1
if request . GET :
sort_key = request . GET . get ( 'key' , sort_key )
sort_order = int ( request . GET . get ( 'order' , sort_order ) )
legislators = meta . legislators ( extra_spec = spec , fields = fields )
def sort_by_district ( obj ) :
matchobj = re . search ( r'\d+' , obj . get ( 'district' , '' ) or '' )
if matchobj :
return int ( matchobj . group ( ) )
else :
return obj . get ( 'district' , '' )
legislators = sorted ( legislators , key = sort_by_district )
if sort_key != 'district' :
legislators = sorted ( legislators , key = operator . itemgetter ( sort_key ) , reverse = ( sort_order == - 1 ) )
else :
legislators = sorted ( legislators , key = sort_by_district , reverse = bool ( 0 > sort_order ) )
sort_order = { 1 : - 1 , - 1 : 1 } [ sort_order ]
legislators = list ( legislators )
return TemplateResponse ( request , templatename ( 'legislators' ) , dict ( metadata = meta , chamber = chamber , chamber_title = chamber_title , chamber_select_template = templatename ( 'chamber_select_form' ) , chamber_select_collection = 'legislators' , chamber_select_chambers = chambers , show_chamber_column = True , abbr = abbr , legislators = legislators , sort_order = sort_order , sort_key = sort_key , legislator_table = templatename ( 'legislator_table' ) , nav_active = 'legislators' ) )
|
def request ( self , location , fragment_enc = False ) :
"""Given a URL this method will add a fragment , a query part or extend
a query part if it already exists with the information in this instance .
: param location : A URL
: param fragment _ enc : Whether the information should be placed in a
fragment ( True ) or in a query part ( False )
: return : The extended URL"""
|
_l = as_unicode ( location )
_qp = as_unicode ( self . to_urlencoded ( ) )
if fragment_enc :
return "%s#%s" % ( _l , _qp )
else :
if "?" in location :
return "%s&%s" % ( _l , _qp )
else :
return "%s?%s" % ( _l , _qp )
|
def redirect ( to , * args , ** kwargs ) :
"""Similar to the Django ` ` redirect ` ` shortcut but with altered
functionality . If an optional ` ` params ` ` argument is provided , the
dictionary items will be injected as query parameters on the
redirection URL ."""
|
params = kwargs . pop ( 'params' , { } )
try :
to = reverse ( to , args = args , kwargs = kwargs )
except NoReverseMatch :
if '/' not in to and '.' not in to :
to = reverse ( 'cas_login' )
elif not service_allowed ( to ) :
raise PermissionDenied ( )
if params :
to = add_query_params ( to , params )
logger . debug ( "Redirecting to %s" % to )
return HttpResponseRedirect ( to )
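A usage sketch inside a view (the parameter value is hypothetical):
# reverse a named URL pattern and append query parameters to it
return redirect('cas_login', params={'service': 'https://www.example.com/'})
# -> HttpResponseRedirect to e.g. /login?service=https%3A%2F%2Fwww.example.com%2F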
|
def _poll_for_refresh ( self , check_id ) :
"""Given a Trusted Advisor check _ id that has just been refreshed , poll
until the refresh is complete . Once complete , return the check result .
: param check _ id : the Trusted Advisor check ID
: type check _ id : str
: returns : dict check result . The return value of
: py : meth : ` Support . Client . describe _ trusted _ advisor _ check _ result `
: rtype : dict"""
|
logger . warning ( 'Polling for TA check %s refresh...' , check_id )
if self . refresh_timeout is None : # no timeout . . .
cutoff = datetime_now ( ) + timedelta ( days = 365 )
else :
cutoff = datetime_now ( ) + timedelta ( seconds = self . refresh_timeout )
last_status = None
while datetime_now ( ) <= cutoff :
logger . debug ( 'Checking refresh status' )
status = self . conn . describe_trusted_advisor_check_refresh_statuses ( checkIds = [ check_id ] ) [ 'statuses' ] [ 0 ] [ 'status' ]
if status in [ 'success' , 'abandoned' ] :
logger . info ( 'Refresh status: %s; done polling' , status )
break
if status == 'none' and last_status not in [ 'none' , None ] :
logger . warning ( 'Trusted Advisor check refresh status went ' 'from "%s" to "%s"; refresh is either complete ' 'or timed out on AWS side. Continuing' , last_status , status )
break
last_status = status
logger . info ( 'Refresh status: %s; sleeping 30s' , status )
sleep ( 30 )
else :
logger . error ( 'Timed out waiting for TA Check refresh; status=%s' , status )
logger . info ( 'Done polling for check refresh' )
result , last_dt = self . _get_check_result ( check_id )
logger . debug ( 'Check shows last refresh time of: %s' , last_dt )
return result
|
def compute_grouped_metric ( ungrouped_metric , group_matrix ) :
'''Computes the mean value for the groups of parameter values in the
argument ungrouped _ metric'''
|
group_matrix = np . array ( group_matrix , dtype = bool )
mu_star_masked = np . ma . masked_array ( ungrouped_metric * group_matrix . T , mask = ( group_matrix ^ 1 ) . T )
mean_of_mu_star = np . ma . mean ( mu_star_masked , axis = 1 )
return mean_of_mu_star
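A small worked example with two groups of two parameters each (the values are hypothetical):
>>> import numpy as np
>>> metric = np.array([1.0, 2.0, 3.0, 4.0])               # one value per parameter
>>> groups = np.array([[1, 0], [1, 0], [0, 1], [0, 1]])   # parameter-to-group membership
>>> compute_grouped_metric(metric, groups)                 # per-group means: [1.5, 3.5]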
|
def _explode_raster ( raster , band_names = [ ] ) : # type : ( _ Raster , Iterable [ str ] ) - > List [ _ Raster ]
"""Splits a raster into multiband rasters ."""
|
# Using band _ names = [ ] does no harm because we are not mutating it in place
# and it makes MyPy happy
if not band_names :
band_names = raster . band_names
else :
band_names = list ( IndexedSet ( raster . band_names ) . intersection ( band_names ) )
return [ _Raster ( image = raster . bands_data ( [ band_name ] ) , band_names = [ band_name ] ) for band_name in band_names ]
|
def daily_pr_intensity ( pr , thresh = '1 mm/day' , freq = 'YS' ) :
r"""Average daily precipitation intensity
Return the average precipitation over wet days .
Parameters
pr : xarray . DataArray
Daily precipitation [ mm / d or kg / m2 / s ]
thresh : str
precipitation value over which a day is considered wet . Default : ' 1 mm / day '
freq : str , optional
Resampling frequency defining the periods
defined in http : / / pandas . pydata . org / pandas - docs / stable / timeseries . html # resampling . Default : ' YS '
Returns
xarray . DataArray
The average precipitation over wet days for each period
Notes
Let : math : ` \ mathbf { p } = p _ 0 , p _ 1 , \ ldots , p _ n ` be the daily precipitation and : math : ` thresh ` be the precipitation
threshold defining wet days . Then the daily precipitation intensity is defined as
. . math : :
\frac{\sum_{i=0}^n p_i [p_i \geq thresh]}{\sum_{i=0}^n [p_i \geq thresh]}
where : math : ` [ P ] ` is 1 if : math : ` P ` is true , and 0 if false .
Examples
The following would compute for each grid cell of file ` pr . day . nc ` the average
precipitation fallen over days with precipitation > = 5 mm at seasonal
frequency , ie DJF , MAM , JJA , SON , DJF , etc . :
> > > pr = xr . open _ dataset ( ' pr . day . nc ' )
> > > daily _ int = daily _ pr _ intensity ( pr , thresh = ' 5 mm / day ' , freq = " QS - DEC " )"""
|
t = utils . convert_units_to ( thresh , pr , 'hydro' )
# put pr = 0 for non wet - days
pr_wd = xr . where ( pr >= t , pr , 0 )
pr_wd . attrs [ 'units' ] = pr . units
# sum over wanted period
s = pr_wd . resample ( time = freq ) . sum ( dim = 'time' , keep_attrs = True )
sd = utils . pint_multiply ( s , 1 * units . day , 'mm' )
# get number of wetdays over period
wd = wetdays ( pr , thresh = thresh , freq = freq )
return sd / wd
|
def checkSimbad ( g , target , maxobj = 5 , timeout = 5 ) :
"""Sends off a request to Simbad to check whether a target is recognised .
Returns a list of results , or raises an exception if it times out ."""
|
url = 'http://simbad.u-strasbg.fr/simbad/sim-script'
q = 'set limit ' + str ( maxobj ) + '\nformat object form1 "Target: %IDLIST(1) | %COO(A D;ICRS)"\nquery ' + target
query = urllib . parse . urlencode ( { 'submit' : 'submit script' , 'script' : q } )
resp = urllib . request . urlopen ( url , query . encode ( ) , timeout )
data = False
error = False
results = [ ]
for line in resp :
line = line . decode ( )
if line . startswith ( '::data::' ) :
data = True
if line . startswith ( '::error::' ) :
error = True
if data and line . startswith ( 'Target:' ) :
name , coords = line [ 7 : ] . split ( ' | ' )
results . append ( { 'Name' : name . strip ( ) , 'Position' : coords . strip ( ) , 'Frame' : 'ICRS' } )
resp . close ( )
if error and len ( results ) :
g . clog . warn ( 'drivers.check: Simbad: there appear to be some ' + 'results but an error was unexpectedly raised.' )
return results
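A usage sketch (the target name is arbitrary; g must provide the clog logger used above):
results = checkSimbad(g, 'M31')
for entry in results:
    print(entry['Name'], entry['Position'])   # identifier and ICRS coordinates as returned by Simbad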
|
def run ( self ) :
"""Calls the main function of a plugin and mutates the output dict
with its return value . Provides an easy way to change the output
whilst not needing to constantly poll a queue in another thread and
allowing plugins to manage their own intervals ."""
|
self . running = True
while self . running :
ret = self . func ( )
self . output_dict [ ret [ 'name' ] ] = ret
time . sleep ( self . interval )
return
|
def writeline ( self , data , crlf = "\n" ) : # pylint : disable = arguments - differ
"""Write data to process .
: param data : data to write
: param crlf : line end character
: return : Nothing"""
|
GenericProcess . writeline ( self , data , crlf = crlf )
|
def proxy_global ( name , no_expand_macro = False , fname = 'func' , args = ( ) ) :
"""Used to automatically asrootpy ROOT ' s thread local variables"""
|
if no_expand_macro : # pragma : no cover
# handle older ROOT versions without _ ExpandMacroFunction wrapping
@ property
def gSomething_no_func ( self ) :
glob = self ( getattr ( ROOT , name ) )
# create a fake func ( ) that just returns self
def func ( ) :
return glob
glob . func = func
return glob
return gSomething_no_func
@ property
def gSomething ( self ) :
obj_func = getattr ( getattr ( ROOT , name ) , fname )
try :
obj = obj_func ( * args )
except ReferenceError : # null pointer
return None
# asrootpy
return self ( obj )
return gSomething
|
def get_item ( self , table_name , key , attributes_to_get = None , consistent_read = False , object_hook = None ) :
"""Return a set of attributes for an item that matches
the supplied key .
: type table _ name : str
: param table _ name : The name of the table containing the item .
: type key : dict
: param key : A Python version of the Key data structure
defined by DynamoDB .
: type attributes _ to _ get : list
: param attributes _ to _ get : A list of attribute names .
If supplied , only the specified attribute names will
be returned . Otherwise , all attributes will be returned .
: type consistent _ read : bool
: param consistent _ read : If True , a consistent read
request is issued . Otherwise , an eventually consistent
request is issued ."""
|
data = { 'TableName' : table_name , 'Key' : key }
if attributes_to_get :
data [ 'AttributesToGet' ] = attributes_to_get
if consistent_read :
data [ 'ConsistentRead' ] = True
json_input = json . dumps ( data )
response = self . make_request ( 'GetItem' , json_input , object_hook = object_hook )
if 'Item' not in response :
raise dynamodb_exceptions . DynamoDBKeyNotFoundError ( "Key does not exist." )
return response
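A usage sketch with the low-level key structure of the early DynamoDB wire format this layer targets (the instance name, table, key values and attribute names are hypothetical):
key = {'HashKeyElement': {'S': 'user-123'},
       'RangeKeyElement': {'N': '42'}}
item = layer1.get_item('my-table', key, attributes_to_get=['score', 'level'],
                       consistent_read=True)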
|
def split_file ( splitNum , fileInput , lines ) :
"""split _ file is used to split fileInput into splitNum small pieces file .
For example , when splitNum is 56 , a 112 lines file will be split into 56 files and each file has 2 lines .
: param splitNum : split into splitNum files
: param fileInput : file to be split
: param lines : total number of lines in fileInput"""
|
quot = lines // splitNum
rema = lines % splitNum
files = [ ]
current_line = 0
for i in range ( splitNum ) :
if i < rema :
read_line = quot + 1
else :
read_line = quot
temp = tempfile . NamedTemporaryFile ( )
os . system ( "head -n%d %s| tail -n%d > %s" % ( current_line + read_line , fileInput , read_line , temp . name ) )
current_line += read_line
files . append ( temp )
return files
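A usage sketch (the file name and counts are hypothetical); each returned NamedTemporaryFile is deleted when closed:
parts = split_file(4, 'input.txt', 100)   # 100 lines -> four temp files of 25 lines each
for tmp in parts:
    with open(tmp.name) as fh:
        process(fh.read())                 # process() is a placeholder for your own handling
    tmp.close()                            # closing removes the temporary file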
|
def quasi_newton_uniform ( points , cells , * args , ** kwargs ) :
"""Like linear _ solve above , but assuming rho = = 1 . Note that the energy gradient
\\ partial E _ i = 2 / ( d + 1 ) sum _ { tau _ j in omega _ i } ( x _ i - b _ j ) \\ int _ { tau _ j } rho
becomes
\\ partial E _ i = 2 / ( d + 1 ) sum _ { tau _ j in omega _ i } ( x _ i - b _ j ) | tau _ j | .
Because of the dependence of | tau _ j | on the point coordinates , this is a nonlinear
problem .
This method makes the simplifying assumption that | tau _ j | does in fact _ not _ depend
on the point coordinates . With this , one still only needs to solve a linear system ."""
|
def get_new_points ( mesh ) : # do one Newton step
# TODO need copy ?
x = mesh . node_coords . copy ( )
cells = mesh . cells [ "nodes" ]
jac_x = jac_uniform ( x , cells )
x -= solve_hessian_approx_uniform ( x , cells , jac_x )
return x
mesh = MeshTri ( points , cells )
runner ( get_new_points , mesh , * args , ** kwargs )
return mesh . node_coords , mesh . cells [ "nodes" ]
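A usage sketch (the keyword arguments are forwarded to runner and are assumptions here):
# points: (n, 2) float array, cells: (m, 3) int array describing a triangle mesh
new_points, new_cells = quasi_newton_uniform(points, cells, tol=1.0e-5, max_num_steps=50)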
|