signature | implementation
---|---
def add_to_list(my_list, my_element):
    """Helper function to add a new my_element to my_list based on its type.
    Adds my_element as a single item if it is not a list, otherwise extends
    my_list with its contents. Also guarantees that all elements are unique.
    :param my_list: A list
    :type my_list: list
    :param my_element: A new element
    :type my_element: str, list
    :returns: A list with unique elements
    :rtype: list"""
|
    if isinstance(my_element, list):
        for element in my_element:
            my_list = add_to_list(my_list, element)
    else:
        if my_element not in my_list:
            my_list.append(my_element)
    return my_list
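# A minimal usage sketch of the helper above, with hypothetical values:
existing = ['a']
combined = add_to_list(existing, ['b', 'c', 'a'])
assert combined == ['a', 'b', 'c']  # nested list flattened, duplicate 'a' skipped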
|
def make_po_file(self, potfile, locale):
    """Creates or updates the PO file for self.domain and :param locale:.
    Uses the contents of the existing :param potfile:.
    Uses the msguniq, msgmerge, and msgattrib GNU gettext utilities."""
|
    pofile = self._get_po_path(potfile, locale)
    msgs = self._get_unique_messages(potfile)
    msgs = self._merge_messages(potfile, pofile, msgs)
    msgs = self._strip_package_version(msgs)
    with open(pofile, 'w') as fp:
        fp.write(msgs)
    self._remove_obsolete_messages(pofile)
|
def set_mode(self, anchor_id, mode):
    """Send a packet to set the anchor mode. If the anchor receives the packet,
    it will change mode and reset."""
|
    data = struct.pack('<BB', LoPoAnchor.LPP_TYPE_MODE, mode)
    self.crazyflie.loc.send_short_lpp_packet(anchor_id, data)
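# A small, self-contained illustration of the packet layout built above
# (hypothetical type/mode values): '<BB' packs two unsigned bytes, little-endian.
import struct
assert struct.pack('<BB', 1, 2) == b'\x01\x02'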
|
def generatePixmap(base64_data):
    """Generates a new pixmap based on the input base64 data.
    :param base64_data: <str>"""
|
    import_qt(globals())
    binary_data = binascii.a2b_base64(base64_data)
    arr = QtCore.QByteArray.fromRawData(binary_data)
    img = QtGui.QImage.fromData(arr)
    return QtGui.QPixmap(img)
|
def reindex(self, kdims=[], force=False):
    """Reindexes object dropping static or supplied kdims
    Creates a new object with a reordered or reduced set of key
    dimensions. By default drops all non-varying key dimensions.
    Reducing the number of key dimensions will discard information
    from the keys. All data values are accessible in the newly
    created object as the new labels must be sufficient to address
    each value uniquely.
    Args:
        kdims (optional): New list of key dimensions after reindexing
        force (bool, optional): Whether to drop non-unique items
    Returns:
        Reindexed object"""
|
    old_kdims = [d.name for d in self.kdims]
    if not isinstance(kdims, list):
        kdims = [kdims]
    elif not len(kdims):
        kdims = [d for d in old_kdims
                 if not len(set(self.dimension_values(d))) == 1]
    indices = [self.get_dimension_index(el) for el in kdims]
    keys = [tuple(k[i] for i in indices) for k in self.data.keys()]
    reindexed_items = OrderedDict(
        (k, v) for (k, v) in zip(keys, self.data.values()))
    reduced_dims = set([d.name for d in self.kdims]).difference(kdims)
    dimensions = [self.get_dimension(d) for d in kdims if d not in reduced_dims]
    if len(set(keys)) != len(keys) and not force:
        raise Exception("Given dimension labels not sufficient "
                        "to address all values uniquely")
    if len(keys):
        cdims = {self.get_dimension(d): self.dimension_values(d)[0]
                 for d in reduced_dims}
    else:
        cdims = {}
    with item_check(indices == sorted(indices)):
        return self.clone(reindexed_items, kdims=dimensions, cdims=cdims)
|
def start_new_particles(self):
    """Start some new particles from the emitters. We roll the dice
    starts_at_once times, seeing if we can start each particle based
    on starts_prob. If we start, the particle gets a color from
    the palette and a velocity from the vel list."""
|
    for e_pos, e_dir, e_vel, e_range, e_color, e_pal in self.emitters:
        for roll in range(self.starts_at_once):
            if random.random() < self.starts_prob:  # Start one?
                p_vel = self.vel[random.choice(len(self.vel))]
                if e_dir < 0 or (e_dir == 0 and random.random() > 0.5):
                    p_vel = -p_vel
                self.particles.append((p_vel,                          # Velocity
                                       e_pos,                          # Position
                                       int(e_range // abs(p_vel)),     # Steps to live
                                       e_pal[random.choice(len(e_pal))],  # Color
                                       255))
|
def shorten_paths(path_list, is_unsaved):
    """Takes a list of paths and tries to "intelligently" shorten them all. The
    aim is to make it clear to the user where the paths differ, as that is
    likely what they care about. Note that this operates on a list of paths,
    not on individual paths.
    If the path ends in an actual file name, it will be trimmed off."""
|
    # TODO: at the end, if the path is too long, should do a more dumb kind of
    # shortening, but not completely dumb.
    # Convert the path strings to a list of tokens and start building the
    # new_path using the drive
    path_list = path_list[:]
    # Make a local copy
    new_path_list = []
    for ii, (path, is_unsav) in enumerate(zip(path_list, is_unsaved)):
        if is_unsav:
            new_path_list.append(_('unsaved file'))
            path_list[ii] = None
        else:
            drive, path = osp.splitdrive(osp.dirname(path))
            new_path_list.append(drive + osp.sep)
            path_list[ii] = [part for part in path.split(osp.sep) if part]

    def recurse_level(level_idx):
        sep = os.sep
        # If toks are all empty we need not have recursed here
        if not any(level_idx.values()):
            return
        # Firstly, find the longest common prefix for all in the level
        # s = len of longest common prefix
        sample_toks = list(level_idx.values())[0]
        if not sample_toks:
            s = 0
        else:
            for s, sample_val in enumerate(sample_toks):
                if not all(len(toks) > s and toks[s] == sample_val
                           for toks in level_idx.values()):
                    break
        # Shorten longest common prefix
        if s == 0:
            short_form = ''
        else:
            if s == 1:
                short_form = sample_toks[0]
            elif s == 2:
                short_form = sample_toks[0] + sep + sample_toks[1]
            else:
                short_form = "..." + sep + sample_toks[s - 1]
            for idx in level_idx:
                new_path_list[idx] += short_form + sep
                level_idx[idx] = level_idx[idx][s:]
        # Group the remaining bit after the common prefix, shorten, and recurse
        while level_idx:
            k, group = 0, level_idx
            # k is the length of the group's common prefix
            while True:  # Abort if we've gone beyond end of one or more in the group
                prospective_group = {idx: toks for idx, toks in group.items()
                                     if len(toks) == k}
                if prospective_group:
                    if k == 0:  # we spit out the group with no suffix
                        group = prospective_group
                    break
                # Only keep going if all n still match on the kth token
                _, sample_toks = next(iteritems(group))
                prospective_group = {idx: toks for idx, toks in group.items()
                                     if toks[k] == sample_toks[k]}
                if len(prospective_group) == len(group) or k == 0:
                    group = prospective_group
                    k += 1
                else:
                    break
            _, sample_toks = next(iteritems(group))
            if k == 0:
                short_form = ''
            elif k == 1:
                short_form = sample_toks[0]
            elif k == 2:
                short_form = sample_toks[0] + sep + sample_toks[1]
            else:  # k > 2
                short_form = sample_toks[0] + "..." + sep + sample_toks[k - 1]
            for idx in group.keys():
                new_path_list[idx] += short_form + (sep if k > 0 else '')
                del level_idx[idx]
            recurse_level({idx: toks[k:] for idx, toks in group.items()})

    recurse_level({i: pl for i, pl in enumerate(path_list) if pl})
    return [path.rstrip(os.sep) for path in new_path_list]
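# A hedged usage sketch; it relies on the module-level helpers this function
# already uses (osp as os.path, the _ translation helper, iteritems), and the
# input paths below are purely illustrative:
example_paths = ['/home/user/projects/app/module.py',
                 '/home/user/projects/lib/module.py']
shortened = shorten_paths(example_paths, is_unsaved=[False, False])
# Each entry keeps enough of the differing tail ('app', 'lib') to tell the paths apart.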
|
def get_top_exchanges(fsym, tsym, limit=5):
    """Get top exchanges by 24 hour trading volume for the currency pair.
    Args:
        fsym: FROM symbol.
        tsym: TO symbol.
        limit: Number of results. Default value returns top 5 exchanges.
    Returns:
        Function returns a list containing a dictionary for each result:
        [{'exchange': ..., 'fromSymbol': ..., 'toSymbol': ...,
          'volume24h': ..., 'volume24hTo': ...}, ...]
        The list is ordered based on the volume of the FROM currency, starting
        with the highest value."""
|
    # load data
    url = build_url('exchanges', fsym=fsym, tsym=tsym, limit=limit)
    data = load_data(url)
    # price_data = data['Data']
    # return [{'exchange': p['exchange'],
    #          'volume24hto': p['volume24hTo']} for p in price_data]
    return data['Data']
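# A hedged usage sketch; this performs a live request through the module's
# build_url/load_data helpers, so the symbols below are illustrative only:
top = get_top_exchanges('BTC', 'USD', limit=3)
for entry in top:
    print(entry['exchange'], entry['volume24h'])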
|
def get_asset_search_session(self):
    """Gets an asset search session.
    return: (osid.repository.AssetSearchSession) - an ``AssetSearchSession``
    raise: OperationFailed - unable to complete request
    raise: Unimplemented - ``supports_asset_search()`` is ``false``
    *compliance: optional -- This method must be implemented if
    ``supports_asset_search()`` is ``true``.*"""
|
    if not self.supports_asset_search():
        raise errors.Unimplemented()
    # pylint: disable=no-member
    return sessions.AssetSearchSession(runtime=self._runtime)
|
def from_molecule(cls, mol, theory, charge=None, spin_multiplicity=None,
                  basis_set="6-31g", basis_set_option="cartesian", title=None,
                  operation="optimize", theory_directives=None,
                  alternate_directives=None):
    """Very flexible arguments to support many types of potential setups.
    Users should use more friendly static methods unless they need the
    flexibility.
    Args:
        mol: Input molecule
        charge: Charge of the molecule. If None, charge on molecule is
            used. Defaults to None. This allows the input file to be set a
            charge independently from the molecule itself.
        spin_multiplicity: Spin multiplicity of molecule. Defaults to None,
            which means that the spin multiplicity is set to 1 if the
            molecule has no unpaired electrons and to 2 if there are
            unpaired electrons.
        basis_set: The basis set to be used as string or a dict. E.g.,
            {"C": "6-311++G**", "H": "6-31++G**"} or "6-31G". If string,
            the same basis set is used for all elements.
        basis_set_option: cartesian (default) | spherical
        title: Title for the task. Defaults to None, which means a title
            based on the theory and operation of the task is
            autogenerated.
        theory: The theory used for the task. Defaults to "dft".
        operation: The operation for the task. Defaults to "optimize".
        theory_directives: A dict of theory directives. For example,
            if you are running dft calculations, you may specify the
            exchange correlation functional using {"xc": "b3lyp"}.
        alternate_directives: A dict of alternate directives. For
            example, to perform cosmo calculations with DFT, you'd supply
            {'cosmo': "cosmo"}."""
|
    title = title if title is not None else "{} {} {}".format(
        re.sub(r"\s", "", mol.formula), theory, operation)
    charge = charge if charge is not None else mol.charge
    nelectrons = -charge + mol.charge + mol.nelectrons
    if spin_multiplicity is not None:
        spin_multiplicity = spin_multiplicity
        if (nelectrons + spin_multiplicity) % 2 != 1:
            raise ValueError(
                "Charge of {} and spin multiplicity of {} is "
                "not possible for this molecule".format(charge, spin_multiplicity))
    elif charge == mol.charge:
        spin_multiplicity = mol.spin_multiplicity
    else:
        spin_multiplicity = 1 if nelectrons % 2 == 0 else 2
    elements = set(mol.composition.get_el_amt_dict().keys())
    if isinstance(basis_set, str):
        basis_set = {el: basis_set for el in elements}
    basis_set_option = basis_set_option
    return NwTask(charge, spin_multiplicity, basis_set,
                  basis_set_option=basis_set_option, title=title, theory=theory,
                  operation=operation, theory_directives=theory_directives,
                  alternate_directives=alternate_directives)
|
def cleanup(graph, subgraphs):
    """Clean up the metadata in the subgraphs.
    :type graph: pybel.BELGraph
    :type subgraphs: dict[Any, pybel.BELGraph]"""
|
    for subgraph in subgraphs.values():
        update_node_helper(graph, subgraph)
        update_metadata(graph, subgraph)
|
def library_line(self, file_name):
    """Specifies the GULP library file to read species and potential parameters.
    If using a library, don't specify species and potential
    in the input file, and vice versa. Make sure the elements of the
    structure are in the library file.
    Args:
        file_name: Name of GULP library file
    Returns:
        GULP input string specifying the library option"""
|
    gulplib_set = lambda: 'GULP_LIB' in os.environ.keys()
    readable = lambda f: os.path.isfile(f) and os.access(f, os.R_OK)
    # dirpath, fname = os.path.split(file_name)
    # if dirpath:  # Full path specified
    #     if readable(file_name):
    #         gin = 'library ' + file_name
    #     else:
    #         raise GulpError('GULP Library not found')
    # else:
    #     fpath = os.path.join(os.getcwd(), file_name)  # Check current dir
    #     if readable(fpath):
    #         gin = 'library ' + fpath
    #     elif gulplib_set():
    #         fpath = os.path.join(os.environ['GULP_LIB'], file_name)
    #         if readable(fpath):
    #             gin = 'library ' + file_name
    #         else:
    #             raise GulpError('GULP Library not found')
    #     else:
    #         raise GulpError('GULP Library not found')
    # gin += "\n"
    # return gin
    gin = ""
    dirpath, fname = os.path.split(file_name)
    if dirpath and readable(file_name):  # Full path specified
        gin = 'library ' + file_name
    else:
        fpath = os.path.join(os.getcwd(), file_name)  # Check current dir
        if readable(fpath):
            gin = 'library ' + fpath
        elif gulplib_set():  # Check the GULP_LIB path
            fpath = os.path.join(os.environ['GULP_LIB'], file_name)
            if readable(fpath):
                gin = 'library ' + file_name
    if gin:
        return gin + "\n"
    else:
        raise GulpError('GULP Library not found')
|
def calculate_size(name, reduction):
    """Calculates the request payload size"""
|
    data_size = 0
    data_size += calculate_size_str(name)
    data_size += INT_SIZE_IN_BYTES
    return data_size
|
def gtk_threadsafe(func):
    '''Decorator to make the wrapped function threadsafe by forcing it to execute
    within the GTK main thread.
    .. versionadded:: 0.18
    .. versionchanged:: 0.22
        Add support for keyword arguments in callbacks by supporting functions
        wrapped by `functools.partial()`. Also, ignore the callback return value
        to prevent the callback from being called repeatedly indefinitely. See the
        `gobject.idle_add() documentation`_ for further information.
    .. _`gobject.idle_add() documentation`: http://library.isr.ist.utl.pt/docs/pygtk2reference/gobject-functions.html#function-gobject--idle-add
    Parameters
    ----------
    func : function or functools.partial'''
|
    # Set up GDK threading.
    # XXX This must be done to support running multiple threads in GTK
    # applications.
    gtk.gdk.threads_init()
    # Support functools.partial by unwrapping to the underlying function.
    wraps_func = func.func if isinstance(func, functools.partial) else func

    @functools.wraps(wraps_func)
    def _gtk_threadsafe(*args):
        def _no_return_func(*args):
            func(*args)
        gobject.idle_add(_no_return_func, *args)
    return _gtk_threadsafe
|
def caesar_cipher(message, key):
    """Caesar cipher.
    :param message: the data to be encrypted
    :param key: the encryption shift (key)
    :return: the encrypted string"""
|
    LETTERS = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
    translated = ''
    message = message.upper()
    for symbol in message:
        if symbol in LETTERS:
            num = LETTERS.find(symbol)
            num = num + key
            if num >= len(LETTERS):
                num = num - len(LETTERS)
            elif num < 0:
                num = num + len(LETTERS)
            translated = translated + LETTERS[num]
        else:
            translated = translated + symbol
    return translated
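# A quick check of the shift behaviour above; note the input is uppercased and
# a negative key reverses the shift:
assert caesar_cipher('Hello, World', 3) == 'KHOOR, ZRUOG'
assert caesar_cipher('KHOOR, ZRUOG', -3) == 'HELLO, WORLD'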
|
def main():
    """NAME
        hysteresis_magic.py
    DESCRIPTION
        calculates hysteresis parameters and saves them in rmag_hysteresis format file
        makes plots if option selected
    SYNTAX
        hysteresis_magic.py [command line options]
    OPTIONS
        -h prints help message and quits
        -usr USER: identify user, default is ""
        -f: specify input file, default is agm_measurements.txt
        -fh: specify rmag_hysteresis.txt input file
        -F: specify output file, default is rmag_hysteresis.txt
        -P: do not make the plots
        -spc SPEC: specify specimen name to plot and quit
        -sav save all plots and quit
        -fmt [png, svg, eps, jpg]"""
|
    args = sys.argv
    PLT = 1
    plots = 0
    user, meas_file, rmag_out, rmag_file = "", "agm_measurements.txt", "rmag_hysteresis.txt", ""
    pltspec = ""
    dir_path = '.'
    fmt = 'svg'
    verbose = pmagplotlib.verbose
    version_num = pmag.get_version()
    if '-WD' in args:
        ind = args.index('-WD')
        dir_path = args[ind + 1]
    if "-h" in args:
        print(main.__doc__)
        sys.exit()
    if "-usr" in args:
        ind = args.index("-usr")
        user = args[ind + 1]
    if '-f' in args:
        ind = args.index("-f")
        meas_file = args[ind + 1]
    if '-F' in args:
        ind = args.index("-F")
        rmag_out = args[ind + 1]
    if '-fh' in args:
        ind = args.index("-fh")
        rmag_file = args[ind + 1]
        rmag_file = dir_path + '/' + rmag_file
    if '-P' in args:
        PLT = 0
        irm_init, imag_init = -1, -1
    if '-sav' in args:
        verbose = 0
        plots = 1
    if '-spc' in args:
        ind = args.index("-spc")
        pltspec = args[ind + 1]
        verbose = 0
        plots = 1
    if '-fmt' in args:
        ind = args.index("-fmt")
        fmt = args[ind + 1]
    rmag_out = dir_path + '/' + rmag_out
    meas_file = dir_path + '/' + meas_file
    rmag_rem = dir_path + "/rmag_remanence.txt"
    meas_data, file_type = pmag.magic_read(meas_file)
    if file_type != 'magic_measurements':
        print(main.__doc__)
        print('bad file')
        sys.exit()
    # initialize some variables
    # define figure numbers for hyst, deltaM, DdeltaM curves
    HystRecs, RemRecs = [], []
    HDD = {}
    if verbose:
        if verbose and PLT:
            print("Plots may be on top of each other - use mouse to place ")
    if PLT:
        HDD['hyst'], HDD['deltaM'], HDD['DdeltaM'] = 1, 2, 3
        pmagplotlib.plot_init(HDD['DdeltaM'], 5, 5)
        pmagplotlib.plot_init(HDD['deltaM'], 5, 5)
        pmagplotlib.plot_init(HDD['hyst'], 5, 5)
        imag_init = 0
        irm_init = 0
    else:
        HDD['hyst'], HDD['deltaM'], HDD['DdeltaM'], HDD['irm'], HDD['imag'] = 0, 0, 0, 0, 0
    if rmag_file != "":
        hyst_data, file_type = pmag.magic_read(rmag_file)
    # get list of unique experiment names and specimen names
    experiment_names, sids = [], []
    for rec in meas_data:
        meths = rec['magic_method_codes'].split(':')
        methods = []
        for meth in meths:
            methods.append(meth.strip())
        if 'LP-HYS' in methods:
            if 'er_synthetic_name' in list(rec.keys()) and rec['er_synthetic_name'] != "":
                rec['er_specimen_name'] = rec['er_synthetic_name']
            if rec['magic_experiment_name'] not in experiment_names:
                experiment_names.append(rec['magic_experiment_name'])
            if rec['er_specimen_name'] not in sids:
                sids.append(rec['er_specimen_name'])
    k = 0
    locname = ''
    if pltspec != "":
        k = sids.index(pltspec)
        print(sids[k])
    while k < len(sids):
        s = sids[k]
        if verbose and PLT:
            print(s, k + 1, 'out of ', len(sids))
        # B, M for hysteresis, Bdcd, Mdcd for irm-dcd data
        B, M, Bdcd, Mdcd = [], [], [], []
        Bimag, Mimag = [], []
        # Bimag, Mimag for initial magnetization curves
        first_dcd_rec, first_rec, first_imag_rec = 1, 1, 1
        for rec in meas_data:
            methcodes = rec['magic_method_codes'].split(':')
            meths = []
            for meth in methcodes:
                meths.append(meth.strip())
            if rec['er_specimen_name'] == s and "LP-HYS" in meths:
                B.append(float(rec['measurement_lab_field_dc']))
                M.append(float(rec['measurement_magn_moment']))
                if first_rec == 1:
                    e = rec['magic_experiment_name']
                    HystRec = {}
                    first_rec = 0
                    if "er_location_name" in list(rec.keys()):
                        HystRec["er_location_name"] = rec["er_location_name"]
                        locname = rec['er_location_name'].replace('/', '-')
                    if "er_sample_name" in list(rec.keys()):
                        HystRec["er_sample_name"] = rec["er_sample_name"]
                    if "er_site_name" in list(rec.keys()):
                        HystRec["er_site_name"] = rec["er_site_name"]
                    if "er_synthetic_name" in list(rec.keys()) and rec['er_synthetic_name'] != "":
                        HystRec["er_synthetic_name"] = rec["er_synthetic_name"]
                    else:
                        HystRec["er_specimen_name"] = rec["er_specimen_name"]
            if rec['er_specimen_name'] == s and "LP-IRM-DCD" in meths:
                Bdcd.append(float(rec['treatment_dc_field']))
                Mdcd.append(float(rec['measurement_magn_moment']))
                if first_dcd_rec == 1:
                    RemRec = {}
                    irm_exp = rec['magic_experiment_name']
                    first_dcd_rec = 0
                    if "er_location_name" in list(rec.keys()):
                        RemRec["er_location_name"] = rec["er_location_name"]
                    if "er_sample_name" in list(rec.keys()):
                        RemRec["er_sample_name"] = rec["er_sample_name"]
                    if "er_site_name" in list(rec.keys()):
                        RemRec["er_site_name"] = rec["er_site_name"]
                    if "er_synthetic_name" in list(rec.keys()) and rec['er_synthetic_name'] != "":
                        RemRec["er_synthetic_name"] = rec["er_synthetic_name"]
                    else:
                        RemRec["er_specimen_name"] = rec["er_specimen_name"]
            if rec['er_specimen_name'] == s and "LP-IMAG" in meths:
                if first_imag_rec == 1:
                    imag_exp = rec['magic_experiment_name']
                    first_imag_rec = 0
                Bimag.append(float(rec['measurement_lab_field_dc']))
                Mimag.append(float(rec['measurement_magn_moment']))
        # now plot the hysteresis curve
        if len(B) > 0:
            hmeths = []
            for meth in meths:
                hmeths.append(meth)
            hpars = pmagplotlib.plot_hdd(HDD, B, M, e)
            if verbose and PLT:
                pmagplotlib.draw_figs(HDD)
            # get prior interpretations from hyst_data
            if rmag_file != "":
                hpars_prior = {}
                for rec in hyst_data:
                    if rec['magic_experiment_names'] == e:
                        if rec['hysteresis_bcr'] != "" and rec['hysteresis_mr_moment'] != "":
                            hpars_prior['hysteresis_mr_moment'] = rec['hysteresis_mr_moment']
                            hpars_prior['hysteresis_ms_moment'] = rec['hysteresis_ms_moment']
                            hpars_prior['hysteresis_bc'] = rec['hysteresis_bc']
                            hpars_prior['hysteresis_bcr'] = rec['hysteresis_bcr']
                            break
                if verbose:
                    pmagplotlib.plot_hpars(HDD, hpars_prior, 'ro')
            else:
                if verbose:
                    pmagplotlib.plot_hpars(HDD, hpars, 'bs')
            HystRec['hysteresis_mr_moment'] = hpars['hysteresis_mr_moment']
            HystRec['hysteresis_ms_moment'] = hpars['hysteresis_ms_moment']
            HystRec['hysteresis_bc'] = hpars['hysteresis_bc']
            HystRec['hysteresis_bcr'] = hpars['hysteresis_bcr']
            HystRec['hysteresis_xhf'] = hpars['hysteresis_xhf']
            HystRec['magic_experiment_names'] = e
            HystRec['magic_software_packages'] = version_num
            if hpars["magic_method_codes"] not in hmeths:
                hmeths.append(hpars["magic_method_codes"])
            methods = ""
            for meth in hmeths:
                methods = methods + meth.strip() + ":"
            HystRec["magic_method_codes"] = methods[:-1]
            HystRec["er_citation_names"] = "This study"
            HystRecs.append(HystRec)
        if len(Bdcd) > 0:
            rmeths = []
            for meth in meths:
                rmeths.append(meth)
            if verbose and PLT:
                print('plotting IRM')
            if irm_init == 0:
                HDD['irm'] = 5
                pmagplotlib.plot_init(HDD['irm'], 5, 5)
                irm_init = 1
            rpars = pmagplotlib.plot_irm(HDD['irm'], Bdcd, Mdcd, irm_exp)
            RemRec['remanence_mr_moment'] = rpars['remanence_mr_moment']
            RemRec['remanence_bcr'] = rpars['remanence_bcr']
            RemRec['magic_experiment_names'] = irm_exp
            if rpars["magic_method_codes"] not in meths:
                meths.append(rpars["magic_method_codes"])
            methods = ""
            for meth in rmeths:
                methods = methods + meth.strip() + ":"
            RemRec["magic_method_codes"] = methods[:-1]
            RemRec["er_citation_names"] = "This study"
            RemRecs.append(RemRec)
        else:
            if irm_init:
                pmagplotlib.clearFIG(HDD['irm'])
        if len(Bimag) > 0:
            if verbose:
                print('plotting initial magnetization curve')
            # first normalize by Ms
            Mnorm = []
            for m in Mimag:
                Mnorm.append(m / float(hpars['hysteresis_ms_moment']))
            if imag_init == 0:
                HDD['imag'] = 4
                pmagplotlib.plot_init(HDD['imag'], 5, 5)
                imag_init = 1
            pmagplotlib.plot_imag(HDD['imag'], Bimag, Mnorm, imag_exp)
        else:
            if imag_init:
                pmagplotlib.clearFIG(HDD['imag'])
        files = {}
        if plots:
            if pltspec != "":
                s = pltspec
            files = {}
            for key in list(HDD.keys()):
                files[key] = locname + '_' + s + '_' + key + '.' + fmt
            pmagplotlib.save_plots(HDD, files)
            if pltspec != "":
                sys.exit()
        if verbose and PLT:
            pmagplotlib.draw_figs(HDD)
            ans = input("S[a]ve plots, [s]pecimen name, [q]uit, <return> to continue\n ")
            if ans == "a":
                files = {}
                for key in list(HDD.keys()):
                    files[key] = locname + '_' + s + '_' + key + '.' + fmt
                pmagplotlib.save_plots(HDD, files)
            if ans == '':
                k += 1
            if ans == "p":
                del HystRecs[-1]
                k -= 1
            if ans == 'q':
                print("Good bye")
                sys.exit()
            if ans == 's':
                keepon = 1
                specimen = input('Enter desired specimen name (or first part there of): ')
                while keepon == 1:
                    try:
                        k = sids.index(specimen)
                        keepon = 0
                    except:
                        tmplist = []
                        for qq in range(len(sids)):
                            if specimen in sids[qq]:
                                tmplist.append(sids[qq])
                        print(specimen, " not found, but this was: ")
                        print(tmplist)
                        specimen = input('Select one or try again\n ')
                        k = sids.index(specimen)
        else:
            k += 1
        if len(B) == 0 and len(Bdcd) == 0:
            if verbose:
                print('skipping this one - no hysteresis data')
            k += 1
    if rmag_out == "" and ans == 's' and verbose:
        really = input(" Do you want to overwrite the existing rmag_hystersis.txt file? 1/[0] ")
        if really == "":
            print('i thought not - goodbye')
            sys.exit()
        rmag_out = "rmag_hysteresis.txt"
    if len(HystRecs) > 0:
        pmag.magic_write(rmag_out, HystRecs, "rmag_hysteresis")
        if verbose:
            print("hysteresis parameters saved in ", rmag_out)
    if len(RemRecs) > 0:
        pmag.magic_write(rmag_rem, RemRecs, "rmag_remanence")
        if verbose:
            print("remanence parameters saved in ", rmag_rem)
|
def checksum(self):
    """Grab checksum string"""
|
    md5sum, md5sum64 = [], []
    for line in self.SLACKBUILDS_TXT.splitlines():
        if line.startswith(self.line_name):
            sbo_name = line[17:].strip()
        if line.startswith(self.line_md5_64):
            if sbo_name == self.name and line[26:].strip():
                md5sum64 = line[26:].strip().split()
        if line.startswith(self.line_md5):
            if sbo_name == self.name and line[19:].strip():
                md5sum = line[19:].strip().split()
    return self._select_md5sum_arch(md5sum, md5sum64)
|
def send_message(target, data, auth=None, debug=False):
    """Send a single message to an AMQP endpoint.
    :param target: The target AMQP endpoint.
    :type target: str, bytes or ~uamqp.address.Target
    :param data: The contents of the message to send.
    :type data: str, bytes or ~uamqp.message.Message
    :param auth: The authentication credentials for the endpoint.
        This should be one of the subclasses of uamqp.authentication.AMQPAuth. Currently
        this includes:
        - uamqp.authentication.SASLAnonymous
        - uamqp.authentication.SASLPlain
        - uamqp.authentication.SASTokenAuth
        If no authentication is supplied, SASLAnonymous will be used by default.
    :type auth: ~uamqp.authentication.common.AMQPAuth
    :param debug: Whether to turn on network trace logs. If `True`, trace logs
        will be logged at INFO level. Default is `False`.
    :type debug: bool
    :return: A list of states for each message sent.
    :rtype: list[~uamqp.constants.MessageState]"""
|
    message = data if isinstance(data, Message) else Message(body=data)
    with SendClient(target, auth=auth, debug=debug) as send_client:
        send_client.queue_message(message)
        return send_client.send_all_messages()
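# A hedged usage sketch; the endpoint URI below is a placeholder, and a reachable
# AMQP service plus valid credentials are required for this to actually send:
target_uri = "amqps://example-namespace.servicebus.windows.net/example-queue"  # hypothetical
results = send_message(target_uri, b"hello world")
print(results)  # list of uamqp.constants.MessageState values, one per message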
|
def _objectify(self, node, binding, depth, path):
    """Given an RDF node URI (and its associated schema), return an
    object from the ``graph`` that represents the information available
    about this node."""
|
    if binding.is_object:
        obj = {'$schema': binding.path}
        for (s, p, o) in self.graph.triples((node, None, None)):
            prop = binding.get_property(p)
            if prop is None or depth <= 1 or o in path:
                continue
            # This is slightly odd but yields purty objects:
            if depth <= 2 and (prop.is_array or prop.is_object):
                continue
            sub_path = path.union([node])
            value = self._objectify(o, prop, depth - 1, sub_path)
            if prop.is_array and prop.name in obj:
                obj[prop.name].extend(value)
            else:
                obj[prop.name] = value
        return obj
    elif binding.is_array:
        for item in binding.items:
            return [self._objectify(node, item, depth, path)]
    else:
        return node.toPython()
|
def _convert_errors(func):
    """Decorator to convert thrown errors to Voluptuous format."""
|
    cast_Invalid = lambda e: Invalid(
        u"{message}, expected {expected}".format(message=e.message, expected=e.expected)
        if e.expected != u'-none-' else e.message,
        e.path,
        six.text_type(e))

    @wraps(func)
    def wrapper(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except good.SchemaError as e:
            raise SchemaError(six.text_type(e))
        except good.MultipleInvalid as ee:
            raise MultipleInvalid([cast_Invalid(e) for e in ee])
        except good.Invalid as e:
            # Since voluptuous throws MultipleInvalid almost always -- we follow the same pattern...
            raise MultipleInvalid([cast_Invalid(e)])
    return wrapper
|
def remove_file_by_id(self, file_id, target_name=None):
    """Removes the file id from the given target name. If no target name is given, the file is removed
    from all targets.
    :param file_id: identifier of the file to be removed
    :param target_name: Target name or list of target names where the file should be removed from (None for every
        target)
    :return: True if the file id was removed. False if the file was not removed."""
|
    file_ref = self.get_file_by_id(file_id)
    if file_ref is None:
        return False
    for target in self.objects.get_targets(target_name):
        for build_phase_id in target.buildPhases:
            build_phase = self.objects[build_phase_id]
            for build_file_id in build_phase.files:
                build_file = self.objects[build_file_id]
                if build_file.fileRef == file_ref.get_id():
                    # remove the build file from the phase
                    build_phase.remove_build_file(build_file)
            # if the build_phase is empty remove it too, unless it's a shell script.
            if build_phase.files.__len__() == 0 and build_phase.isa != u'PBXShellScriptBuildPhase':
                # remove the build phase from the target
                target.remove_build_phase(build_phase)
    # remove it iff it's removed from all targets or no build file references it
    for build_file in self.objects.get_objects_in_section(u'PBXBuildFile'):
        if build_file.fileRef == file_ref.get_id():
            return True
    # remove the file from any groups if there is no reference from any target
    for group in self.objects.get_objects_in_section(u'PBXGroup'):
        if file_ref.get_id() in group.children:
            group.remove_child(file_ref)
    # the file is not referenced in any build file, remove it
    del self.objects[file_ref.get_id()]
    return True
|
def write_xml(self, xmlfile):
    """Write the XML model for this analysis component."""
|
    xmlfile = self.get_model_path(xmlfile)
    self.logger.info('Writing %s...', xmlfile)
    self.like.writeXml(str(xmlfile))
|
def get_machine_group(self, project_name, group_name):
    """get machine group in a project
    Unsuccessful operation will cause a LogException.
    :type project_name: string
    :param project_name: the Project name
    :type group_name: string
    :param group_name: the group name to get
    :return: GetMachineGroupResponse
    :raise: LogException"""
|
    headers = {}
    params = {}
    resource = "/machinegroups/" + group_name
    (resp, headers) = self._send("GET", project_name, None, resource, params, headers)
    return GetMachineGroupResponse(resp, headers)
|
def store(self):
    '''Write content of the entire cache to disk'''
|
    if msgpack is None:
        log.error('Cache cannot be stored on disk: msgpack is missing')
    else:
        # TODO Dir hashing?
        try:
            with salt.utils.files.fopen(self._path, 'wb+') as fp_:
                cache = {
                    "CacheDisk_data": self._dict,
                    "CacheDisk_cachetime": self._key_cache_time,
                }
                msgpack.dump(cache, fp_, use_bin_type=True)
        except (IOError, OSError) as err:
            log.error('Error storing cache data to the disk: %s', err)
|
def parse_type(source: SourceType, **options: dict) -> TypeNode:
    """Parse the AST for a given string containing a GraphQL Type.
    Throws GraphQLError if a syntax error is encountered.
    This is useful within tools that operate upon GraphQL Types directly and
    in isolation of complete GraphQL documents.
    Consider providing the results to the utility function: `type_from_ast()`."""
|
    if isinstance(source, str):
        source = Source(source)
    lexer = Lexer(source, **options)
    expect_token(lexer, TokenKind.SOF)
    type_ = parse_type_reference(lexer)
    expect_token(lexer, TokenKind.EOF)
    return type_
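# A small usage sketch (graphql-core style); the returned value is an AST type node:
ast_node = parse_type("[Int!]")
print(ast_node.kind)  # expected to be the list-type node wrapping a non-null named type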
|
def get_export_launch_description_form(self):
    """Returns a form for editing the virtual system description.
    Since the data for the form are fetched from the cloud, a
    progress object is also returned to indicate if/when the form
    is ready to be used.
    out form of type :class:`IVirtualSystemDescriptionForm`
        An IForm instance for editing the virtual system description.
    return progress of type :class:`IProgress`
        Progress object to track the operation completion."""
|
    (progress, form) = self._call("getExportLaunchDescriptionForm")
    progress = IProgress(progress)
    form = IVirtualSystemDescriptionForm(form)
    return (progress, form)
|
def _fill_cache(self, num=None):
    """Fills the result cache with 'num' more entries (or until the results
    iterator is exhausted)."""
|
    if self._iter:
        try:
            for i in range(num or ITER_CHUNK_SIZE):
                self._result_cache.append(next(self._iter))
        except StopIteration:
            self._iter = None
|
def register(self, object_tool_class, model_class=None):
    """Registers the given model(s) with the given object tool class.
    The model(s) should be Model classes, not instances.
    If a model class isn't given, the object tool class will be registered
    for all models.
    If a model is already registered, this will raise AlreadyRegistered.
    If a model is abstract, this will raise ImproperlyConfigured."""
|
    if not object_tool_class:
        return None
    # Don't validate unless required.
    if object_tool_class and settings.DEBUG:
        from object_tools.validation import validate
        validate(object_tool_class, model_class)
        # = lambda model, adminclass: None
    if not model_class:
        models = get_models()
    else:
        models = [model_class, ]
    for model in models:
        if model._meta.abstract:
            raise ImproperlyConfigured(
                'The model %s is abstract, so it cannot be registered '
                'with object tools.' % model.__name__)
        # Instantiate the object_tools class to save in the registry
        if model in self._registry:
            self._registry[model].append(object_tool_class(model))
        else:
            self._registry[model] = [object_tool_class(model), ]
|
def OnOpenFile(self, event):
    """Request to open a new profile file"""
|
    dialog = wx.FileDialog(self, style=wx.OPEN | wx.FD_MULTIPLE)
    if dialog.ShowModal() == wx.ID_OK:
        paths = dialog.GetPaths()
        if self.loader:
            # we've already got a displayed data-set, open new window...
            frame = MainFrame()
            frame.Show(True)
            frame.load(*paths)
        else:
            self.load(*paths)
|
def averages(self, **kwargs):
    """Get the average time/uptime value for a specified check and time
    period.
    Optional parameters:
    * time_from -- Start time of period. Format is UNIX timestamp
        Type: Integer
        Default: 0
    * time_to -- End time of period. Format is UNIX timestamp
        Type: Integer
        Default: Current time
    * probes -- Filter to only use results from a list of probes.
        Format is a comma separated list of probe identifiers
        Type: String
        Default: All probes
    * includeuptime -- Include uptime information
        Type: Boolean
        Default: False
    * bycountry -- Split response times into country groups
        Type: Boolean
        Default: False
    * byprobe -- Split response times into probe groups
        Type: Boolean
        Default: False
    Returned structure:
    'responsetime':
        'to': <Integer> Start time of period
        'from': <Integer> End time of period
        'avgresponse': <Integer> Total average response time in milliseconds
    <More can be included with optional parameters>"""
|
    # 'from' is a reserved word, use time_from instead
    if kwargs.get('time_from'):
        kwargs['from'] = kwargs.get('time_from')
        del kwargs['time_from']
    if kwargs.get('time_to'):
        kwargs['to'] = kwargs.get('time_to')
        del kwargs['time_to']
    # Warn user about unhandled parameters
    for key in kwargs:
        if key not in ['from', 'to', 'probes', 'includeuptime', 'bycountry', 'byprobe']:
            sys.stderr.write("'%s'" % key + ' is not a valid argument of' +
                             '<PingdomCheck.averages()\n')
    response = self.pingdom.request('GET', 'summary.average/%s' % self.id, kwargs)
    return response.json()['summary']
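# A self-contained sketch of the keyword mapping above: 'from'/'to' are awkward to
# pass directly as Python keyword arguments, so callers use time_from/time_to instead.
kwargs = {'time_from': 1577836800, 'time_to': 1580515200}  # hypothetical timestamps
if kwargs.get('time_from'):
    kwargs['from'] = kwargs.pop('time_from')
if kwargs.get('time_to'):
    kwargs['to'] = kwargs.pop('time_to')
assert sorted(kwargs) == ['from', 'to']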
|
def pull(self, *, index=None):
    """Pull item from the chain."""
|
    item = self.__list.pop(index)
    name = getattr(item, 'name', None)
    if name is not None:
        del self.__dict[name]
    return item
|
def _create_batches(self, X, batch_size, shuffle_data=True):
    """Create batches out of a sequence of data.
    This function will append zeros to the end of your data to ensure that
    all batches are even-sized. These are masked out during training."""
|
    if shuffle_data:
        X = shuffle(X)
    if batch_size > X.shape[0]:
        batch_size = X.shape[0]
    max_x = int(np.ceil(X.shape[0] / batch_size))
    X = np.resize(X, (max_x, batch_size, X.shape[-1]))
    return X
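# A self-contained illustration of the np.resize reshaping used above; note that
# np.resize fills the extra slots with repeated copies of the data, which is why
# the padded entries need to be masked out during training.
import numpy as np
X = np.arange(10 * 3).reshape(10, 3)                 # 10 samples, 3 features
batches = np.resize(X, (int(np.ceil(10 / 4)), 4, 3))
print(batches.shape)                                 # (3, 4, 3): 3 batches of 4 samples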
|
def generate_sample_set(self, tags=None):
    """Generate a sample_set that matches the tags, or all if tags are not specified.
    Args:
        tags: Match samples against this tag list (or all if not specified)
    Returns:
        The sample_set of those samples matching the tags"""
|
    if isinstance(tags, str):
        tags = [tags]
    md5_list = self.data_store.tag_match(tags)
    return self.store_sample_set(md5_list)
|
def set_state(self, state, speed=None):
    """:param state: bool
    :param speed: a string, one of ["lowest", "low",
        "medium", "high", "auto"]; defaults to the last speed
    :return: nothing"""
|
    desired_state = {"powered": state}
    if state:
        brightness = self._to_brightness.get(speed or self.current_fan_speed(), 0.33)
        desired_state.update({'brightness': brightness})
    response = self.api_interface.set_device_state(self, {"desired_state": desired_state})
    self._update_state_from_response(response)
|
def define(self, name: str, default: Any = None, type: type = None,
           help: str = None, metavar: str = None, multiple: bool = False,
           group: str = None, callback: Callable[[Any], None] = None) -> None:
    """Defines a new command line option.
    ``type`` can be any of `str`, `int`, `float`, `bool`,
    `~datetime.datetime`, or `~datetime.timedelta`. If no ``type``
    is given but a ``default`` is, ``type`` is the type of
    ``default``. Otherwise, ``type`` defaults to `str`.
    If ``multiple`` is True, the option value is a list of ``type``
    instead of an instance of ``type``.
    ``help`` and ``metavar`` are used to construct the
    automatically generated command line help string. The help
    message is formatted like::
        --name=METAVAR      help string
    ``group`` is used to group the defined options in logical
    groups. By default, command line options are grouped by the
    file in which they are defined.
    Command line option names must be unique globally.
    If a ``callback`` is given, it will be run with the new value whenever
    the option is changed. This can be used to combine command-line
    and file-based options::
        define("config", type=str, help="path to config file",
               callback=lambda path: parse_config_file(path, final=False))
    With this definition, options in the file specified by ``--config`` will
    override options set earlier on the command line, but can be overridden
    by later flags."""
|
    normalized = self._normalize_name(name)
    if normalized in self._options:
        raise Error("Option %r already defined in %s"
                    % (normalized, self._options[normalized].file_name))
    frame = sys._getframe(0)
    options_file = frame.f_code.co_filename
    # Can be called directly, or through top level define() fn, in which
    # case, step up above that frame to look for real caller.
    if (frame.f_back.f_code.co_filename == options_file
            and frame.f_back.f_code.co_name == "define"):
        frame = frame.f_back
    file_name = frame.f_back.f_code.co_filename
    if file_name == options_file:
        file_name = ""
    if type is None:
        if not multiple and default is not None:
            type = default.__class__
        else:
            type = str
    if group:
        group_name = group  # type: Optional[str]
    else:
        group_name = file_name
    option = _Option(name, file_name=file_name, default=default, type=type,
                     help=help, metavar=metavar, multiple=multiple,
                     group_name=group_name, callback=callback)
    self._options[normalized] = option
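# A hedged usage sketch, assuming this is the tornado-style options module and
# using the module-level convenience functions; option names are hypothetical:
from tornado.options import define, options, parse_command_line

define("port", default=8080, help="listen port")          # type inferred as int from the default
define("tags", multiple=True, help="repeatable option")   # value becomes a list of str

parse_command_line(["prog", "--port=9000", "--tags=a,b"])
assert options.port == 9000 and options.tags == ["a", "b"]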
|
def files_log_graph(self, stream):
    '''Build up a graph (nodes and edges) from a Bro files.log'''
|
    file_log = list(stream)
    print('Entering file_log_graph...(%d rows)' % len(file_log))
    for row in file_log:
        # If the mime-type is interesting add the uri and the host->uri->host relationships
        if row['mime_type'] not in self.exclude_mime_types:
            # Check for weird conditions
            if (row['total_bytes'] == '-'):
                continue
            if ('-' in row['md5']):
                continue
            # Check for missing bytes
            if row['missing_bytes']:
                labels = ['file', 'missing']
            else:
                labels = ['file']
            # Make the file node name kewl
            name = '%6s %s %.0f-KB' % (row['md5'][:6], row['mime_type'], row['total_bytes'] / 1024.0)
            if row['missing_bytes']:
                name += '*'
            name = name.replace('application/', '')
            # Add the file node
            self.add_node(row['md5'], name, labels)
            # Add the tx_host
            self.add_node(row['tx_hosts'], row['tx_hosts'], ['host'])
            # Add the file -> tx_host relationship
            self.add_rel(row['tx_hosts'], row['md5'], 'file')
|
def get_bounds(self, bin_num):
    """Get the bounds of a bin, given its index `bin_num`.
    :returns: a `Bounds` namedtuple with properties min and max
        respectively."""
|
    min_bound = (self.bin_size * bin_num) + self.min_value
    max_bound = min_bound + self.bin_size
    return self.Bounds(min_bound, max_bound)
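# A quick arithmetic check of the formula above, with hypothetical bin settings
# (bin_size=10, min_value=0): bin 3 spans [30, 40).
bin_size, min_value, bin_num = 10, 0, 3
min_bound = (bin_size * bin_num) + min_value
assert (min_bound, min_bound + bin_size) == (30, 40)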
|
def lookup(self, req, parent, name):
    """Look up a directory entry by name and get its attributes.
    Valid replies:
        reply_entry
        reply_err"""
|
    self.reply_err(req, errno.ENOENT)
|
def recipients(preferences, message, valid_paths, config):
    """The main API function.
    Accepts a fedmsg message as an argument.
    Returns a dict mapping context names to lists of recipients."""
|
    rule_cache = dict()
    results = defaultdict(list)
    notified = set()
    for preference in preferences:
        user = preference['user']
        context = preference['context']
        if (user['openid'], context['name']) in notified:
            continue
        for filter in preference['filters']:
            if matches(filter, message, valid_paths, rule_cache, config):
                for detail_value in preference['detail_values']:
                    results[context['name']].append({
                        'user': user['openid'],
                        context['detail_name']: detail_value,
                        'filter_name': filter['name'],
                        'filter_id': filter['id'],
                        'filter_oneshot': filter['oneshot'],
                        'markup_messages': preference['markup_messages'],
                        'triggered_by_links': preference['triggered_by_links'],
                        'shorten_links': preference['shorten_links'],
                        'verbose': preference['verbose'],
                    })
                notified.add((user['openid'], context['name']))
                break
    return results
|
def _iter_walk(
    self,
    fs,                # type: FS
    path,              # type: Text
    namespaces=None,   # type: Optional[Collection[Text]]
):
    # type: (...) -> Iterator[Tuple[Text, Optional[Info]]]
    """Get the walk generator."""
|
    if self.search == "breadth":
        return self._walk_breadth(fs, path, namespaces=namespaces)
    else:
        return self._walk_depth(fs, path, namespaces=namespaces)
|
def concat(left, rights, distinct=False, axis=0):
    """Concat collections.
    :param left: left collection
    :param rights: right collections, can be a DataFrame object or a list of DataFrames
    :param distinct: whether to remove duplicate entries. Only available when axis == 0
    :param axis: when axis == 0, the DataFrames are merged vertically, otherwise horizontally
    :return: collection
    Note that axis == 1 can only be used under Pandas DataFrames or XFlow.
    :Example:
    >>> df['name', 'id'].concat(df2['score'], axis=1)"""
|
    from ..utils import to_collection
    if isinstance(rights, Node):
        rights = [rights, ]
    if not rights:
        raise ValueError('At least one DataFrame should be provided.')
    if axis == 0:
        for right in rights:
            left = union(left, right, distinct=distinct)
        return left
    else:
        rights = [to_collection(r) for r in rights]
        ConcatCollectionExpr.validate_input(left, *rights)
        if hasattr(left, '_xflow_concat'):
            return left._xflow_concat(rights)
        else:
            return __horz_concat(left, rights)
|
def prop_eq_or_in_or(default, key, value, dct):
    """Ramda propEq/propIn plus propOr
    :param default:
    :param key:
    :param value:
    :param dct:
    :return:"""
|
    return has(key, dct) and (
        dct[key] == value if key in dct
        else (dct[key] in value
              if isinstance(value, (list, tuple)) and not isinstance(value, str)
              else default))
|
def to_json(self):
    """Convert the analysis period to a dictionary."""
|
    return {
        'st_month': self.st_month,
        'st_day': self.st_day,
        'st_hour': self.st_hour,
        'end_month': self.end_month,
        'end_day': self.end_day,
        'end_hour': self.end_hour,
        'timestep': self.timestep,
        'is_leap_year': self.is_leap_year,
    }
|
def getSpec(cls):
    """Return the Spec for ApicalTMPairRegion"""
|
spec = { "description" : ApicalTMPairRegion . __doc__ , "singleNodeOnly" : True , "inputs" : { "activeColumns" : { "description" : ( "An array of 0's and 1's representing the active " "minicolumns, i.e. the input to the TemporalMemory" ) , "dataType" : "Real32" , "count" : 0 , "required" : True , "regionLevel" : True , "isDefaultInput" : True , "requireSplitterMap" : False } , "resetIn" : { "description" : ( "A boolean flag that indicates whether" " or not the input vector received in this compute cycle" " represents the first presentation in a" " new temporal sequence." ) , "dataType" : "Real32" , "count" : 1 , "required" : False , "regionLevel" : True , "isDefaultInput" : False , "requireSplitterMap" : False } , "basalInput" : { "description" : "An array of 0's and 1's representing basal input" , "dataType" : "Real32" , "count" : 0 , "required" : False , "regionLevel" : True , "isDefaultInput" : False , "requireSplitterMap" : False } , "basalGrowthCandidates" : { "description" : ( "An array of 0's and 1's representing basal input " + "that can be learned on new synapses on basal " + "segments. If this input is a length-0 array, the " + "whole basalInput is used." ) , "dataType" : "Real32" , "count" : 0 , "required" : False , "regionLevel" : True , "isDefaultInput" : False , "requireSplitterMap" : False } , "apicalInput" : { "description" : "An array of 0's and 1's representing top down input." " The input will be provided to apical dendrites." , "dataType" : "Real32" , "count" : 0 , "required" : False , "regionLevel" : True , "isDefaultInput" : False , "requireSplitterMap" : False } , "apicalGrowthCandidates" : { "description" : ( "An array of 0's and 1's representing apical input " + "that can be learned on new synapses on apical " + "segments. If this input is a length-0 array, the " + "whole apicalInput is used." ) , "dataType" : "Real32" , "count" : 0 , "required" : False , "regionLevel" : True , "isDefaultInput" : False , "requireSplitterMap" : False } , } , "outputs" : { "predictedCells" : { "description" : ( "A binary output containing a 1 for every " "cell that was predicted for this timestep." ) , "dataType" : "Real32" , "count" : 0 , "regionLevel" : True , "isDefaultOutput" : False } , "predictedActiveCells" : { "description" : ( "A binary output containing a 1 for every " "cell that transitioned from predicted to active." ) , "dataType" : "Real32" , "count" : 0 , "regionLevel" : True , "isDefaultOutput" : False } , "activeCells" : { "description" : ( "A binary output containing a 1 for every " "cell that is currently active." ) , "dataType" : "Real32" , "count" : 0 , "regionLevel" : True , "isDefaultOutput" : True } , "winnerCells" : { "description" : ( "A binary output containing a 1 for every " "'winner' cell in the TM." ) , "dataType" : "Real32" , "count" : 0 , "regionLevel" : True , "isDefaultOutput" : False } , } , "parameters" : { # Input sizes ( the network API doesn ' t provide these during initialize )
"columnCount" : { "description" : ( "The size of the 'activeColumns' input " "(i.e. the number of columns)" ) , "accessMode" : "Read" , "dataType" : "UInt32" , "count" : 1 , "constraints" : "" } , "basalInputWidth" : { "description" : "The size of the 'basalInput' input" , "accessMode" : "Read" , "dataType" : "UInt32" , "count" : 1 , "constraints" : "" } , "apicalInputWidth" : { "description" : "The size of the 'apicalInput' input" , "accessMode" : "Read" , "dataType" : "UInt32" , "count" : 1 , "constraints" : "" } , "learn" : { "description" : "True if the TM should learn." , "accessMode" : "ReadWrite" , "dataType" : "Bool" , "count" : 1 , "defaultValue" : "true" } , "cellsPerColumn" : { "description" : "Number of cells per column" , "accessMode" : "Read" , "dataType" : "UInt32" , "count" : 1 , "constraints" : "" } , "activationThreshold" : { "description" : ( "If the number of active connected synapses on a " "segment is at least this threshold, the segment " "is said to be active." ) , "accessMode" : "Read" , "dataType" : "UInt32" , "count" : 1 , "constraints" : "" } , "reducedBasalThreshold" : { "description" : ( "Activation threshold of basal segments for cells " "with active apical segments (with apicalTiebreak " "implementation). " ) , "accessMode" : "Read" , "dataType" : "UInt32" , "count" : 1 , "constraints" : "" } , "initialPermanence" : { "description" : "Initial permanence of a new synapse." , "accessMode" : "Read" , "dataType" : "Real32" , "count" : 1 , "constraints" : "" } , "connectedPermanence" : { "description" : ( "If the permanence value for a synapse is greater " "than this value, it is said to be connected." ) , "accessMode" : "Read" , "dataType" : "Real32" , "count" : 1 , "constraints" : "" } , "minThreshold" : { "description" : ( "If the number of synapses active on a segment is at " "least this threshold, it is selected as the best " "matching cell in a bursting column." ) , "accessMode" : "Read" , "dataType" : "UInt32" , "count" : 1 , "constraints" : "" } , "sampleSize" : { "description" : ( "The desired number of active synapses for an " "active cell" ) , "accessMode" : "Read" , "dataType" : "UInt32" , "count" : 1 } , "learnOnOneCell" : { "description" : ( "If True, the winner cell for each column will be" " fixed between resets." ) , "accessMode" : "Read" , "dataType" : "Bool" , "count" : 1 , "defaultValue" : "false" } , "maxSynapsesPerSegment" : { "description" : "The maximum number of synapses per segment. Use -1 " "for unlimited." , "accessMode" : "Read" , "dataType" : "Int32" , "count" : 1 } , "maxSegmentsPerCell" : { "description" : "The maximum number of segments per cell" , "accessMode" : "Read" , "dataType" : "UInt32" , "count" : 1 } , "permanenceIncrement" : { "description" : ( "Amount by which permanences of synapses are " "incremented during learning." ) , "accessMode" : "Read" , "dataType" : "Real32" , "count" : 1 } , "permanenceDecrement" : { "description" : ( "Amount by which permanences of synapses are " "decremented during learning." ) , "accessMode" : "Read" , "dataType" : "Real32" , "count" : 1 } , "basalPredictedSegmentDecrement" : { "description" : ( "Amount by which active permanences of synapses of " "previously predicted but inactive segments are " "decremented." ) , "accessMode" : "Read" , "dataType" : "Real32" , "count" : 1 } , "apicalPredictedSegmentDecrement" : { "description" : ( "Amount by which active permanences of synapses of " "previously predicted but inactive segments are " "decremented." 
) , "accessMode" : "Read" , "dataType" : "Real32" , "count" : 1 } , "seed" : { "description" : "Seed for the random number generator." , "accessMode" : "Read" , "dataType" : "UInt32" , "count" : 1 } , "implementation" : { "description" : "Apical implementation" , "accessMode" : "Read" , "dataType" : "Byte" , "count" : 0 , "constraints" : ( "enum: ApicalTiebreak, ApicalTiebreakCPP, ApicalDependent" ) , "defaultValue" : "ApicalTiebreakCPP" } , } , }
return spec
|
def mac_access_list_extended_name(self, **kwargs):
    """Auto Generated Code"""
|
    config = ET.Element("config")
    mac = ET.SubElement(config, "mac", xmlns="urn:brocade.com:mgmt:brocade-mac-access-list")
    access_list = ET.SubElement(mac, "access-list")
    extended = ET.SubElement(access_list, "extended")
    name = ET.SubElement(extended, "name")
    name.text = kwargs.pop('name')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
|
def check_runtime_errors(cmd_derived_from_alias, pos_args_table):
    """Validate placeholders and their expressions in cmd_derived_from_alias to make sure
    that there is no runtime error (such as index out of range).
    Args:
        cmd_derived_from_alias: The command derived from the alias
            (including any positional argument placeholders)
        pos_args_table: The positional argument table."""
|
    for placeholder, value in pos_args_table.items():
        exec('{} = "{}"'.format(placeholder, value))  # pylint: disable=exec-used
    expressions = get_placeholders(cmd_derived_from_alias)
    for expression in expressions:
        try:
            exec(expression)  # pylint: disable=exec-used
        except Exception as exception:  # pylint: disable=broad-except
            error_msg = PLACEHOLDER_EVAL_ERROR.format(expression, exception)
            raise CLIError(error_msg)
|
def run(self, image, command=None, stdout=True, stderr=False, remove=False, **kwargs):
    """Run a container. By default, it will wait for the container to finish
    and return its logs, similar to ``docker run``.
    If the ``detach`` argument is ``True``, it will start the container
    and immediately return a :py:class:`Container` object, similar to
    ``docker run -d``.
    Example:
        Run a container and get its output:
        >>> import docker
        >>> client = docker.from_env()
        >>> client.containers.run('alpine', 'echo hello world')
        b'hello world\\n'
        Run a container and detach:
        >>> container = client.containers.run('bfirsh/reticulate-splines',
                                              detach=True)
        >>> container.logs()
        'Reticulating spline 1...\\nReticulating spline 2...\\n'
    Args:
        image (str): The image to run.
        command (str or list): The command to run in the container.
        auto_remove (bool): enable auto-removal of the container on daemon
            side when the container's process exits.
        blkio_weight_device: Block IO weight (relative device weight) in
            the form of: ``[{"Path": "device_path", "Weight": weight}]``.
        blkio_weight: Block IO weight (relative weight), accepts a weight
            value between 10 and 1000.
        cap_add (list of str): Add kernel capabilities. For example,
            ``["SYS_ADMIN", "MKNOD"]``.
        cap_drop (list of str): Drop kernel capabilities.
        cgroup_parent (str): Override the default parent cgroup.
        cpu_count (int): Number of usable CPUs (Windows only).
        cpu_percent (int): Usable percentage of the available CPUs
            (Windows only).
        cpu_period (int): The length of a CPU period in microseconds.
        cpu_quota (int): Microseconds of CPU time that the container can
            get in a CPU period.
        cpu_rt_period (int): Limit CPU real-time period in microseconds.
        cpu_rt_runtime (int): Limit CPU real-time runtime in microseconds.
        cpu_shares (int): CPU shares (relative weight).
        cpuset_cpus (str): CPUs in which to allow execution (``0-3``,
            ``0,1``).
        cpuset_mems (str): Memory nodes (MEMs) in which to allow execution
            (``0-3``, ``0,1``). Only effective on NUMA systems.
        detach (bool): Run container in the background and return a
            :py:class:`Container` object.
        device_cgroup_rules (:py:class:`list`): A list of cgroup rules to
            apply to the container.
        device_read_bps: Limit read rate (bytes per second) from a device
            in the form of: `[{"Path": "device_path", "Rate": rate}]`
        device_read_iops: Limit read rate (IO per second) from a device.
        device_write_bps: Limit write rate (bytes per second) from a
            device.
        device_write_iops: Limit write rate (IO per second) from a device.
        devices (:py:class:`list`): Expose host devices to the container,
            as a list of strings in the form
            ``<path_on_host>:<path_in_container>:<cgroup_permissions>``.
            For example, ``/dev/sda:/dev/xvda:rwm`` allows the container
            to have read-write access to the host's ``/dev/sda`` via a
            node named ``/dev/xvda`` inside the container.
        dns (:py:class:`list`): Set custom DNS servers.
        dns_opt (:py:class:`list`): Additional options to be added to the
            container's ``resolv.conf`` file.
        dns_search (:py:class:`list`): DNS search domains.
        domainname (str or list): Set custom DNS search domains.
        entrypoint (str or list): The entrypoint for the container.
        environment (dict or list): Environment variables to set inside
            the container, as a dictionary or a list of strings in the
            format ``["SOMEVARIABLE=xxx"]``.
        extra_hosts (dict): Additional hostnames to resolve inside the
            container, as a mapping of hostname to IP address.
        group_add (:py:class:`list`): List of additional group names and/or
            IDs that the container process will run as.
        healthcheck (dict): Specify a test to perform to check that the
            container is healthy.
        hostname (str): Optional hostname for the container.
        init (bool): Run an init inside the container that forwards
            signals and reaps processes
        init_path (str): Path to the docker-init binary
        ipc_mode (str): Set the IPC mode for the container.
        isolation (str): Isolation technology to use. Default: `None`.
        kernel_memory (int or str): Kernel memory limit
        labels (dict or list): A dictionary of name-value labels (e.g.
            ``{"label1": "value1", "label2": "value2"}``) or a list of
            names of labels to set with empty values (e.g.
            ``["label1", "label2"]``)
        links (dict): Mapping of links using the
            ``{'container': 'alias'}`` format. The alias is optional.
            Containers declared in this dict will be linked to the new
            container using the provided alias. Default: ``None``.
        log_config (LogConfig): Logging configuration.
        lxc_conf (dict): LXC config.
        mac_address (str): MAC address to assign to the container.
        mem_limit (int or str): Memory limit. Accepts float values
            (which represent the memory limit of the created container in
            bytes) or a string with a units identification char
            (``100000b``, ``1000k``, ``128m``, ``1g``). If a string is
            specified without a units character, bytes are assumed as an
            intended unit.
        mem_reservation (int or str): Memory soft limit
        mem_swappiness (int): Tune a container's memory swappiness
            behavior. Accepts number between 0 and 100.
memswap _ limit ( str or int ) : Maximum amount of memory + swap a
container is allowed to consume .
mounts ( : py : class : ` list ` ) : Specification for mounts to be added to
the container . More powerful alternative to ` ` volumes ` ` . Each
item in the list is expected to be a
: py : class : ` docker . types . Mount ` object .
name ( str ) : The name for this container .
nano _ cpus ( int ) : CPU quota in units of 1e - 9 CPUs .
network ( str ) : Name of the network this container will be connected
to at creation time . You can connect to additional networks
using : py : meth : ` Network . connect ` . Incompatible with
` ` network _ mode ` ` .
network _ disabled ( bool ) : Disable networking .
network _ mode ( str ) : One of :
- ` ` bridge ` ` Create a new network stack for the container on
on the bridge network .
- ` ` none ` ` No networking for this container .
- ` ` container : < name | id > ` ` Reuse another container ' s network
stack .
- ` ` host ` ` Use the host network stack .
Incompatible with ` ` network ` ` .
oom _ kill _ disable ( bool ) : Whether to disable OOM killer .
oom _ score _ adj ( int ) : An integer value containing the score given
to the container in order to tune OOM killer preferences .
pid _ mode ( str ) : If set to ` ` host ` ` , use the host PID namespace
inside the container .
pids _ limit ( int ) : Tune a container ' s pids limit . Set ` ` - 1 ` ` for
unlimited .
platform ( str ) : Platform in the format ` ` os [ / arch [ / variant ] ] ` ` .
Only used if the method needs to pull the requested image .
ports ( dict ) : Ports to bind inside the container .
The keys of the dictionary are the ports to bind inside the
container , either as an integer or a string in the form
` ` port / protocol ` ` , where the protocol is either ` ` tcp ` ` ,
` ` udp ` ` , or ` ` sctp ` ` .
The values of the dictionary are the corresponding ports to
open on the host , which can be either :
- The port number , as an integer . For example ,
` ` { ' 2222 / tcp ' : 3333 } ` ` will expose port 2222 inside the
container as port 3333 on the host .
- ` ` None ` ` , to assign a random host port . For example ,
` ` { ' 2222 / tcp ' : None } ` ` .
- A tuple of ` ` ( address , port ) ` ` if you want to specify the
host interface . For example ,
` ` { ' 1111 / tcp ' : ( ' 127.0.0.1 ' , 1111 ) } ` ` .
- A list of integers , if you want to bind multiple host ports
to a single container port . For example ,
` ` { ' 1111 / tcp ' : [ 1234 , 4567 ] } ` ` .
privileged ( bool ) : Give extended privileges to this container .
publish _ all _ ports ( bool ) : Publish all ports to the host .
read _ only ( bool ) : Mount the container ' s root filesystem as read
only .
remove ( bool ) : Remove the container when it has finished running .
Default : ` ` False ` ` .
restart _ policy ( dict ) : Restart the container when it exits .
Configured as a dictionary with keys :
- ` ` Name ` ` One of ` ` on - failure ` ` , or ` ` always ` ` .
- ` ` MaximumRetryCount ` ` Number of times to restart the
container on failure .
For example :
` ` { " Name " : " on - failure " , " MaximumRetryCount " : 5 } ` `
runtime ( str ) : Runtime to use with this container .
security _ opt ( : py : class : ` list ` ) : A list of string values to
customize labels for MLS systems , such as SELinux .
shm _ size ( str or int ) : Size of / dev / shm ( e . g . ` ` 1G ` ` ) .
stdin _ open ( bool ) : Keep ` ` STDIN ` ` open even if not attached .
stdout ( bool ) : Return logs from ` ` STDOUT ` ` when ` ` detach = False ` ` .
Default : ` ` True ` ` .
stderr ( bool ) : Return logs from ` ` STDERR ` ` when ` ` detach = False ` ` .
Default : ` ` False ` ` .
stop _ signal ( str ) : The stop signal to use to stop the container
( e . g . ` ` SIGINT ` ` ) .
storage _ opt ( dict ) : Storage driver options per container as a
key - value mapping .
stream ( bool ) : If true and ` ` detach ` ` is false , return a log
generator instead of a string . Ignored if ` ` detach ` ` is true .
Default : ` ` False ` ` .
sysctls ( dict ) : Kernel parameters to set in the container .
tmpfs ( dict ) : Temporary filesystems to mount , as a dictionary
mapping a path inside the container to options for that path .
For example :
. . code - block : : python
{ ' / mnt / vol2 ' : ' ' ,
' / mnt / vol1 ' : ' size = 3G , uid = 1000 ' }
tty ( bool ) : Allocate a pseudo - TTY .
ulimits ( : py : class : ` list ` ) : Ulimits to set inside the container ,
as a list of : py : class : ` docker . types . Ulimit ` instances .
use _ config _ proxy ( bool ) : If ` ` True ` ` , and if the docker client
configuration file ( ` ` ~ / . docker / config . json ` ` by default )
contains a proxy configuration , the corresponding environment
variables will be set in the container being built .
user ( str or int ) : Username or UID to run commands as inside the
container .
userns _ mode ( str ) : Sets the user namespace mode for the container
when user namespace remapping option is enabled . Supported
values are : ` ` host ` `
uts _ mode ( str ) : Sets the UTS namespace mode for the container .
Supported values are : ` ` host ` `
version ( str ) : The version of the API to use . Set to ` ` auto ` ` to
automatically detect the server ' s version . Default : ` ` 1.35 ` `
volume _ driver ( str ) : The name of a volume driver / plugin .
volumes ( dict or list ) : A dictionary to configure volumes mounted
inside the container . The key is either the host path or a
volume name , and the value is a dictionary with the keys :
- ` ` bind ` ` The path to mount the volume inside the container
- ` ` mode ` ` Either ` ` rw ` ` to mount the volume read / write , or
` ` ro ` ` to mount it read - only .
For example :
. . code - block : : python
{ ' / home / user1 / ' : { ' bind ' : ' / mnt / vol2 ' , ' mode ' : ' rw ' } ,
' / var / www ' : { ' bind ' : ' / mnt / vol1 ' , ' mode ' : ' ro ' } }
volumes _ from ( : py : class : ` list ` ) : List of container names or IDs to
get volumes from .
working _ dir ( str ) : Path to the working directory .
Returns :
The container logs , either ` ` STDOUT ` ` , ` ` STDERR ` ` , or both ,
depending on the value of the ` ` stdout ` ` and ` ` stderr ` ` arguments .
` ` STDOUT ` ` and ` ` STDERR ` ` may be read only if either the ` ` json - file ` `
or ` ` journald ` ` logging driver is used . Thus , if you are using neither of
these drivers , a ` ` None ` ` object is returned instead . See the
` Engine API documentation
< https : / / docs . docker . com / engine / api / v1.30 / # operation / ContainerLogs / > ` _
for full details .
If ` ` detach ` ` is ` ` True ` ` , a : py : class : ` Container ` object is
returned instead .
Raises :
: py : class : ` docker . errors . ContainerError `
If the container exits with a non - zero exit code and
` ` detach ` ` is ` ` False ` ` .
: py : class : ` docker . errors . ImageNotFound `
If the specified image does not exist .
: py : class : ` docker . errors . APIError `
If the server returns an error ."""
|
if isinstance ( image , Image ) :
image = image . id
stream = kwargs . pop ( 'stream' , False )
detach = kwargs . pop ( 'detach' , False )
platform = kwargs . pop ( 'platform' , None )
if detach and remove :
if version_gte ( self . client . api . _version , '1.25' ) :
kwargs [ "auto_remove" ] = True
else :
raise RuntimeError ( "The options 'detach' and 'remove' cannot " "be used together in api versions < 1.25." )
if kwargs . get ( 'network' ) and kwargs . get ( 'network_mode' ) :
raise RuntimeError ( 'The options "network" and "network_mode" can not be used ' 'together.' )
try :
container = self . create ( image = image , command = command , detach = detach , ** kwargs )
except ImageNotFound :
self . client . images . pull ( image , platform = platform )
container = self . create ( image = image , command = command , detach = detach , ** kwargs )
container . start ( )
if detach :
return container
logging_driver = container . attrs [ 'HostConfig' ] [ 'LogConfig' ] [ 'Type' ]
out = None
if logging_driver == 'json-file' or logging_driver == 'journald' :
out = container . logs ( stdout = stdout , stderr = stderr , stream = True , follow = True )
exit_status = container . wait ( ) [ 'StatusCode' ]
if exit_status != 0 :
out = None
if not kwargs . get ( 'auto_remove' ) :
out = container . logs ( stdout = False , stderr = True )
if remove :
container . remove ( )
if exit_status != 0 :
raise ContainerError ( container , exit_status , command , image , out )
return out if stream or out is None else b'' . join ( [ line for line in out ] )
|
def _check_holiday_structure ( self , times ) :
"""To check the structure of the HolidayClass
: param list times : years or months or days or week numbers
: rtype : None or Exception
: return : in the case of exception returns the exception"""
|
if not isinstance ( times , list ) :
raise TypeError ( "an list is required" )
for time in times :
if not isinstance ( time , tuple ) :
raise TypeError ( "a tuple is required" )
if len ( time ) > 5 :
raise TypeError ( "Target time takes at most 5 arguments" " ('%d' given)" % len ( time ) )
if len ( time ) < 5 :
raise TypeError ( "Required argument '%s' (pos '%d')" " not found" % ( TIME_LABEL [ len ( time ) ] , len ( time ) ) )
self . _check_time_format ( TIME_LABEL , time )
|
def create ( cls , card_id , name_on_card = None , pin_code = None , second_line = None , custom_headers = None ) :
"""Request a card replacement .
: type user _ id : int
: type card _ id : int
: param name _ on _ card : The user ' s name as it will be on the card . Check
' card - name ' for the available card names for a user .
: type name _ on _ card : str
: param pin _ code : The plaintext pin code . Requests require encryption to
be enabled .
: type pin _ code : str
: param second _ line : The second line on the card .
: type second _ line : str
: type custom _ headers : dict [ str , str ] | None
: rtype : BunqResponseInt"""
|
if custom_headers is None :
custom_headers = { }
request_map = { cls . FIELD_NAME_ON_CARD : name_on_card , cls . FIELD_PIN_CODE : pin_code , cls . FIELD_SECOND_LINE : second_line }
request_map_string = converter . class_to_json ( request_map )
request_map_string = cls . _remove_field_for_request ( request_map_string )
api_client = client . ApiClient ( cls . _get_api_context ( ) )
request_bytes = request_map_string . encode ( )
request_bytes = security . encrypt ( cls . _get_api_context ( ) , request_bytes , custom_headers )
endpoint_url = cls . _ENDPOINT_URL_CREATE . format ( cls . _determine_user_id ( ) , card_id )
response_raw = api_client . post ( endpoint_url , request_bytes , custom_headers )
return BunqResponseInt . cast_from_bunq_response ( cls . _process_for_id ( response_raw ) )
|
def SubtractFromBalance ( self , assetId , fixed8_val ) :
"""Subtract amount to the specified balance .
Args :
assetId ( UInt256 ) :
fixed8 _ val ( Fixed8 ) : amount to add ."""
|
found = False
for key , balance in self . Balances . items ( ) :
if key == assetId :
self . Balances [ assetId ] = self . Balances [ assetId ] - fixed8_val
found = True
if not found :
self . Balances [ assetId ] = fixed8_val * Fixed8 ( - 1 )
|
def set ( self , key , value , time , compress_level = - 1 ) :
"""Set a value for a key on server .
: param key : Key ' s name
: type key : six . string _ types
: param value : A value to be stored on server .
: type value : object
: param time : Time in seconds that your key will expire .
: type time : int
: param compress _ level : How much to compress .
0 = no compression , 1 = fastest , 9 = slowest but best ,
-1 = default compression level .
: type compress _ level : int
: return : True in case of success and False in case of failure
: rtype : bool"""
|
return self . _set_add_replace ( 'set' , key , value , time , compress_level = compress_level )
|
def run_command ( cmd , debug = False ) :
"""Execute the given command and returns None .
: param cmd : A ` ` sh . Command ` ` object to execute .
: param debug : An optional bool to toggle debug output .
: return : ` ` sh ` ` object"""
|
if debug : # WARN ( retr0h ) : Uses an internal ` ` sh ` ` data structure to dig
# the environment out of the ` ` sh . command ` ` object .
print_environment_vars ( cmd . _partial_call_args . get ( 'env' , { } ) )
print_debug ( 'COMMAND' , str ( cmd ) )
print ( )
return cmd ( _truncate_exc = False )
|
def galactic_latlon ( self ) :
"""Compute galactic coordinates ( lat , lon , distance )"""
|
vector = _GALACTIC . dot ( self . position . au )
d , lat , lon = to_polar ( vector )
return ( Angle ( radians = lat , signed = True ) , Angle ( radians = lon ) , Distance ( au = d ) )
|
def dumps ( self , obj , many = None , * args , ** kwargs ) :
"""Same as : meth : ` dump ` , except return a JSON - encoded string .
: param obj : The object to serialize .
: param bool many : Whether to serialize ` obj ` as a collection . If ` None ` , the value
for ` self . many ` is used .
: return : A ` ` json ` ` string
: rtype : str
. . versionadded : : 1.0.0
. . versionchanged : : 3.0.0b7
This method returns the serialized data rather than a ` ` ( data , errors ) ` ` duple .
A : exc : ` ValidationError < marshmallow . exceptions . ValidationError > ` is raised
if ` ` obj ` ` is invalid ."""
|
serialized = self . dump ( obj , many = many )
return self . opts . render_module . dumps ( serialized , * args , ** kwargs )
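A hedged usage sketch (assuming marshmallow 3.x); the schema and data below are illustrative only:

from marshmallow import Schema, fields

class UserSchema(Schema):
    name = fields.Str()
    email = fields.Email()

json_string = UserSchema().dumps({'name': 'Ada', 'email': '[email protected]'})
# e.g. '{"name": "Ada", "email": "[email protected]"}' (field order may vary)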
|
def is_active ( self ) :
"""Determines whether this plugin is active .
This plugin is only active if any run has an embedding .
Returns :
Whether any run has embedding data to show in the projector ."""
|
if not self . multiplexer :
return False
if self . _is_active : # We have already determined that the projector plugin should be active .
# Do not re - compute that . We have no reason to later set this plugin to be
# inactive .
return True
if self . _thread_for_determining_is_active : # We are currently determining whether the plugin is active . Do not start
# a separate thread .
return self . _is_active
# The plugin is currently not active . The frontend might check again later .
# For now , spin off a separate thread to determine whether the plugin is
# active .
new_thread = threading . Thread ( target = self . _determine_is_active , name = 'ProjectorPluginIsActiveThread' )
self . _thread_for_determining_is_active = new_thread
new_thread . start ( )
return False
|
def __handle_scale_rot ( self ) :
"""Handle scaling and rotation of the surface"""
|
if self . __is_rot_pending :
self . __execute_rot ( self . untransformed_image )
self . __is_rot_pending = False
# Scale the image using the recently rotated surface to keep the orientation correct
self . __execute_scale ( self . image , self . image . get_size ( ) )
self . __is_scale_pending = False
# The image is not rotating while scaling , thus use the untransformed image to scale .
if self . __is_scale_pending :
self . __execute_scale ( self . untransformed_image , self . untransformed_image . get_size ( ) )
self . __is_scale_pending = False
|
def _move ( self ) :
"""Called during a PUT request where the action specifies
a move operation . Returns resource URI of the destination file ."""
|
newpath = self . action [ 'newpath' ]
try :
self . fs . move ( self . fp , newpath )
except OSError :
raise tornado . web . HTTPError ( 400 )
return newpath
|
def query ( song_name ) :
"""CLI :
$ iquery - l song _ name"""
|
r = requests_get ( SONG_SEARCH_URL . format ( song_name ) )
try : # Get the first result .
song_url = re . search ( r'(http://www.xiami.com/song/\d+)' , r . text ) . group ( 0 )
except AttributeError :
exit_after_echo ( SONG_NOT_FOUND )
return SongPage ( song_url )
|
def remove_item ( self , item ) :
"""Remove the specified item from the menu .
Args :
item ( MenuItem ) : the item to be removed .
Returns :
bool : True if the item was removed ; False otherwise ."""
|
for idx , _item in enumerate ( self . items ) :
if item == _item :
del self . items [ idx ]
return True
return False
|
def signed_to_twos_comp ( val : int , n_bits : int ) -> int :
"""Convert a signed integer to its " two ' s complement " representation .
Args :
val : signed integer
n _ bits : number of bits ( which must reflect a whole number of bytes )
Returns :
unsigned integer : two ' s complement version"""
|
assert n_bits % 8 == 0 , "Must specify a whole number of bytes"
n_bytes = n_bits // 8
b = val . to_bytes ( n_bytes , byteorder = sys . byteorder , signed = True )
return int . from_bytes ( b , byteorder = sys . byteorder , signed = False )
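Worked examples, assuming the function above is in scope; the round trip through native byte order cancels out, so the result is the usual two's complement value:

assert signed_to_twos_comp(-1, 16) == 0xFFFF   # all bits set in 16 bits
assert signed_to_twos_comp(-128, 8) == 0x80    # most negative 8-bit value
assert signed_to_twos_comp(5, 8) == 5          # non-negative values pass through unchanged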
|
def findattr ( self , name ) :
"""Search the vgroup for a given attribute .
Args : :
name attribute name
Returns : :
if found , VGAttr instance describing the attribute
None otherwise
C library equivalent : Vfindattr"""
|
try :
att = self . attr ( name )
if att . _index is None :
att = None
except HDF4Error :
att = None
return att
|
def create_graphics ( self ) :
"""Create images related to this panel ."""
|
if len ( self . _svg_fns ) > 0 :
rnftools . utils . shell ( '"{}" "{}"' . format ( "gnuplot" , self . _gp_fn ) )
if self . render_pdf_method is not None :
for svg_fn in self . _svg_fns :
pdf_fn = re . sub ( r'\.svg$' , r'.pdf' , svg_fn )
svg42pdf ( svg_fn , pdf_fn , method = self . render_pdf_method )
|
def is_theme ( self , name ) :
"""Return True if the theme * name * should be used ."""
|
return getattr ( self . args , 'theme_' + name ) or self . theme [ 'name' ] == name
|
def _get_vrf_name ( self , ri ) :
"""overloaded method for generating a vrf _ name that supports
region _ id"""
|
router_id = ri . router_name ( ) [ : self . DEV_NAME_LEN ]
is_multi_region_enabled = cfg . CONF . multi_region . enable_multi_region
if is_multi_region_enabled :
region_id = cfg . CONF . multi_region . region_id
vrf_name = "%s-%s" % ( router_id , region_id )
else :
vrf_name = router_id
return vrf_name
|
def compute_panel ( cls , data , scales , params ) :
"""Positions must override this function
Notes
Make necessary adjustments to the columns in the dataframe .
Create the position transformation functions and
use self . transform _ position ( ) to do the rest .
See Also
position _ jitter . compute _ panel"""
|
msg = '{} needs to implement this method'
raise NotImplementedError ( msg . format ( cls . __name__ ) )
|
def predict_proba ( self , L ) :
"""Returns the [ n , k ] matrix of label probabilities P ( Y | \ lambda )
Args :
L : An [ n , m ] scipy . sparse label matrix with values in { 0,1 , . . . , k }"""
|
self . _set_constants ( L )
L_aug = self . _get_augmented_label_matrix ( L )
mu = np . clip ( self . mu . detach ( ) . clone ( ) . numpy ( ) , 0.01 , 0.99 )
# Create a " junction tree mask " over the columns of L _ aug / mu
if len ( self . deps ) > 0 :
jtm = np . zeros ( L_aug . shape [ 1 ] )
# All maximal cliques are + 1
for i in self . c_tree . nodes ( ) :
node = self . c_tree . node [ i ]
jtm [ node [ "start_index" ] : node [ "end_index" ] ] = 1
# All separator sets are - 1
for i , j in self . c_tree . edges ( ) :
edge = self . c_tree [ i ] [ j ]
jtm [ edge [ "start_index" ] : edge [ "end_index" ] ] = 1
else :
jtm = np . ones ( L_aug . shape [ 1 ] )
# Note : We omit abstains , effectively assuming uniform distribution here
X = np . exp ( L_aug @ np . diag ( jtm ) @ np . log ( mu ) + np . log ( self . p ) )
Z = np . tile ( X . sum ( axis = 1 ) . reshape ( - 1 , 1 ) , self . k )
return X / Z
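The last two lines simply row-normalize X into probabilities; a tiny numpy illustration of that step with made-up numbers:

import numpy as np

X = np.array([[0.2, 0.6], [1.0, 1.0]])
Z = np.tile(X.sum(axis=1).reshape(-1, 1), 2)  # per-row sums repeated across the k=2 columns
print(X / Z)  # [[0.25 0.75], [0.5 0.5]] -- each row now sums to 1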
|
def prt_ntgos ( self , prt , ntgos ) :
"""Print the Grouper namedtuples ."""
|
for ntgo in ntgos :
key2val = ntgo . _asdict ( )
prt . write ( "{GO_LINE}\n" . format ( GO_LINE = self . prtfmt . format ( ** key2val ) ) )
|
def put ( self , key , value ) :
"""Associate key and value in the cache .
@ param key : the key
@ type key : ( dns . name . Name , int , int ) tuple whose values are the
query name , rdtype , and rdclass .
@ param value : The answer being cached
@ type value : dns . resolver . Answer object"""
|
self . maybe_clean ( )
self . data [ key ] = value
|
def project ( num = None , * args , ** kwargs ) :
"""Create a new main project
Parameters
num : int
The number of the project
% ( Project . parameters . no _ num ) s
Returns
Project
The project with the given ` num ` ( if it does not already exist , it is created )
See Also
scp : Sets the current project
gcp : Returns the current project"""
|
numbers = [ project . num for project in _open_projects ]
if num in numbers :
return _open_projects [ numbers . index ( num ) ]
if num is None :
num = max ( numbers ) + 1 if numbers else 1
project = PROJECT_CLS . new ( num , * args , ** kwargs )
_open_projects . append ( project )
return project
|
def has_suffix ( path_name , suffix ) :
"""Determines if path _ name has a suffix of at least ' suffix '"""
|
if isinstance ( suffix , str ) :
suffix = disintegrate ( suffix )
components = disintegrate ( path_name )
for i in range ( - 1 , - ( len ( suffix ) + 1 ) , - 1 ) :
if components [ i ] != suffix [ i ] :
break
else :
return True
return False
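A rough standalone sketch, assuming ``disintegrate`` (not shown here) simply splits a path into its components:

import os

def disintegrate(path_name):
    # Hypothetical stand-in: split a normalized path into its components.
    return os.path.normpath(path_name).split(os.sep)

print(has_suffix('/data/project/src/module.py', 'src/module.py'))  # True
print(has_suffix('/data/project/src/module.py', 'lib/module.py'))  # False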
|
def getpreferredencoding ( ) :
"""Return preferred encoding for text I / O ."""
|
encoding = locale . getpreferredencoding ( False )
if sys . platform == 'darwin' and encoding . startswith ( 'mac-' ) : # Upgrade ancient MacOS encodings in Python < 2.7
encoding = 'utf-8'
return encoding
|
def extract_element ( self , vector , idx , name = '' ) :
"""Returns the value at position idx ."""
|
instr = instructions . ExtractElement ( self . block , vector , idx , name = name )
self . _insert ( instr )
return instr
|
def Concat ( self : Iterable , * others ) :
"""' self ' : [ 1 , 2 , 3 ] ,
' : args ' : [ [ 4 , 5 , 6 ] , [ 7 , 8 , 9 ] ] ,
' assert ' : lambda ret : list ( ret ) = = [ 1 , 2 , 3 , 4 , 5 , 6 , 7 , 8 , 9]"""
|
return concat_generator ( self , * [ unbox_if_flow ( other ) for other in others ] )
|
def _get_all_field_names ( model ) :
"""100 % compatible version of the old API of model . _ meta . get _ all _ field _ names ( )
From : https : / / docs . djangoproject . com / en / 1.9 / ref / models / meta / # migrating - from - the - old - api"""
|
return list ( set ( chain . from_iterable ( ( field . name , field . attname ) if hasattr ( field , 'attname' ) else ( field . name , ) for field in model . _meta . get_fields ( ) # For complete backwards compatibility , you may want to exclude
# GenericForeignKey from the results .
if not ( field . many_to_one and field . related_model is None ) ) ) )
|
def export ( gandi , resource , output , force , intermediate ) :
"""Write the certificate to < output > or < fqdn > . crt .
Resource can be a CN or an ID"""
|
ids = [ ]
for res in resource :
ids . extend ( gandi . certificate . usable_ids ( res ) )
if output and len ( ids ) > 1 :
gandi . echo ( 'Too many certs found, you must specify which cert you ' 'want to export' )
return
for id_ in set ( ids ) :
cert = gandi . certificate . info ( id_ )
if 'cert' not in cert :
continue
if cert [ 'status' ] != 'valid' :
gandi . echo ( 'The certificate must be in valid status to be ' 'exported (%s).' % id_ )
continue
cert_filename = cert [ 'cn' ] . replace ( '*.' , 'wildcard.' , 1 )
crt_filename = output or cert_filename + '.crt'
if not force and os . path . isfile ( crt_filename ) :
gandi . echo ( 'The file %s already exists.' % crt_filename )
continue
crt = gandi . certificate . pretty_format_cert ( cert )
if crt :
with open ( crt_filename , 'w' ) as crt_file :
crt_file . write ( crt )
gandi . echo ( 'wrote %s' % crt_filename )
package = cert [ 'package' ]
if 'bus' in package and intermediate :
gandi . echo ( 'Business certs do not need intermediates.' )
elif intermediate :
crtf = 'pem'
sha_version = cert [ 'sha_version' ]
type_ = package . split ( '_' ) [ 1 ]
extra = ( 'sgc' if 'SGC' in package and 'pro' in package and sha_version == 1 else 'default' )
if extra == 'sgc' :
crtf = 'pem'
inters = gandi . certificate . urls [ sha_version ] [ type_ ] [ extra ] [ crtf ]
if isinstance ( inters , basestring ) :
inters = [ inters ]
fhandle = open ( cert_filename + '.inter.crt' , 'w+b' )
for inter in inters :
if inter . startswith ( 'http' ) :
data = requests . get ( inter ) . text
else :
data = inter
fhandle . write ( data . encode ( 'latin1' ) )
gandi . echo ( 'wrote %s' % cert_filename + '.inter.crt' )
fhandle . close ( )
return crt
|
def _lease_owned ( self , lease , current_uuid_path ) :
"""Checks if the given lease is owned by the prefix whose uuid is in
the given path
Note :
The prefix must be also in the same path it was when it took the
lease
Args :
lease : The lease to check
current _ uuid _ path ( str ) : Path to the uuid to check ownership of
Returns :
bool : ` ` True ` ` if the given lease is owned by the prefix ,
` ` False ` ` otherwise"""
|
prev_uuid_path , prev_uuid = lease . metadata
with open ( current_uuid_path ) as f :
current_uuid = f . read ( )
return current_uuid_path == prev_uuid_path and prev_uuid == current_uuid
|
def parse_values ( self , query ) :
"""extract values from query"""
|
values = { }
for name , filt in self . filters . items ( ) :
val = filt . parse_value ( query )
if val is None :
continue
values [ name ] = val
return values
|
def print_formatted ( datas ) :
"""Pretty print JSON DATA
Argument :
datas : dictionary of data"""
|
if not datas :
print ( "No data" )
exit ( 1 )
if isinstance ( datas , list ) : # get all zones
# API / zone without : identifier
hr ( )
print ( '%-20s %-8s %-12s' % ( 'name' , 'type' , 'notified_serial' ) )
hr ( )
for record in datas : # print ' NAME '
utils . print_inline ( "%(name)-20s" % record )
# print ' TYPE ' of SOA record
utils . print_inline ( "%(type)-8s" % record )
if record . get ( 'notified_serial' ) :
print ( "%(notified_serial)s" % record )
else :
print ( '' )
exit ( 0 )
elif datas . get ( 'records' ) :
print ( "domain: %(name)s" % datas )
if datas . get ( 'type' ) == 'MASTER' and datas . get ( 'notified_serial' ) :
print ( "serial: %(notified_serial)s" % datas )
print ( "DNS : %(type)s" % datas )
# print header
hr ( )
print ( '%-33s %-5s %-25s %-5s %-3s' % ( 'name' , 'type' , 'content' , 'ttl' , 'prio' ) )
hr ( )
for record in datas . get ( 'records' ) : # print ' NAME '
utils . print_inline ( "%(name)-33s" % record )
# print ' TYPE ' of SOA record
if record . get ( 'type' ) == 'SOA' :
print ( "%(type)-5s" % record )
# print ' TYPE ' of non SOA record
else :
utils . print_inline ( "%(type)-5s" % record )
# print ' CONTENT ' of non SOA
if record . get ( 'type' ) == 'SOA' :
utils . print_inline ( ">\t\t%(content)-25s " % record )
# print ' CONTENT ' of SOA record
else :
utils . print_inline ( "%(content)-25s" % record )
# print TTL , and PRIORITY for MX , SRV record
if record . get ( 'priority' ) :
utils . print_inline ( "%(ttl)5s" % record )
print ( "%(priority)2s" % record )
# print ttl for non SOA record
else :
print ( "%(ttl)5s " % record )
hr ( )
elif datas . get ( 'identifier' ) : # for template
print ( "identifier : %(identifier)s" % datas )
print ( "description: %(description)s" % datas )
hr ( )
print ( '%-33s %-5s %-25s %-5s %-3s' % ( 'name' , 'type' , 'content' , 'ttl' , 'prio' ) )
for record in datas . get ( 'entries' ) : # print ' NAME '
utils . print_inline ( "%(name)-33s" % record )
# print ' TYPE ' for SOA
if record . get ( 'type' ) == 'SOA' :
print ( "%(type)-5s" % record )
# print ' TYPE ' for non SOA
else :
utils . print_inline ( "%(type)-5s" % record )
# print ' CONTENT ' for SOA
if record . get ( 'type' ) == 'SOA' :
utils . print_inline ( "> %(content)-25s " % record )
# print ' CONTENT ' for non SOA
else :
utils . print_inline ( "%(content)-24s" % record )
# print ' TTL ' , and ' PRIORITY '
if record . get ( 'priority' ) is not None :
utils . print_inline ( "%(ttl)5s" % record )
print ( "%(priority)2s" % record )
# print
else :
print ( "%(ttl)5s " % record )
hr ( )
else :
print ( "No match records" )
|
def extract_path ( prec , v ) :
"""extracts a path in form of vertex list from source to vertex v
given a precedence table prec leading to the source
: param prec : precedence table of a tree
: param v : vertex on the tree
: returns : path from root to v , in form of a list
: complexity : linear"""
|
L = [ ]
while v is not None :
L . append ( v )
v = prec [ v ]
assert v not in L
# prevent infinite loops for a badly formed table prec
return L [ : : - 1 ]
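For example, with a precedence table describing the tree 0 -> 1 -> 3 (the root 0 has predecessor None):

prec = {0: None, 1: 0, 2: 0, 3: 1}  # predecessor of each vertex
print(extract_path(prec, 3))         # [0, 1, 3]
print(extract_path(prec, 2))         # [0, 2]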
|
def add ( self , key , value = None ) :
"""Adds the new key to this enumerated type .
: param key | < str >"""
|
if value is None :
value = 2 ** ( len ( self ) )
self [ key ] = value
setattr ( self , key , self [ key ] )
return value
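A minimal sketch of the resulting bit-flag behaviour, using a hypothetical dict-based enumerated type with the same ``add`` method:

class Enum(dict):
    def add(self, key, value=None):
        # Same logic as above: each new key gets the next power of two.
        if value is None:
            value = 2 ** len(self)
        self[key] = value
        setattr(self, key, self[key])
        return value

flags = Enum()
flags.add('Read')     # 1
flags.add('Write')    # 2
flags.add('Execute')  # 4
print(flags.Read | flags.Write)  # 3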
|
def _from_inferred_categories ( cls , inferred_categories , inferred_codes , dtype , true_values = None ) :
"""Construct a Categorical from inferred values .
For inferred categories ( ` dtype ` is None ) the categories are sorted .
For explicit ` dtype ` , the ` inferred _ categories ` are cast to the
appropriate type .
Parameters
inferred _ categories : Index
inferred _ codes : Index
dtype : CategoricalDtype or ' category '
true _ values : list , optional
If none are provided , the default ones are
" True " , " TRUE " , and " true . "
Returns
Categorical"""
|
from pandas import Index , to_numeric , to_datetime , to_timedelta
cats = Index ( inferred_categories )
known_categories = ( isinstance ( dtype , CategoricalDtype ) and dtype . categories is not None )
if known_categories : # Convert to a specialized type with ` dtype ` if specified .
if dtype . categories . is_numeric ( ) :
cats = to_numeric ( inferred_categories , errors = "coerce" )
elif is_datetime64_dtype ( dtype . categories ) :
cats = to_datetime ( inferred_categories , errors = "coerce" )
elif is_timedelta64_dtype ( dtype . categories ) :
cats = to_timedelta ( inferred_categories , errors = "coerce" )
elif dtype . categories . is_boolean ( ) :
if true_values is None :
true_values = [ "True" , "TRUE" , "true" ]
cats = cats . isin ( true_values )
if known_categories : # Recode from observation order to dtype . categories order .
categories = dtype . categories
codes = _recode_for_categories ( inferred_codes , cats , categories )
elif not cats . is_monotonic_increasing : # Sort categories and recode for unknown categories .
unsorted = cats . copy ( )
categories = cats . sort_values ( )
codes = _recode_for_categories ( inferred_codes , unsorted , categories )
dtype = CategoricalDtype ( categories , ordered = False )
else :
dtype = CategoricalDtype ( cats , ordered = False )
codes = inferred_codes
return cls ( codes , dtype = dtype , fastpath = True )
|
def isOpen ( self ) :
"""Returns whether all analyses from this Analysis Request are open
( their status is either " assigned " or " unassigned " )"""
|
for analysis in self . getAnalyses ( ) :
if not api . get_object ( analysis ) . isOpen ( ) :
return False
return True
|
def plugins ( self , typ = None , group = None ) :
"""Returns the plugins used for this dialog .
: param typ | < str > | | None
group | < str > | | None
: return [ < XWizardPlugin > , . . ]"""
|
if ( typ is None ) :
output = [ ]
for wlang in self . _plugins . values ( ) :
for wgrp in wlang . values ( ) :
output += wgrp
return output
elif ( group is None ) :
output = [ ]
for wgrp in self . _plugins . get ( nativestring ( typ ) , { } ) . values ( ) :
output += wgrp
return output
else :
return self . _plugins . get ( nativestring ( typ ) , { } ) . get ( nativestring ( group ) , [ ] )
|
def status ( DomainName , region = None , key = None , keyid = None , profile = None ) :
'''Given a domain name describe its status .
Returns a dictionary of interesting properties .
CLI Example :
. . code - block : : bash
salt myminion boto _ elasticsearch _ domain . status mydomain'''
|
conn = _get_conn ( region = region , key = key , keyid = keyid , profile = profile )
try :
domain = conn . describe_elasticsearch_domain ( DomainName = DomainName )
if domain and 'DomainStatus' in domain :
domain = domain . get ( 'DomainStatus' , { } )
keys = ( 'Endpoint' , 'Created' , 'Deleted' , 'DomainName' , 'DomainId' , 'EBSOptions' , 'SnapshotOptions' , 'AccessPolicies' , 'Processing' , 'AdvancedOptions' , 'ARN' , 'ElasticsearchVersion' )
return { 'domain' : dict ( [ ( k , domain . get ( k ) ) for k in keys if k in domain ] ) }
else :
return { 'domain' : None }
except ClientError as e :
return { 'error' : __utils__ [ 'boto3.get_error' ] ( e ) }
|
def get_data ( model , instance_id , kind = '' ) :
"""Get instance data by id .
: param model : a string , model name in rio . models
: param instance _ id : an integer , instance id .
: param kind : a string specifying which kind of dict transformer should be called .
: return : data ."""
|
instance = get_instance ( model , instance_id )
if not instance :
return
return ins2dict ( instance , kind )
|
def get_dates ( self , request ) :
"""Get available Sentinel - 2 acquisitions at least time _ difference apart
List of all available Sentinel - 2 acquisitions for given bbox with max cloud coverage and the specified
time interval . When a single time is specified the request will return that specific date , if it exists .
If a time range is specified the result is a list of all scenes between the specified dates conforming to
the cloud coverage criteria . Most recent acquisition being first in the list .
When a time _ difference threshold is set to a positive value , the function filters out all datetimes which
are within the time difference . The oldest datetime is preserved ; all others are deleted .
: param request : OGC - type request
: type request : WmsRequest or WcsRequest
: return : List of dates of existing acquisitions for the given request
: rtype : list ( datetime . datetime ) or [ None ]"""
|
if DataSource . is_timeless ( request . data_source ) :
return [ None ]
date_interval = parse_time_interval ( request . time )
LOGGER . debug ( 'date_interval=%s' , date_interval )
if request . wfs_iterator is None :
self . wfs_iterator = WebFeatureService ( request . bbox , date_interval , data_source = request . data_source , maxcc = request . maxcc , base_url = self . base_url , instance_id = self . instance_id )
else :
self . wfs_iterator = request . wfs_iterator
dates = sorted ( set ( self . wfs_iterator . get_dates ( ) ) )
if request . time is OgcConstants . LATEST :
dates = dates [ - 1 : ]
return OgcService . _filter_dates ( dates , request . time_difference )
|
def content_negotiation ( formats , default_type = 'text/html' ) :
"""Provides basic content negotiation and returns a view method based on the
best match of content types as indicated in formats .
: param formats : dictionary of content types and corresponding methods
: param default _ type : the content type returned by the decorated method .
Example usage : :
def rdf _ view ( request , arg ) :
return RDF _ RESPONSE
@ content _ negotiation ( { ' application / rdf + xml ' : rdf _ view } )
def html _ view ( request , arg ) :
return HTML _ RESPONSE
The above example would return the rdf _ view on a request type of
` ` application / rdf + xml ` ` and the normal view for anything else .
Any : class : ` django . http . HttpResponse ` returned by the view method chosen
by content negotiation will have a ' Vary : Accept ' HTTP header added .
* * NOTE : * * Some web browsers do content negotiation poorly , requesting
` ` application / xml ` ` when what they really want is ` ` application / xhtml + xml ` ` or
` ` text / html ` ` . When this type of Accept request is detected , the default type
will be returned rather than the best match that would be determined by parsing
the Accept string properly ( since in some cases the best match is
` ` application / xml ` ` , which could return non - html content inappropriate for
display in a web browser ) ."""
|
def _decorator ( view_method ) :
@ wraps ( view_method )
def _wrapped ( request , * args , ** kwargs ) : # Changed this to be a value passed as a method argument defaulting
# to text / html instead so it ' s more flexible .
# default _ type = ' text / html ' # If not specified , assume an HTML request .
# Add text / html for the original method if not already included .
if default_type not in formats :
formats [ default_type ] = view_method
try :
req_type = request . META [ 'HTTP_ACCEPT' ]
# If this request is coming from a browser like that , just
# give them our default type instead of honoring the actual best match
# ( see note above for more detail )
if '*/*' in req_type :
req_type = default_type
except KeyError :
req_type = default_type
# Get the best match for the content type requested .
content_type = mimeparse . best_match ( formats . keys ( ) , req_type )
# Return the view matching content type or the original view
# if no match .
if not content_type or content_type not in formats :
response = view_method ( request , * args , ** kwargs )
else :
response = formats [ content_type ] ( request , * args , ** kwargs )
# set a Vary header to indicate content may vary based on Accept header
if isinstance ( response , HttpResponse ) : # views should return HttpResponse objects , but check to be sure
# note : using the same utility method used by django ' s vary _ on _ headers decorator
patch_vary_headers ( response , [ 'Accept' ] )
return response
return _wrapped
return _decorator
|
def run ( paths , output = _I_STILL_HATE_EVERYTHING , recurse = core . flat , sort_by = None , ls = core . ls , stdout = stdout , ) :
"""Project - oriented directory and file information lister ."""
|
if output is _I_STILL_HATE_EVERYTHING :
output = core . columnized if stdout . isatty ( ) else core . one_per_line
if sort_by is None :
if output == core . as_tree :
def sort_by ( thing ) :
return ( thing . parent ( ) , thing . basename ( ) . lstrip ( string . punctuation ) . lower ( ) , )
else :
def sort_by ( thing ) :
return thing
def _sort_by ( thing ) :
return not getattr ( thing , "_always_sorts_first" , False ) , sort_by ( thing )
contents = [ path_and_children for path in paths or ( project . from_path ( FilePath ( "." ) ) , ) for path_and_children in recurse ( path = path , ls = ls ) ]
for line in output ( contents , sort_by = _sort_by ) :
stdout . write ( line )
stdout . write ( "\n" )
|
def make_fileitem_streamlist_stream_name ( stream_name , condition = 'is' , negate = False , preserve_case = False ) :
"""Create a node for FileItem / StreamList / Stream / Name
: return : A IndicatorItem represented as an Element node"""
|
document = 'FileItem'
search = 'FileItem/StreamList/Stream/Name'
content_type = 'string'
content = stream_name
ii_node = ioc_api . make_indicatoritem_node ( condition , document , search , content_type , content , negate = negate , preserve_case = preserve_case )
return ii_node
|
def get_prepopulated_value ( field , instance ) :
"""Returns preliminary value based on ` populate _ from ` ."""
|
if hasattr ( field . populate_from , '__call__' ) : # AutoSlugField ( populate _ from = lambda instance : . . . )
return field . populate_from ( instance )
else : # AutoSlugField ( populate _ from = ' foo ' )
attr = getattr ( instance , field . populate_from )
return callable ( attr ) and attr ( ) or attr
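An illustration of both ``populate_from`` styles, with a hypothetical field and model instance:

class Field:
    def __init__(self, populate_from):
        self.populate_from = populate_from

class Article:
    title = 'Hello World'
    def headline(self):
        return self.title.upper()

article = Article()
print(get_prepopulated_value(Field(lambda obj: obj.title), article))  # 'Hello World'
print(get_prepopulated_value(Field('headline'), article))             # 'HELLO WORLD' (bound method is called)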
|
def upcoming_shabbat ( self ) :
"""Return the HDate for either the upcoming or current Shabbat .
If it is currently Shabbat , returns the HDate of the Saturday ."""
|
if self . is_shabbat :
return self
# If it ' s Sunday , fast forward to the next Shabbat .
saturday = self . gdate + datetime . timedelta ( ( 12 - self . gdate . weekday ( ) ) % 7 )
return HDate ( saturday , diaspora = self . diaspora , hebrew = self . hebrew )
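A standalone check of the weekday arithmetic (Python's ``weekday()`` gives Monday=0 .. Sunday=6, so Saturday is 5): ``(12 - weekday) % 7`` is the number of days to add to reach the coming Saturday:

import datetime

d = datetime.date(2024, 1, 1)          # a Monday, weekday() == 0
delta = (12 - d.weekday()) % 7         # 5 days until Saturday
print(d + datetime.timedelta(delta))   # 2024-01-06, a Saturday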
|
def _get_bb_addr_from_instr ( self , instr ) :
"""Returns the address of the methods basic block that contains the given
instruction .
: param instr : The index of the instruction ( within the current method ) .
: rtype : SootAddressDescriptor"""
|
current_method = self . state . addr . method
try :
bb = current_method . block_by_label [ instr ]
except KeyError :
l . error ( "Possible jump to a non-existing bb %s --> %d" , self . state . addr , instr )
raise IncorrectLocationException ( )
return SootAddressDescriptor ( current_method , bb . idx , 0 )
|
def link_reads ( self , analysistype ) :
"""Create folders with relative symlinks to the desired simulated / sampled reads . These folders will contain all
the reads created for each sample , and will be processed with GeneSippr and COWBAT pipelines
: param analysistype : Current analysis type . Will either be ' simulated ' or ' sampled '"""
|
logging . info ( 'Linking {at} reads' . format ( at = analysistype ) )
for sample in self . metadata : # Create the output directories
genesippr_dir = os . path . join ( self . path , 'genesippr' , sample . name )
sample . genesippr_dir = genesippr_dir
make_path ( genesippr_dir )
cowbat_dir = os . path . join ( self . path , 'cowbat' , sample . name )
sample . cowbat_dir = cowbat_dir
make_path ( cowbat_dir )
# Iterate through all the desired depths of coverage
for depth in self . read_depths :
for read_pair in self . read_lengths : # Create variables using the analysis type . These will be used in setting GenObject attributes
read_type = '{at}_reads' . format ( at = analysistype )
fastq_type = 'trimmed_{at}_fastq' . format ( at = analysistype )
# Link reads to both output directories
for output_dir in [ genesippr_dir , cowbat_dir ] : # If the original reads are shorter than the specified read length , the FASTQ files will exist ,
# but will be empty . Do not create links for these files
size = os . path . getsize ( sample [ read_type ] [ depth ] [ read_pair ] . forward_reads [ fastq_type ] )
if size > 20 : # Create relative symlinks to the FASTQ files - use the relative path from the desired
# output directory to the read storage path e . g .
# . . / . . / 2013 - SEQ - 0072 / simulated / 40/50_150 / simulated _ trimmed / 2013 - SEQ - 0072 _ simulated _ 40_50_150 _ R1 . fastq . gz
# is the relative path to the output _ dir . The link name is the base name of the reads
# joined to the desired output directory e . g .
# output _ dir / 2013 - SEQ - 0072/2013 - SEQ - 0072 _ simulated _ 40_50_150 _ R1 . fastq . gz
relative_symlink ( sample [ read_type ] [ depth ] [ read_pair ] . forward_reads [ fastq_type ] , output_dir )
# Original FASTQ files
relative_symlink ( sample . forward_fastq , output_dir )
relative_symlink ( sample . reverse_fastq , output_dir )
# Reverse reads
try :
size = os . path . getsize ( sample [ read_type ] [ depth ] [ read_pair ] . reverse_reads [ fastq_type ] )
if size > 20 :
relative_symlink ( sample [ read_type ] [ depth ] [ read_pair ] . reverse_reads [ fastq_type ] , output_dir )
except FileNotFoundError :
pass
|
def update ( self , reference , field_updates , option = None ) :
"""Add a " change " to update a document .
See
: meth : ` ~ . firestore _ v1beta1 . document . DocumentReference . update ` for
more information on ` ` field _ updates ` ` and ` ` option ` ` .
Args :
reference ( ~ . firestore _ v1beta1 . document . DocumentReference ) : A
document reference that will be updated in this batch .
field _ updates ( dict ) : Field names or paths to update and values
to update with .
option ( Optional [ ~ . firestore _ v1beta1 . client . WriteOption ] ) : A
write option to make assertions / preconditions on the server
state of the document before applying changes ."""
|
if option . __class__ . __name__ == "ExistsOption" :
raise ValueError ( "you must not pass an explicit write option to " "update." )
write_pbs = _helpers . pbs_for_update ( reference . _document_path , field_updates , option )
self . _add_write_pbs ( write_pbs )
|
def _InitializeGraph ( self , os_name , artifact_list ) :
"""Creates the nodes and directed edges of the dependency graph .
Args :
os _ name : String specifying the OS name .
artifact _ list : List of requested artifact names ."""
|
dependencies = artifact_registry . REGISTRY . SearchDependencies ( os_name , artifact_list )
artifact_names , attribute_names = dependencies
self . _AddAttributeNodes ( attribute_names )
self . _AddArtifactNodesAndEdges ( artifact_names )
|
def delete_zombie_checks ( self ) :
"""Remove checks that have a zombie status ( usually timeouts )
: return : None"""
|
id_to_del = [ ]
for chk in list ( self . checks . values ( ) ) :
if chk . status == ACT_STATUS_ZOMBIE :
id_to_del . append ( chk . uuid )
# a little pat on the back and off you go , thanks . . .
# * pat pat * GFTO , thks : )
for c_id in id_to_del :
del self . checks [ c_id ]
|
def status ( self , s = None ) :
"""Set / Get the status of the button ."""
|
if s is None :
return self . states [ self . _status ]
if isinstance ( s , str ) :
s = self . states . index ( s )
self . _status = s
self . textproperty . SetLineOffset ( self . offset )
self . actor . SetInput ( self . spacer + self . states [ s ] + self . spacer )
s = s % len ( self . colors )
# to avoid mismatch
self . textproperty . SetColor ( colors . getColor ( self . colors [ s ] ) )
bcc = numpy . array ( colors . getColor ( self . bcolors [ s ] ) )
self . textproperty . SetBackgroundColor ( bcc )
if self . showframe :
self . textproperty . FrameOn ( )
self . textproperty . SetFrameWidth ( self . framewidth )
self . textproperty . SetFrameColor ( numpy . sqrt ( bcc ) )
|
def on_remove ( self , callable_ ) :
"""Add a remove observer to this entity ."""
|
self . model . add_observer ( callable_ , self . entity_type , 'remove' , self . entity_id )
|
def setTransducer ( self , edfsignal , transducer ) :
"""Sets the transducer of signal edfsignal
: param edfsignal : int
: param transducer : str
Notes
This function is optional for every signal and can be called only after opening a file in writemode and before the first sample write action ."""
|
if ( edfsignal < 0 or edfsignal > self . n_channels ) :
raise ChannelDoesNotExist ( edfsignal )
self . channels [ edfsignal ] [ 'transducer' ] = transducer
self . update_header ( )
|
def load_pdb ( self , pdb_id , mapped_chains = None , pdb_file = None , file_type = None , is_experimental = True , set_as_representative = False , representative_chain = None , force_rerun = False ) :
"""Load a structure ID and optional structure file into the structures attribute .
Args :
pdb _ id ( str ) : PDB ID
mapped _ chains ( str , list ) : Chain ID or list of IDs which you are interested in
pdb _ file ( str ) : Path to PDB file
file _ type ( str ) : Type of PDB file
is _ experimental ( bool ) : If this structure file is experimental
set _ as _ representative ( bool ) : If this structure should be set as the representative structure
representative _ chain ( str ) : If ` ` set _ as _ representative ` ` is ` ` True ` ` , provide the representative chain ID
force _ rerun ( bool ) : If the PDB should be reloaded if it is already in the list of structures
Returns :
PDBProp : The object that is now contained in the structures attribute"""
|
if self . structures . has_id ( pdb_id ) : # Remove the structure if set to force rerun
if force_rerun :
existing = self . structures . get_by_id ( pdb_id )
self . structures . remove ( existing )
# Otherwise just retrieve it
else :
log . debug ( '{}: PDB ID already present in list of structures' . format ( pdb_id ) )
pdb = self . structures . get_by_id ( pdb_id )
if pdb_file :
pdb . load_structure_path ( pdb_file , file_type )
if mapped_chains :
pdb . add_mapped_chain_ids ( mapped_chains )
# Create a new StructProp entry
if not self . structures . has_id ( pdb_id ) :
if is_experimental :
pdb = PDBProp ( ident = pdb_id , mapped_chains = mapped_chains , structure_path = pdb_file , file_type = file_type )
else :
pdb = StructProp ( ident = pdb_id , mapped_chains = mapped_chains , structure_path = pdb_file , file_type = file_type )
self . structures . append ( pdb )
if set_as_representative : # Parse structure so chains are stored before setting representative
pdb . parse_structure ( )
self . _representative_structure_setter ( structprop = pdb , keep_chain = representative_chain , force_rerun = force_rerun )
return self . structures . get_by_id ( pdb_id )
|