signature
stringlengths 29
44.1k
| implementation
stringlengths 0
85.2k
|
---|---|
def index_next_crossing(value, array, starting_index=0, direction=1):
    """Scan ``array`` from ``starting_index`` and return the index of the
    first crossing point with ``value``.

    With ``direction=1`` an up-crossing is detected (``array[n] <= value``
    and ``array[n + 1] > value``); pass ``direction=-1`` for a down-crossing.
    Returns -1 when no crossing exists.
    """
    last_index = len(array) - 1
    n = starting_index
    while n < last_index:
        before = (value - array[n]) * direction
        after = (value - array[n + 1]) * direction
        if before >= 0 and after < 0:
            return n
        n += 1
    # no crossing found
    return -1
|
def _to_dict(self):
    """Return a json dictionary representing this model."""
    result = {}
    # Each attribute maps to a key of the same name; only set, non-None
    # attributes are serialized.
    for attr in ('scope', 'status', 'status_description', 'last_trained'):
        value = getattr(self, attr, None)
        if value is not None:
            result[attr] = value
    return result
|
def fork_exec(args, stdin='', **kwargs):
    """Do a fork-exec through the subprocess.Popen abstraction in a way
    that takes a stdin and returns (stdout, stderr).

    If ``stdin`` is ``bytes``, raw bytes are returned; otherwise the output
    streams are decoded.
    """
    # NOTE(review): `locale` is a module-level name not visible here;
    # presumably an encoding name string -- confirm against file top.
    if isinstance(stdin, bytes):
        source = stdin
        want_bytes = True
    else:
        source = stdin.encode(locale)
        want_bytes = False
    proc = Popen(args, stdin=PIPE, stdout=PIPE, stderr=PIPE, **kwargs)
    out, err = proc.communicate(source)
    if want_bytes:
        return out, err
    return (out.decode(locale), err.decode(locale))
|
def _truncate_bitmap(what):
    """Determine the index of the greatest byte that isn't all zeros, and
    return the bitmap that contains all the bytes up to and including that
    index.

    If every byte is zero, a single zero byte is returned (matching the
    original loop's behavior when no break occurs).

    @param what: a string of octets representing a bitmap.
    @type what: string
    @rtype: string
    """
    # Bug fix: the original loop left `i` unbound for empty input and then
    # raised NameError; an empty bitmap truncates to an empty string.
    if not what:
        return ''
    # `range` instead of py2-only `xrange`; behavior is identical.
    for i in range(len(what) - 1, -1, -1):
        if what[i] != '\x00':
            break
    return ''.join(what[0:i + 1])
|
def from_code(cls, schema,  # type: GraphQLSchema
              code,  # type: Union[str, Any]
              uptodate=None,  # type: Optional[bool]
              extra_namespace=None,  # type: Optional[Dict[str, Any]]
              ):  # type: (...) -> GraphQLCompiledDocument
    """Create a GraphQLDocument object from compiled code and the globals.

    This is used by the loaders and schema to create a document object.
    Plain source strings are compiled first under a synthetic filename.
    """
    if isinstance(code, string_types):
        code = compile(code, "<document>", "exec")
    namespace = {"__file__": code.co_filename}
    exec(code, namespace)
    if extra_namespace:
        namespace.update(extra_namespace)
    document = cls._from_namespace(schema, namespace)
    # rv._uptodate = uptodate
    return document
|
def generate(self, tool, copied=False, copy=False):
    """Generate project files for one or more export tools.

    :param tool: tool name (or set of tools) passed to _validate_tools().
    :param copied: forwarded to _fill_export_dict() when building the export
        dictionary.
    :param copy: when True, copy sources to the output directory first.
    :return: 0 on success, -1 if tool validation failed or any tool was
        unknown; generated files are stored on ``self.generated_files``.
    """
    tools = self._validate_tools(tool)
    if tools == -1:
        return -1
    generated_files = {}
    result = 0
    for export_tool in tools:
        exporter = ToolsSupported().get_tool(export_tool)
        # None is an error: record failure but keep processing other tools.
        if exporter is None:
            result = -1
            logger.debug("Tool: %s was not found" % export_tool)
            continue
        self._fill_export_dict(export_tool, copied)
        if copy:
            logger.debug("Copying sources to the output directory")
            self._copy_sources_to_generated_destination()
        # dump a log file if debug is enabled
        if logger.isEnabledFor(logging.DEBUG):
            dump_data = {}
            dump_data['common'] = self.project['common']
            dump_data['tool_specific'] = self.project['tool_specific']
            dump_data['merged'] = self.project['export']
            # NOTE(review): delay="true" passes a truthy *string* where
            # FileHandler expects a bool (works, but smells), and a new
            # handler is added on every loop iteration -- confirm intended.
            handler = logging.FileHandler(os.path.join(os.getcwd(), "%s.log" % self.name), "w", encoding=None, delay="true")
            handler.setLevel(logging.DEBUG)
            logger.addHandler(handler)
            logger.debug("\n" + yaml.dump(dump_data))
        files = exporter(self.project['export'], self.settings).export_project()
        generated_files[export_tool] = files
    self.generated_files = generated_files
    return result
|
def _expand_paths(self, paths):
    """Expand $vars in an array of paths, e.g. from a 'build' block."""
    # Expand each path, then shell-escape, then join on single spaces.
    expanded = (self._expand(p) for p in ninja_syntax.as_list(paths))
    return ' '.join(self._shell_escape(p) for p in expanded)
|
def transitions(self):
    """Transition matrix (sparse matrix).

    Is conjugate to the symmetrized transition matrix via::

        self.transitions = self.Z * self.transitions_sym / self.Z

    where ``self.Z`` is the diagonal matrix storing the normalization of the
    underlying kernel matrix.

    Notes
    -----
    This has not been tested, in contrast to ``transitions_sym``.
    """
    Z = self.Z
    # Invert the diagonal normalization matrix, keeping sparsity if present.
    Zinv = Z.power(-1) if issparse(Z) else np.diag(1. / np.diag(Z))
    return Z.dot(self.transitions_sym).dot(Zinv)
|
def fmt_type(data_type):
    """Return a JSDoc annotation for a data type.

    May contain a union of enumerated subtypes.
    """
    # Simple case: no enumerated subtypes, just the plain type name.
    if not (is_struct_type(data_type) and data_type.has_enumerated_subtypes()):
        return fmt_type_name(data_type)
    names = [fmt_type_name(subtype)
             for _, subtype in data_type.get_all_subtypes_with_tags()]
    if data_type.is_catch_all():
        names.append(fmt_type_name(data_type))
    return fmt_jsdoc_union(names)
|
def zdecr(self, name, key, amount=1):
    """Decrease the value of ``key`` in zset ``name`` by ``amount``.

    If no key exists, the value will be initialized as ``0 - amount``.

    :param string name: the zset name
    :param string key: the key name
    :param int amount: decrement step (must be a positive integer)
    :return: the integer value of ``key`` in zset ``name``
    :rtype: int
    """
    checked_amount = get_positive_integer('amount', amount)
    return self.execute_command('zdecr', name, key, checked_amount)
|
def _parse_alias_rule(alias, alias_spec):
    """Parse an alias rule.

    The first token is the canonical name of the version. The remaining
    tokens are key="quoted value" pairs that specify parameters; these
    parameters are ignored by AVersion, but may be used by the application.

    :param alias: The alias name.
    :param alias_spec: The alias text, described above.
    :returns: A dictionary of three keys: "alias" is the alias name;
        "version" is the canonical version identification string; and
        "params" is a dictionary of parameters.
    """
    result = dict(alias=alias, params={})
    tokens = (t for t in quoted_split(alias_spec, ' ', quotes='"\'') if t)
    for token in tokens:
        if 'version' not in result:
            # First non-empty token is the canonical version name.
            result['version'] = token
        else:
            # Remaining tokens are key="quoted value" pairs.
            key, _eq, value = token.partition('=')
            _set_key('alias.%s' % alias, result['params'], key, value)
    # Make sure we have a canonical version
    if 'version' not in result:
        raise KeyError("Cannot determine canonical version for alias %r" % alias)
    return result
|
def run(self):
    """Run FastGapFill command.

    Builds an extended model with penalties, runs fastgapfill() over a core
    reaction set, and prints the model reactions followed by the induced
    (gap-filling) reactions as tab-separated lines.
    """
    # Create solver
    solver = self._get_solver()

    # Load compound information
    def compound_name(id):
        # Map a compound id to its human-readable name; fall back to the id.
        if id not in self._model.compounds:
            return id
        return self._model.compounds[id].properties.get('name', id)

    # TODO: The exchange and transport reactions have tuple names. This
    # means that in Python 3 the reactions can no longer be directly
    # compared (e.g. while sorting) so define this helper function as a
    # workaround.
    def reaction_key(r):
        return r if isinstance(r, tuple) else (r,)

    # Calculate penalty if penalty file exists
    penalties = {}
    if self._args.penalty is not None:
        for line in self._args.penalty:
            # Strip trailing '#' comments; skip blank lines.
            line, _, comment = line.partition('#')
            line = line.strip()
            if line == '':
                continue
            rxnid, penalty = line.split(None, 1)
            penalties[rxnid] = float(penalty)
    model_extended, weights = create_extended_model(self._model, db_penalty=self._args.db_penalty, ex_penalty=self._args.ex_penalty, tp_penalty=self._args.tp_penalty, penalties=penalties)
    epsilon = self._args.epsilon
    # Core set: all non-exchange reactions, or an explicit user subset.
    core = set()
    if self._args.subset is None:
        for r in self._mm.reactions:
            if not self._mm.is_exchange(r):
                core.add(r)
    else:
        for line in self._args.subset:
            line = line.strip()
            if line == '':
                continue
            core.add(line)
    induced = fastgapfill(model_extended, core, weights=weights, epsilon=epsilon, solver=solver)
    # Report existing model reactions with weight 0 ...
    for reaction_id in sorted(self._mm.reactions):
        rx = self._mm.get_reaction(reaction_id)
        rxt = rx.translated_compounds(compound_name)
        print('{}\t{}\t{}\t{}'.format(reaction_id, 'Model', 0, rxt))
    # ... then the induced additions that are not already in the model.
    for rxnid in sorted(induced, key=reaction_key):
        if self._mm.has_reaction(rxnid):
            continue
        rx = model_extended.get_reaction(rxnid)
        rxt = rx.translated_compounds(compound_name)
        print('{}\t{}\t{}\t{}'.format(rxnid, 'Add', weights.get(rxnid, 1), rxt))
|
def decode_dict(data, encoding=None, errors='strict', keep=False, normalize=False, preserve_dict_class=False, preserve_tuples=False, to_str=False):
    '''Decode all string values to Unicode. Optionally use to_str=True to ensure
    strings are str types and not unicode on Python 2.

    :param data: mapping whose keys and values are decoded recursively
    :param encoding: encoding passed through to the salt string helper
    :param errors: codec error scheme (e.g. 'strict', 'replace')
    :param keep: when True, keep values as-is on UnicodeDecodeError instead
        of re-raising
    :param normalize: forwarded to the salt string helper
    :param preserve_dict_class: keep the input's dict subclass (e.g.
        OrderedDict) instead of returning a plain dict
    :param preserve_tuples: decode tuples as tuples instead of lists
    :param to_str: decode to str instead of unicode (Python 2)
    :return: a new dict with decoded keys and values
    '''
    _decode_func = salt.utils.stringutils.to_unicode if not to_str else salt.utils.stringutils.to_str
    # Make sure we preserve OrderedDicts
    rv = data.__class__() if preserve_dict_class else {}
    for key, value in six.iteritems(data):
        if isinstance(key, tuple):
            key = decode_tuple(key, encoding, errors, keep, normalize, preserve_dict_class, to_str) if preserve_tuples else decode_list(key, encoding, errors, keep, normalize, preserve_dict_class, preserve_tuples, to_str)
        else:
            try:
                key = _decode_func(key, encoding, errors, normalize)
            except TypeError:
                # to_unicode raises a TypeError when input is not a
                # string/bytestring/bytearray. This is expected and simply
                # means we are going to leave the value as-is.
                pass
            except UnicodeDecodeError:
                if not keep:
                    raise
        # Values are decoded recursively based on their container type.
        if isinstance(value, list):
            value = decode_list(value, encoding, errors, keep, normalize, preserve_dict_class, preserve_tuples, to_str)
        elif isinstance(value, tuple):
            value = decode_tuple(value, encoding, errors, keep, normalize, preserve_dict_class, to_str) if preserve_tuples else decode_list(value, encoding, errors, keep, normalize, preserve_dict_class, preserve_tuples, to_str)
        elif isinstance(value, Mapping):
            value = decode_dict(value, encoding, errors, keep, normalize, preserve_dict_class, preserve_tuples, to_str)
        else:
            try:
                value = _decode_func(value, encoding, errors, normalize)
            except TypeError:
                # Non-string scalar: leave the value as-is.
                pass
            except UnicodeDecodeError:
                if not keep:
                    raise
        rv[key] = value
    return rv
|
def interpolate_holes(self):
    """Linearly interpolate over holes in this collection to make it continuous.

    Returns:
        continuous_collection: A HourlyContinuousCollection with the same data
            as this collection but with missing data filled by means of a
            linear interpolation.
    """
    # validate analysis_period and use the resulting period to generate datetimes
    assert self.validated_a_period is True, 'validated_a_period property must be' ' True to use interpolate_holes(). Run validate_analysis_period().'
    # minutes between two consecutive steps of the analysis period
    mins_per_step = int(60 / self.header.analysis_period.timestep)
    new_datetimes = self.header.analysis_period.datetimes
    new_values = []
    # if the first steps are a hole, duplicate the first value.
    i = 0  # index into new_datetimes of the next expected step
    if new_datetimes[0] != self.datetimes[0]:
        n_steps = int((self.datetimes[0].moy - new_datetimes[0].moy) / mins_per_step)
        new_values.extend([self._values[0]] * n_steps)
        # NOTE(review): i is set to n_steps - 1 (not n_steps) after appending
        # n_steps copies -- verify this offset against the loop below.
        i = n_steps - 1
    # go through the values interpolating any holes.
    for j in xrange(len(self._values)):
        if new_datetimes[i] == self.datetimes[j]:
            # there is no hole.
            new_values.append(self._values[j])
            i += 1
        else:
            # there is a hole between this step and the previous step:
            # interpolate from the previous value up to this one.
            n_steps = int((self.datetimes[j].moy - new_datetimes[i].moy) / mins_per_step)
            intp_vals = self._xxrange(self._values[j - 1], self._values[j], n_steps)
            new_values.extend(list(intp_vals)[1:] + [self._values[j]])
            i += n_steps
    # if the last steps are a hole duplicate the last value.
    if len(new_values) != len(new_datetimes):
        n_steps = len(new_datetimes) - len(new_values)
        new_values.extend([self._values[-1]] * n_steps)
    # build the new continuous data collection.
    return HourlyContinuousCollection(self.header.duplicate(), new_values)
|
def get_string(self, key, default=UndefinedKey):
    """Return string representation of value found at key.

    :param key: key to use (dot separated). E.g., a.b.c
    :type key: basestring
    :param default: default value if key not found
    :type default: basestring
    :return: string value, or None when the stored value is None
    :type return: basestring
    """
    value = self.get(key, default)
    if value is None:
        return None
    # Booleans are rendered lowercase ('true'/'false') for config output.
    if isinstance(value, bool):
        return unicode(value).lower()
    return unicode(value)
|
def auth_kubernetes(self, role, jwt, use_token=True, mount_point='kubernetes'):
    """POST /auth/<mount_point>/login

    :param role: Name of the role against which the login is being attempted.
    :type role: str.
    :param jwt: Signed JSON Web Token (JWT) for authenticating a service account.
    :type jwt: str.
    :param use_token: if True, uses the token in the response received from the
        auth request to set the "token" attribute on the current Client class
        instance.
    :type use_token: bool.
    :param mount_point: The "path" the k8s auth backend was mounted on. Vault
        currently defaults to "kubernetes".
    :type mount_point: str.
    :return: Parsed JSON response from the config POST request.
    :rtype: dict.
    """
    url = 'v1/auth/{0}/login'.format(mount_point)
    payload = {'role': role, 'jwt': jwt}
    return self.login(url, json=payload, use_token=use_token)
|
def apply_effect(layer, image):
    """Apply effect to the image.

    .. note:: Correct effect order is the following. All the effects are first
        applied to the original image then blended together.

        * dropshadow
        * outerglow
        * (original)
        * patternoverlay
        * gradientoverlay
        * coloroverlay
        * innershadow
        * innerglow
        * bevelemboss
        * satin
        * stroke
    """
    # Each pass scans all effects for one class, preserving the blend order:
    # pattern, then gradient, then solid color.
    passes = (
        ('PatternOverlay', lambda e: draw_pattern_fill(image, layer._psd, e.value)),
        ('GradientOverlay', lambda e: draw_gradient_fill(image, e.value)),
        ('ColorOverlay', lambda e: draw_solid_color_fill(image, e.value)),
    )
    for class_name, handler in passes:
        for effect in layer.effects:
            if effect.__class__.__name__ == class_name:
                handler(effect)
|
def loadtxt(fname, dtype="float", delimiter="\t", usecols=None, comments="#"):
    r"""Load unyt_arrays with unit information from a text file. Each row in the
    text file must have the same number of values.

    Parameters
    ----------
    fname : str
        Filename to read.
    dtype : data-type, optional
        Data-type of the resulting array; default: float.
    delimiter : str, optional
        The string used to separate values. By default, this is any
        whitespace.
    usecols : sequence, optional
        Which columns to read, with 0 being the first. For example,
        ``usecols = (1,4,5)`` will extract the 2nd, 5th and 6th columns.
        The default, None, results in all columns being read.
    comments : str, optional
        The character used to indicate the start of a comment;
        default: '#'.

    Examples
    --------
    >>> temp, velx = loadtxt(
    ...     "sphere.dat", usecols=(1,2), delimiter="\t")  # doctest: +SKIP
    """
    next_one = False
    units = []
    num_cols = -1
    # Bug fix: the file handle was leaked if an exception occurred before the
    # explicit close() (e.g. float() failing on a malformed value); a context
    # manager guarantees closure.
    with open(fname, "r") as f:
        for line in f:
            words = line.strip().split()
            if len(words) == 0:
                continue
            # NOTE(review): this assumes `comments` is a single character.
            if line[0] == comments:
                if next_one:
                    units = words[1:]
                # A comment line of the form "# Units" means the next comment
                # line carries the per-column unit names.
                if len(words) == 2 and words[1] == "Units":
                    next_one = True
            else:  # Here we catch the first line of numbers
                col_words = line.strip().split(delimiter)
                for word in col_words:
                    float(word)  # validate that every column parses
                num_cols = len(col_words)
                break
    if len(units) != num_cols:
        units = ["dimensionless"] * num_cols
    arrays = np.loadtxt(fname, dtype=dtype, comments=comments, delimiter=delimiter,
                        converters=None, unpack=True, usecols=usecols, ndmin=0)
    if len(arrays.shape) < 2:
        arrays = [arrays]
    if usecols is not None:
        units = [units[col] for col in usecols]
    ret = tuple(unyt_array(arr, unit) for arr, unit in zip(arrays, units))
    if len(ret) == 1:
        return ret[0]
    return ret
|
def send_measurements(self, list_of_measurements):
    """Posts data about the provided list of Measurement objects to the
    Station API. The objects may be related to different station IDs.

    :param list_of_measurements: list of *pyowm.stationsapi30.measurement.Measurement*
        objects to be posted
    :type list_of_measurements: list of *pyowm.stationsapi30.measurement.Measurement*
        instances
    :returns: `None` if creation is successful, an exception otherwise
    """
    assert list_of_measurements is not None
    assert all(m.station_id is not None for m in list_of_measurements)
    payload = [self._structure_dict(m) for m in list_of_measurements]
    status, _ = self.http_client.post(
        MEASUREMENTS_URI,
        params={'appid': self.API_key},
        data=payload,
        headers={'Content-Type': 'application/json'})
|
def unsign(self, token):
    """Extract the data from a signed ``token``."""
    # Only pass max_age when configured, so the signer's default applies
    # otherwise.
    kwargs = {} if self.max_age is None else {'max_age': self.max_age}
    data = self.signer.unsign(token, **kwargs)
    return signing.b64_decode(data.encode())
|
def CreateBitmap(self, artid, client, size):
    """Adds custom images to Artprovider"""
    try:
        icon_path = self.extra_icons[artid]
    except KeyError:
        # Not one of ours: defer to the stock art provider.
        return wx.ArtProvider.GetBitmap(artid, client, size)
    return wx.Bitmap(icon_path, wx.BITMAP_TYPE_ANY)
|
def _author_line(self):
    """Helper method to concatenate author and institution values, if necessary.

    :return: string
    """
    if self.author:
        if self.institution:
            return self.author + ";" + self.institution
        return self.author
    return self.institution
|
def __make_dynamic(self, method):
    '''Create a method for each of the exit codes.'''
    def dynamic(*args):
        # Record the status, emit output (None when no args were given),
        # then terminate with the matching exit code.
        self.plugin_info['status'] = method
        self.output(args if args else None)
        sys.exit(getattr(self.exit_code, method))
    name = method.lower()
    dynamic.__doc__ = "%s method" % name
    dynamic.__name__ = name
    setattr(self, name, dynamic)
|
def parseline(self, line: str) -> Tuple[str, str, str]:
    """Parse the line into a command name and a string containing the arguments.

    NOTE: This is an override of a parent class method. It is only used by
    other parent class methods. Different from the parent class method, this
    ignores self.identchars.

    :param line: line read by readline
    :return: tuple containing (command, args, line)
    """
    parsed = self.statement_parser.parse_command_only(line)
    return parsed.command, parsed.args, parsed.command_and_args
|
def fromProfile(cls, profile):
    """Return an `Origin` from a given configuration profile.

    :see: `ProfileStore`.
    """
    return cls(bones.SessionAPI.fromProfile(profile))
|
def AnalizarAjusteCredito(self):
    "Analyze AFIP's response for a credit adjustment (Ajuste Credito)."
    # Merge the settlement data, the 'ajusteCredito' section of the
    # adjustment response, and the cached credit-adjustment dict.
    liq = {}
    if hasattr(self, "liquidacion") and self.liquidacion:
        liq.update(self.liquidacion)
    if hasattr(self, "ajuste") and 'ajusteCredito' in self.ajuste:
        liq.update(self.ajuste['ajusteCredito'])
    if self.__ajuste_credito:
        liq.update(self.__ajuste_credito)
    self.AnalizarLiquidacion(aut=self.__ajuste_credito, liq=liq, ajuste=True)
    # NOTE(review): this passes __ajuste_base (not __ajuste_credito) with
    # base=False -- confirm against AnalizarAjusteDebito's counterpart.
    self.AnalizarAjuste(self.__ajuste_base, base=False)
    # general data
    return True
|
def merge_split(*paths):
    """Merge paths into a single path delimited by colons and split on colons
    to return a list of paths.

    :param paths: a variable length list of path strings
    :return: a list of paths from the merged path list split by colons
    """
    # Drop falsy entries (None, ''), merge, then split and drop empty parts.
    merged = ':'.join(p for p in paths if p)
    return [part for part in merged.split(':') if part]
|
def get_times_from_cli(cli_token):
    """Convert a CLI token to a datetime tuple.

    Argument:
        cli_token (str): an isoformat datetime token ([ISO date]:[ISO date])
            or a special value among:
            * thisday
            * thisweek
            * thismonth
            * thisyear

    Returns:
        tuple: a datetime.date objects couple, where the first item is
            the start of a time frame and the second item the end of the
            time frame. Both elements can also be None, if no date was
            provided.

    Raises:
        ValueError: when the CLI token is not in the right format
            (no colon in the token, not one of the special values, dates
            are not in proper ISO-8601 format.)

    See Also:
        `ISO-8601 specification <https://en.wikipedia.org/wiki/ISO_8601>`_.
    """
    today = datetime.date.today()
    if cli_token == "thisday":
        return today, today
    # Bug fix: the relative tokens returned (today, past-date), i.e. the
    # documented (start, end) order reversed; the frame starts in the past
    # and ends today.
    elif cli_token == "thisweek":
        return today - dateutil.relativedelta.relativedelta(days=7), today
    elif cli_token == "thismonth":
        return today - dateutil.relativedelta.relativedelta(months=1), today
    elif cli_token == "thisyear":
        return today - dateutil.relativedelta.relativedelta(years=1), today
    else:
        try:
            start_date, stop_date = cli_token.split(':')
        except ValueError:
            raise ValueError("--time parameter must contain a colon (:)")
        if not start_date and not stop_date:  # ':', no start date, no stop date
            return None, None
        try:
            start_date = date_from_isoformat(start_date) if start_date else None
            stop_date = date_from_isoformat(stop_date) if stop_date else None
        except ValueError:
            raise ValueError("--time parameter was not provided ISO formatted dates")
        return start_date, stop_date
|
def diff_colormap():
    "Custom colormap to map low values to black or another color."
    # Six black entries at the bottom, then a copper ramp for the rest.
    black = np.atleast_2d([0., 0., 0., 1.])
    bottom = np.repeat(black, 6, axis=0)
    middle = plt.cm.copper(np.linspace(0, 1, 250))
    stacked = np.vstack((bottom, middle))
    return mpl.colors.LinearSegmentedColormap.from_list('diff_colormap', stacked)
|
def _from_fields(self, fields):
    '''Parse from generator. Raise StopIteration if the property could
    not be read.'''
    # Build the numpy scalar converter first, then consume one field.
    converter = _np.dtype(self.dtype()).type
    return converter(next(fields))
|
def _set_retain(self, v, load=False):
    """Setter method for retain, mapped from YANG variable
    /rbridge_id/router/router_bgp/address_family/l2vpn/evpn/retain (container)

    If this variable is read-only (config: false) in the
    source YANG file, then _set_retain is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_retain() directly.

    :param v: the new value for the retain container.
    :param load: present for generated-setter API symmetry; not used here.
    :raises ValueError: when v cannot be coerced into the container type.
    """
    if hasattr(v, "_utype"):
        # Unwrap a typed value back to its native representation first.
        v = v._utype(v)
    try:
        t = YANGDynClass(v, base=retain.retain, is_container='container', presence=False, yang_name="retain", rest_name="retain", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Retain route targets'}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='container', is_config=True)
    except (TypeError, ValueError):
        raise ValueError({'error-string': """retain must be of a type compatible with container""", 'defined-type': "container", 'generated-type': """YANGDynClass(base=retain.retain, is_container='container', presence=False, yang_name="retain", rest_name="retain", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Retain route targets'}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='container', is_config=True)""", })
    self.__retain = t
    if hasattr(self, '_set'):
        self._set()
|
def cache_card(self, card):
    """Cache the card for faster future lookups.

    Removes the oldest card when the card cache stores more cards than this
    library's cache limit.
    """
    code = card.code
    self.card_cache[code] = card
    # Move the code to the most-recently-used end of the LRU list.
    try:
        self.card_cache_list.remove(code)
    except ValueError:
        pass
    self.card_cache_list.append(code)
    # Evict the least-recently-used entry when over the limit.
    if len(self.card_cache_list) > self.cachelimit:
        oldest = self.card_cache_list.pop(0)
        del self.card_cache[oldest]
|
def marvcli_run(ctx, datasets, deps, excluded_nodes, force, force_dependent, force_deps, keep, keep_going, list_nodes, list_dependent, selected_nodes, update_detail, update_listing, cachesize, collections):
    """Run nodes for selected datasets.

    Datasets are specified by a list of set ids, or --collection
    <name>, use --collection=* to run for all collections. --node in
    conjunction with --collection=* will pick those collections for
    which the selected nodes are configured.

    Set ids may be abbreviated to any uniquely identifying
    prefix. Suffix a prefix by '+' to match multiple.
    """
    # Mutually-exclusive / prerequisite option checks.
    if collections and datasets:
        ctx.fail('--collection and DATASETS are mutually exclusive')
    if list_dependent and not selected_nodes:
        ctx.fail('--list-dependent needs at least one selected --node')
    if not any([datasets, collections, list_nodes]):
        click.echo(ctx.get_help())
        ctx.exit(1)
    # --force-deps implies forcing and forced dependency resolution.
    deps = 'force' if force_deps else deps
    force = force_deps or force
    site = create_app().site
    if '*' in collections:
        if selected_nodes:
            # Restrict to collections that configure all selected nodes.
            collections = [k for k, v in site.collections.items() if set(v.nodes).issuperset(selected_nodes)]
            if not collections:
                ctx.fail('No collections have all selected nodes')
        else:
            # '*' without --node means all collections (None = no filter).
            collections = None
    else:
        for col in collections:
            if col not in site.collections:
                ctx.fail('Unknown collection: {}'.format(col))
    if list_nodes:
        # Print configured nodes per collection and exit.
        for col in (collections or sorted(site.collections.keys())):
            click.echo('{}:'.format(col))
            for name in sorted(site.collections[col].nodes):
                if name == 'dataset':
                    continue
                click.echo(' {}'.format(name))
        return
    if list_dependent:
        # Print nodes depending on the selected nodes per collection and exit.
        for col in (collections or sorted(site.collections.keys())):
            click.echo('{}:'.format(col))
            dependent = {x for name in selected_nodes for x in site.collections[col].nodes[name].dependent}
            for name in sorted(x.name for x in dependent):
                click.echo(' {}'.format(name))
        return
    errors = []
    setids = [SetID(x) for x in parse_setids(datasets)]
    if not setids:
        # No explicit datasets: run over all non-discarded, non-missing sets,
        # optionally restricted to the selected collections.
        query = db.session.query(Dataset.setid).filter(Dataset.discarded.isnot(True)).filter(Dataset.status.op('&')(STATUS_MISSING) == 0)
        if collections is not None:
            query = query.filter(Dataset.collection.in_(collections))
        setids = (SetID(x[0]) for x in query)
    for setid in setids:
        if IPDB:
            # Debugger mode: let exceptions propagate for post-mortem.
            site.run(setid, selected_nodes, deps, force, keep, force_dependent, update_detail, update_listing, excluded_nodes, cachesize=cachesize)
        else:
            try:
                site.run(setid, selected_nodes, deps, force, keep, force_dependent, update_detail, update_listing, excluded_nodes, cachesize=cachesize)
            except UnknownNode as e:
                ctx.fail('Collection {} has no node {}'.format(*e.args))
            except NoResultFound:
                click.echo('ERROR: unknown {!r}'.format(setid), err=True)
                if not keep_going:
                    raise
            except BaseException as e:
                errors.append(setid)
                if isinstance(e, KeyboardInterrupt):
                    log.warn('KeyboardInterrupt: aborting')
                    raise
                elif isinstance(e, DirectoryAlreadyExists):
                    click.echo("""
ERROR: Directory for node run already exists:
{!r}
In case no other node run is in progress, this is a bug which you are kindly
asked to report, providing information regarding any previous, failed node runs.
""".format(e.args[0]), err=True)
                    if not keep_going:
                        ctx.abort()
                else:
                    log.error('Exception occured for dataset %s:', setid, exc_info=True)
                    log.error('Error occured for dataset %s: %s', setid, e)
                    if not keep_going:
                        ctx.exit(1)
    if errors:
        log.error('There were errors for %r', errors)
|
def pointm(self, x, y, m=None):
    """Creates a POINTM shape.

    If the m (measure) value is not set, it defaults to NoData.
    """
    point_shape = Shape(POINTM)
    point_shape.points.append([x, y, m])
    self.shape(point_shape)
|
def wraps(wrapped, assigned=functools.WRAPPER_ASSIGNMENTS, updated=functools.WRAPPER_UPDATES):
    """Cython-compatible functools.wraps implementation."""
    if is_cython_function(wrapped):
        # Cython functions cannot have their attributes copied over; fall
        # back to an identity decorator.
        return lambda wrapper: wrapper
    return functools.wraps(wrapped, assigned, updated)
|
def graph_from_edges(edges: Iterable[Edge], **kwargs) -> BELGraph:
    """Build a BEL graph from edges."""
    graph = BELGraph(**kwargs)
    # Each edge knows how to insert itself into the graph.
    for current_edge in edges:
        current_edge.insert_into_graph(graph)
    return graph
|
def get_range_around(range_value, current_item, padding):
    """Returns a range of numbers around the given number.

    This is useful for pagination, where you might want to show something
    like this::

        << < ... 4 5 (6) 7 8 .. > >>

    In this example `6` would be the current page and we show 2 items around
    that page (including the page itself).

    Usage::

        {% load libs_tags %}
        {% get_range_around page_obj.paginator.num_pages page_obj.number 5
          as pages %}

    :param range_amount: Number of total items in your range (1 indexed)
    :param current_item: The item around which the result should be centered
      (1 indexed)
    :param padding: Number of items to show left and right from the current
      item.
    """
    window = padding * 2 + 1
    full_range = range(1, range_value + 1)
    # Whole range fits in the window: no padding markers needed.
    if range_value <= window:
        return {'range_items': full_range, 'left_padding': False, 'right_padding': False}
    # Near the left edge: take the first `window` items.
    if current_item <= padding:
        items = full_range[:window]
    # Near the right edge: take the last `window` items.
    elif current_item >= range_value - padding:
        items = full_range[-window:]
    else:
        # Centered window strictly inside the range: padding on both sides.
        items = range(current_item - padding, current_item + padding + 1)
        return {'range_items': items, 'left_padding': True, 'right_padding': True}
    return {'range_items': items, 'left_padding': items[0] > 1, 'right_padding': items[-1] < range_value}
|
def about(self):
    """Shows the about message window."""
    dialog = AboutDialog(parent=self)
    dialog.show()
    dialog.addDependencyInfo()
|
def load_children(self):
    """If the Shard doesn't have any children, tries to find some from DescribeStream.

    If the Shard is open this won't find any children, so an empty response doesn't
    mean the Shard will **never** have children.

    :return: this shard's (possibly empty) list of child ``Shard`` objects.
    """
    # Child count is fixed the first time any of the following happen:
    # 0 :: stream closed or throughput decreased
    # 1 :: shard was open for ~4 hours
    # 2 :: throughput increased
    # Already resolved once -- the child set is immutable after that point.
    if self.children:
        return self.children
    # ParentShardId -> [Shard, ...]
    by_parent = collections.defaultdict(list)
    # ShardId -> Shard
    by_id = {}
    for shard in self.session.describe_stream(stream_arn=self.stream_arn, first_shard=self.shard_id)["Shards"]:
        # Register under the raw dict's parent id *before* wrapping, then
        # rebind the loop variable to the wrapped Shard object.
        parent_list = by_parent[shard.get("ParentShardId")]
        shard = Shard(stream_arn=self.stream_arn, shard_id=shard["ShardId"], parent=shard.get("ParentShardId"), session=self.session)
        parent_list.append(shard)
        by_id[shard.shard_id] = shard
    # Find this shard when looking up shards by ParentShardId
    by_id[self.shard_id] = self
    # Insert this shard's children, then handle its child's descendants etc.
    # BFS over the parent->children mapping, rewiring each child's string
    # parent id into an object reference as we go.
    to_insert = collections.deque(by_parent[self.shard_id])
    while to_insert:
        shard = to_insert.popleft()
        # ParentShardId -> Shard
        shard.parent = by_id[shard.parent]
        shard.parent.children.append(shard)
        # Continue for any shards that have this shard as their parent
        to_insert.extend(by_parent[shard.shard_id])
    return self.children
|
async def login(url, *, username=None, password=None, insecure=False):
    """Connect to MAAS at `url` with a user name and password.

    :param url: The URL of MAAS, e.g. http://maas.example.com:5240/MAAS/
    :param username: The user name to use, e.g. fred.
    :param password: The user's password.
    :param insecure: Whether to check TLS certificates when using HTTPS.
    :return: A client object.
    """
    # Both imports are deliberately deferred until call time.
    from .facade import Client
    from .viscera import Origin
    profile, origin = await Origin.login(
        url, username=username, password=password, insecure=insecure)
    return Client(origin)
|
async def wait_for_read_result(self):
    """Utility coroutine: poll until callback data arrives, then return it.

    The pending ``callback_data`` buffer is cleared before returning so the
    next read starts fresh.

    @return: Returns resultant data from callback
    """
    # Busy-wait with a tiny sleep so other tasks can run while we poll.
    while not self.callback_data:
        await asyncio.sleep(0.001)
    result, self.callback_data = self.callback_data, []
    return result
|
def _ancestry_line ( self ) :
'''Returns the ancestry of this dict , back to the first dict that we don ' t
recognize or that has more than one backer .'''
|
b = self . _get_backers ( )
while len ( b ) == 1 :
yield b [ 0 ]
if not hasattr ( b [ 0 ] , '_get_backers' ) :
break
b = b [ 0 ] . _get_backers ( )
|
def wrap(cls, private_key, algorithm):
    """Wraps a private key in a PrivateKeyInfo structure

    :param private_key:
        A byte string or Asn1Value object of the private key

    :param algorithm:
        A unicode string of "rsa", "dsa" or "ec"

    :raises TypeError: if private_key is neither bytes nor an Asn1Value
    :raises ValueError: if algorithm is not one of the supported names

    :return:
        A PrivateKeyInfo object
    """
    if not isinstance(private_key, byte_cls) and not isinstance(private_key, Asn1Value):
        raise TypeError(unwrap(
            '''
            private_key must be a byte string or Asn1Value, not %s
            ''',
            type_name(private_key)
        ))
    if algorithm == 'rsa':
        # RSA: the whole key stays inline; algorithm parameters are NULL.
        if not isinstance(private_key, RSAPrivateKey):
            private_key = RSAPrivateKey.load(private_key)
        params = Null()
    elif algorithm == 'dsa':
        if not isinstance(private_key, DSAPrivateKey):
            private_key = DSAPrivateKey.load(private_key)
        # DSA: domain parameters (p, q, g) move into the algorithm identifier;
        # only the private integer remains as the key material.
        params = DSAParams()
        params['p'] = private_key['p']
        params['q'] = private_key['q']
        params['g'] = private_key['g']
        public_key = private_key['public_key']
        private_key = private_key['private_key']
    elif algorithm == 'ec':
        if not isinstance(private_key, ECPrivateKey):
            private_key = ECPrivateKey.load(private_key)
        else:
            # Copy before mutating so the caller's object is left untouched.
            private_key = private_key.copy()
        # EC: curve parameters move into the algorithm identifier.
        params = private_key['parameters']
        del private_key['parameters']
    else:
        raise ValueError(unwrap(
            '''
            algorithm must be one of "rsa", "dsa", "ec", not %s
            ''',
            repr(algorithm)
        ))
    private_key_algo = PrivateKeyAlgorithm()
    private_key_algo['algorithm'] = PrivateKeyAlgorithmId(algorithm)
    private_key_algo['parameters'] = params
    container = cls()
    container._algorithm = algorithm
    container['version'] = Integer(0)
    container['private_key_algorithm'] = private_key_algo
    container['private_key'] = private_key
    # Here we save the DSA public key if possible since it is not contained
    # within the PKCS#8 structure for a DSA key
    if algorithm == 'dsa':
        container._public_key = public_key
    return container
|
def refactor_ifs(stmnt, ifs):
    '''Unfold a BoolOp chain produced by `if` clauses in a list comprehension.

    Each condition is appended to ``ifs`` (negated when the operator is
    ``or``) and the final, non-BoolOp value is returned.
    '''
    # Iterative form of the original tail recursion.
    while isinstance(stmnt, _ast.BoolOp):
        condition, remainder = stmnt.values
        if isinstance(stmnt.op, _ast.Or):
            # `a or b` in this context means: keep going unless NOT a.
            condition = _ast.UnaryOp(op=_ast.Not(), operand=condition, lineno=0, col_offset=0)
        ifs.append(condition)
        stmnt = remainder
    return stmnt
|
def point_mid(pt1, pt2):
    """Computes the midpoint of the input points.

    :param pt1: point 1
    :type pt1: list, tuple
    :param pt2: point 2
    :type pt2: list, tuple
    :raises ValueError: if the points have different dimensions
    :return: midpoint
    :rtype: list
    """
    if len(pt1) != len(pt2):
        raise ValueError("The input points should have the same dimension")
    # Midpoint = pt1 translated by half the vector from pt1 to pt2.
    half_vector = vector_multiply(vector_generate(pt1, pt2, normalize=False), 0.5)
    return point_translate(pt1, half_vector)
|
def parse_config_path(args=sys.argv):
    """Preprocess sys.argv and extract --config argument.

    The ``--config`` flag and its value are removed from ``args`` in place
    (the mutable default is intentional: the caller wants sys.argv scrubbed).

    :return: the path following ``--config``, or the module default
        ``CONFIG_PATH`` when the flag is absent or has no value.
    """
    config = CONFIG_PATH
    if '--config' in args:
        position = args.index('--config')
        # Only consume a value if one actually follows the flag.
        if len(args) > position + 1:
            config = args.pop(position + 1)
        args.pop(position)
    return config
|
def put(self, key, data):
    """Implementation of :meth:`~simplekv.KeyValueStore.put`.

    Stores the value in the backing store; the cached entry for ``key`` is
    deleted afterwards whether or not the store succeeded, so the cache
    never serves stale data.
    """
    try:
        result = self._dstore.put(key, data)
    finally:
        # Invalidate even on failure -- runs before any exception propagates.
        self.cache.delete(key)
    return result
|
def cull_portals(self, stat, threshold=0.5, comparator=ge):
    """Delete portals whose stat >= ``threshold`` (default 0.5).

    Optional argument ``comparator`` will replace >= as the test
    for whether to cull. You can use the name of a stored function.

    :return: self, for chaining.
    """
    comparator = self._lookup_comparator(comparator)
    # Collect first, then delete, so we never mutate while iterating.
    doomed = [
        (origin, destination)
        for origin in self.portal
        for destination in self.portal[origin]
        if stat in self.portal[origin][destination]
        and comparator(self.portal[origin][destination][stat], threshold)
    ]
    self.remove_edges_from(doomed)
    return self
|
def export_csv_file(self, directory, filename):
    """Export the diagram's inner graph to a CSV file.

    :param directory: string representing the output directory,
    :param filename: string representing the output file name.
    """
    exporter = bpmn_csv_export.BpmnDiagramGraphCsvExport
    exporter.export_process_to_csv(self, directory, filename)
|
def is_noncontinuable(self):
    """@see: U{http://msdn.microsoft.com/en-us/library/aa363082(VS.85).aspx}

    @rtype:  bool
    @return: C{True} if the exception is noncontinuable,
        C{False} otherwise.

        Attempting to continue a noncontinuable exception results in an
        EXCEPTION_NONCONTINUABLE_EXCEPTION exception to be raised.
    """
    flags = self.raw.u.Exception.ExceptionRecord.ExceptionFlags
    return (flags & win32.EXCEPTION_NONCONTINUABLE) != 0
|
def remove_extracontigs(in_bam, data):
    """Remove extra contigs (non chr1-22,X,Y) from an input BAM.

    These extra contigs can often be arranged in different ways, causing
    incompatibility issues with GATK and other tools. This also fixes the
    read group header as in fixrg.

    This does not yet handle mapping over 1 -> chr1 issues since this requires
    a ton of search/replace which slows down conversion.

    :param in_bam: path to the input BAM file.
    :param data: bcbio sample data dictionary (provides work dir, ref, cores).
    :return: path to the cleaned BAM file.
    """
    work_dir = utils.safe_makedir(os.path.join(dd.get_work_dir(data), "bamclean", dd.get_sample_name(data)))
    out_file = os.path.join(work_dir, "%s-noextras.bam" % utils.splitext_plus(os.path.basename(in_bam))[0])
    # Fall back to a sample-name based output when the bam-name based one is absent.
    if not utils.file_exists(out_file):
        out_file = os.path.join(work_dir, "%s-noextras.bam" % dd.get_sample_name(data))
    if not utils.file_uptodate(out_file, in_bam):
        with file_transaction(data, out_file) as tx_out_file:
            target_chroms = _target_chroms_and_header(in_bam, data)
            str_chroms = " ".join(target_chroms)
            rg_info = novoalign.get_rg_info(data["rgnames"])
            bcbio_py = sys.executable
            ref_file = dd.get_ref_file(data)
            # Work on a local symlink so the index lives next to the transaction dir.
            local_bam = os.path.join(os.path.dirname(tx_out_file), os.path.basename(in_bam))
            cores = dd.get_cores(data)
            utils.symlink_plus(in_bam, local_bam)
            bam.index(local_bam, data["config"])
            # view (subset to target chroms) | fix header | re-encode | rewrite read group.
            cmd = ("samtools view -@ {cores} -h {local_bam} {str_chroms} | "
                   """{bcbio_py} -c 'from bcbio.pipeline import cleanbam; """
                   """cleanbam.fix_header("{ref_file}")' | """
                   "samtools view -@ {cores} -u - | "
                   "samtools addreplacerg -@ {cores} -r '{rg_info}' -m overwrite_all -O bam -o {tx_out_file} - ")
            do.run(cmd.format(**locals()), "bamprep, remove extra contigs: %s" % dd.get_sample_name(data))
    return out_file
|
def median_fltr_opencv(dem, size=3, iterations=1):
    """OpenCV median filter.

    :param dem: input array (masked or plain); run through malib.checkma.
    :param size: median kernel size (cv2.medianBlur aperture).
    :param iterations: number of filter passes to apply.
    :return: masked array with invalid values re-masked and the original
        fill value preserved.
    """
    import cv2
    dem = malib.checkma(dem)
    if size > 5:
        print("Need to implement iteration")
    out = dem
    # Fix: the original `while n <= iterations` loop ran iterations+1 passes.
    for _ in range(iterations):
        # medianBlur needs float32; masked cells become NaN and are
        # re-masked by fix_invalid afterwards.
        dem_cv = cv2.medianBlur(out.astype(np.float32).filled(np.nan), size)
        out = np.ma.fix_invalid(dem_cv)
        out.set_fill_value(dem.fill_value)
    return out
|
def validate_type(prop, value, expected):
    """Default validation for all types.

    ``None`` values are ignored here: defaults are handled elsewhere.
    Raises via ``_validation_error`` when ``value`` is not an instance of
    ``expected``.
    """
    if value is None:
        return
    if not isinstance(value, expected):
        _validation_error(prop, type(value).__name__, None, expected)
|
def get_objective_admin_session(self, proxy, *args, **kwargs):
    """Gets the ``OsidSession`` associated with the objective administration service.

    :param proxy: a proxy
    :type proxy: ``osid.proxy.Proxy``
    :return: an ``ObjectiveAdminSession``
    :rtype: ``osid.learning.ObjectiveAdminSession``
    :raise: ``NullArgument`` -- ``proxy`` is ``null``
    :raise: ``OperationFailed`` -- unable to complete request
    :raise: ``Unimplemented`` -- ``supports_objective_admin()`` is ``false``

    *compliance: optional -- This method must be implemented if
    ``supports_objective_admin()`` is ``true``.*
    """
    if not self.supports_objective_admin():
        raise Unimplemented()
    try:
        from . import sessions
    except ImportError:
        raise OperationFailed()
    proxy = self._convert_proxy(proxy)
    try:
        return sessions.ObjectiveAdminSession(proxy=proxy, runtime=self._runtime)
    except AttributeError:
        raise OperationFailed()
|
def get_distro_info(self, loglevel=logging.DEBUG):
    """Get information about which distro we are using, placing it in the environment object.

    Fails if distro could not be determined.
    Should be called with the container is started up, and uses as core info
    as possible.

    Note: if the install type is apt, it issues the following:
        - apt-get update
        - apt-get install -y -qq lsb-release

    :param loglevel: log level passed through to the send helpers.
    :return: True on success (fails hard via shutit.fail otherwise).
    """
    shutit = self.shutit
    install_type = ''
    distro = ''
    distro_version = ''
    # Pass 1: honour an explicit override from the build config, probing the
    # container just enough to run lsb_release where possible.
    if shutit.build['distro_override'] != '':
        key = shutit.build['distro_override']
        distro = shutit.build['distro_override']
        install_type = package_map.INSTALL_TYPE_MAP[key]
        distro_version = ''
        if install_type == 'apt' and shutit.build['delivery'] in ('docker', 'dockerfile'):
            if not self.command_available('lsb_release'):
                if not shutit.get_current_shutit_pexpect_session_environment().build['apt_update_done'] and self.whoami() == 'root':
                    shutit.get_current_shutit_pexpect_session_environment().build['apt_update_done'] = True
                    self.send(ShutItSendSpec(self, send='DEBIAN_FRONTEND=noninteractive apt-get update && apt-get install -y -qq lsb-release', loglevel=loglevel, ignore_background=True))
            d = self.lsb_release()
            install_type = d['install_type']
            distro = d['distro']
            distro_version = d['distro_version']
        elif install_type == 'yum' and shutit.build['delivery'] in ('docker', 'dockerfile'):
            if self.file_exists('/etc/redhat-release'):
                output = self.send_and_get_output(' command cat /etc/redhat-release', echo=False, loglevel=loglevel)
                # NOTE(review): the trailing `or True` makes this condition always true -- confirm intent.
                if re.match('^centos.*$', output.lower()) or re.match('^red hat.*$', output.lower()) or re.match('^fedora.*$', output.lower()) or True:
                    self.send_and_match_output('yum install -y -t redhat-lsb', 'Complete!', loglevel=loglevel)
            else:
                if not self.command_available('lsb_release'):
                    self.send(ShutItSendSpec(self, send='yum install -y lsb-release', loglevel=loglevel, ignore_background=True))
            # NOTE(review): 'd' is not assigned in this branch (unlike the apt branch
            # and the second pass below) -- looks like a missing `d = self.lsb_release()`.
            install_type = d['install_type']
            distro = d['distro']
            distro_version = d['distro_version']
        elif install_type == 'apk' and shutit.build['delivery'] in ('docker', 'dockerfile'):
            if not shutit.get_current_shutit_pexpect_session_environment().build['apk_update_done'] and self.whoami() == 'root':
                self.send(ShutItSendSpec(self, send='apk -q update', ignore_background=True, loglevel=logging.INFO))
                shutit.get_current_shutit_pexpect_session_environment().build['apk_update_done'] = True
            self.send(ShutItSendSpec(self, send='apk -q add bash', ignore_background=True, loglevel=loglevel))
            install_type = 'apk'
            distro = 'alpine'
            distro_version = '1.0'
        elif install_type == 'pacman' and shutit.build['delivery'] in ('docker', 'dockerfile') and self.whoami() == 'root':
            if not shutit.get_current_shutit_pexpect_session_environment().build['pacman_update_done']:
                shutit.get_current_shutit_pexpect_session_environment().build['pacman_update_done'] = True
                self.send(ShutItSendSpec(self, send='pacman -Syy', ignore_background=True, loglevel=logging.INFO))
            # NOTE(review): 'd' is also unassigned here -- other hardcoded branches
            # (apk/emerge) set install_type/distro literally; verify this one.
            install_type = d['install_type']
            distro = d['distro']
            distro_version = '1.0'
        elif install_type == 'emerge' and shutit.build['delivery'] in ('docker', 'dockerfile'):
            if not shutit.get_current_shutit_pexpect_session_environment().build['emerge_update_done'] and self.whoami() == 'root':
                # Takes bloody ages!
                # self.send(ShutItSendSpec(self, send='emerge --sync', loglevel=loglevel, timeout=9999, ignore_background=True))
                pass
            install_type = 'emerge'
            distro = 'gentoo'
            distro_version = '1.0'
        elif install_type == 'docker' and shutit.build['delivery'] in ('docker', 'dockerfile'):
            distro = 'coreos'
            distro_version = '1.0'
    elif self.command_available('lsb_release'):
        # No override, but lsb_release is available: trust it directly.
        d = self.lsb_release()
        install_type = d['install_type']
        distro = d['distro']
        distro_version = d['distro_version']
    else:
        # Last resort: sniff /etc/issue, /etc/os-release, then uname.
        issue_output = self.send_and_get_output(' command cat /etc/issue', echo=False, ignore_background=True, loglevel=loglevel).lower()
        if not re.match('.*No such file.*', issue_output):
            for key in package_map.INSTALL_TYPE_MAP:
                if issue_output.find(key) != -1:
                    distro = key
                    install_type = package_map.INSTALL_TYPE_MAP[key]
                    break
        elif self.file_exists('/cygdrive'):
            distro = 'cygwin'
            install_type = 'apt-cyg'
        if install_type == '' or distro == '':
            if self.file_exists('/etc/os-release'):
                os_name = self.send_and_get_output(' command cat /etc/os-release | grep ^NAME', echo=False, ignore_background=True, loglevel=loglevel).lower()
                if os_name.find('centos') != -1:
                    distro = 'centos'
                    install_type = 'yum'
                elif os_name.find('red hat') != -1:
                    distro = 'red hat'
                    install_type = 'yum'
                elif os_name.find('fedora') != -1:
                    # TODO: distinguish with dnf - fedora 23+? search for dnf in here
                    distro = 'fedora'
                    install_type = 'yum'
                elif os_name.find('gentoo') != -1:
                    distro = 'gentoo'
                    install_type = 'emerge'
                elif os_name.find('coreos') != -1:
                    distro = 'coreos'
                    install_type = 'docker'
            else:
                uname_output = self.send_and_get_output(" command uname -a | awk '{print $1}'", echo=False, ignore_background=True, loglevel=loglevel)
                if uname_output == 'Darwin':
                    distro = 'osx'
                    install_type = 'brew'
                    if not self.command_available('brew'):
                        shutit.fail('ShutiIt requires brew be installed. See http://brew.sh for details on installation.')  # pragma: no cover
                    if not self.file_exists('/tmp/shutit_brew_list'):
                        if self.whoami() != 'root':
                            self.send(ShutItSendSpec(self, send=' brew list > .shutit_brew_list', echo=False, ignore_background=True, loglevel=loglevel))
                        else:
                            pass
                        # Ensure the GNU userland pieces ShutIt relies on are present.
                        for package in ('coreutils', 'findutils', 'gnu-tar', 'gnu-sed', 'gawk', 'gnutls', 'gnu-indent', 'gnu-getopt'):
                            if self.send_and_get_output(' command cat .shutit_brew_list | grep -w ' + package, echo=False, loglevel=loglevel) == '':
                                self.send(ShutItSendSpec(self, send='brew install ' + package, ignore_background=True, loglevel=loglevel))
                        self.send(ShutItSendSpec(self, send='rm -f .shutit_brew_list', echo=False, ignore_background=True, loglevel=loglevel))
                if uname_output[:6] == 'CYGWIN':
                    distro = 'cygwin'
                    install_type = 'apt-cyg'
            if install_type == '' or distro == '':
                shutit.fail('Could not determine Linux distro information. ' + 'Please inform ShutIt maintainers at https://github.com/ianmiell/shutit', shutit_pexpect_child=self.pexpect_child)  # pragma: no cover
    # The call to self.package_installed with lsb-release above
    # may fail if it doesn't know the install type, so
    # if we've determined that now
    # Pass 2: with an install type in hand, retry lsb_release-based detection.
    if install_type == 'apt' and shutit.build['delivery'] in ('docker', 'dockerfile'):
        if not self.command_available('lsb_release'):
            if not shutit.get_current_shutit_pexpect_session_environment().build['apt_update_done'] and self.whoami() == 'root':
                shutit.get_current_shutit_pexpect_session_environment().build['apt_update_done'] = True
                self.send(ShutItSendSpec(self, send='DEBIAN_FRONTEND=noninteractive apt-get update && apt-get install -y -qq lsb-release', loglevel=loglevel, ignore_background=True))
            self.send(ShutItSendSpec(self, send='DEBIAN_FRONTEND=noninteractive apt-get install -y -qq lsb-release', loglevel=loglevel, ignore_background=True))
        d = self.lsb_release()
        install_type = d['install_type']
        distro = d['distro']
        distro_version = d['distro_version']
    elif install_type == 'yum' and shutit.build['delivery'] in ('docker', 'dockerfile'):
        if self.file_exists('/etc/redhat-release'):
            output = self.send_and_get_output(' command cat /etc/redhat-release', echo=False, loglevel=loglevel)
            # NOTE(review): always-true condition (trailing `or True`), as in pass 1.
            if re.match('^centos.*$', output.lower()) or re.match('^red hat.*$', output.lower()) or re.match('^fedora.*$', output.lower()) or True:
                self.send_and_match_output('yum install -y -t redhat-lsb', 'Complete!', loglevel=loglevel)
        else:
            if not self.command_available('lsb_release'):
                self.send(ShutItSendSpec(self, send='yum install -y lsb-release', ignore_background=True, loglevel=loglevel))
        d = self.lsb_release()
        install_type = d['install_type']
        distro = d['distro']
        distro_version = d['distro_version']
    elif install_type == 'apk' and shutit.build['delivery'] in ('docker', 'dockerfile'):
        if not shutit.get_current_shutit_pexpect_session_environment().build['apk_update_done'] and self.whoami() == 'root':
            self.send(ShutItSendSpec(self, send='apk -q update', ignore_background=True, loglevel=logging.INFO))
            shutit.get_current_shutit_pexpect_session_environment().build['apk_update_done'] = True
        self.send(ShutItSendSpec(self, send='apk -q add bash', ignore_background=True, loglevel=loglevel))
        install_type = 'apk'
        distro = 'alpine'
        distro_version = '1.0'
    elif install_type == 'emerge' and shutit.build['delivery'] in ('docker', 'dockerfile'):
        if not shutit.get_current_shutit_pexpect_session_environment().build['emerge_update_done'] and self.whoami() == 'root':
            # Takes bloody ages!
            # self.send(ShutItSendSpec(self, send='emerge --sync', loglevel=logging.INFO, ignore_background=True))
            pass
        install_type = 'emerge'
        distro = 'gentoo'
        distro_version = '1.0'
    # We should have the distro info now, let's assign to target config
    # if this is not a one-off.
    self.current_environment.install_type = install_type
    self.current_environment.distro = distro
    self.current_environment.distro_version = distro_version
    return True
|
def validate_encryption_services(cmd, namespace):
    """Builds up the encryption services object for storage account operations based on the list of services passed in."""
    if not namespace.encryption_services:
        return
    t_encryption_services, t_encryption_service = get_sdk(
        cmd.cli_ctx, CUSTOM_MGMT_STORAGE, 'EncryptionServices', 'EncryptionService', mod='models')
    # Every named service is marked enabled; unnamed ones are left unset.
    enabled = {name: t_encryption_service(enabled=True) for name in namespace.encryption_services}
    namespace.encryption_services = t_encryption_services(**enabled)
|
def process_update_records(update_records):
    """Process the requests for S3 bucket update requests.

    :param update_records: iterable of CloudWatch-style S3 event records;
        each has an 'account' key and 'detail.requestParameters' payload.
    """
    # groupby requires its input sorted on the same key.
    events = sorted(update_records, key=lambda x: x['account'])
    # Group records by account for more efficient processing
    for account_id, events in groupby(events, lambda x: x['account']):
        events = list(events)
        # Grab the bucket names (de-dupe events):
        buckets = {}
        for event in events:
            # If the creation date is present, then use it:
            # Later events for the same bucket overwrite earlier ones, so only
            # the most recent eventDetails survives per bucket.
            bucket_event = buckets.get(event['detail']['requestParameters']['bucketName'],
                                       {'creationDate': event['detail']['requestParameters'].get('creationDate')})
            bucket_event.update(event['detail']['requestParameters'])
            buckets[event['detail']['requestParameters']['bucketName']] = bucket_event
            buckets[event['detail']['requestParameters']['bucketName']]['eventDetails'] = event
        # Query AWS for current configuration
        for b_name, item in buckets.items():
            LOG.debug(f'[~] Processing Create/Update for: {b_name}')
            # If the bucket does not exist, then simply drop the request --
            # If this happens, there is likely a Delete event that has occurred and will be processed soon.
            try:
                bucket_details = get_bucket(b_name, account_number=account_id, include_created=(item.get('creationDate') is None), assume_role=HISTORICAL_ROLE, region=CURRENT_REGION)
                if bucket_details.get('Error'):
                    LOG.error(f"[X] Unable to fetch details about bucket: {b_name}. "
                              f"The error details are: {bucket_details['Error']}")
                    continue
            except ClientError as cerr:
                if cerr.response['Error']['Code'] == 'NoSuchBucket':
                    LOG.warning(f'[?] Received update request for bucket: {b_name} that does not '
                                'currently exist. Skipping.')
                    continue
                # Catch Access Denied exceptions as well:
                if cerr.response['Error']['Code'] == 'AccessDenied':
                    LOG.error(f'[X] Unable to fetch details for S3 Bucket: {b_name} in {account_id}. Access is Denied. '
                              'Skipping...')
                    continue
                raise Exception(cerr)
            # Pull out the fields we want:
            data = {
                'arn': f'arn:aws:s3:::{b_name}',
                'principalId': cloudwatch.get_principal(item['eventDetails']),
                'userIdentity': cloudwatch.get_user_identity(item['eventDetails']),
                'userAgent': item['eventDetails']['detail'].get('userAgent'),
                'sourceIpAddress': item['eventDetails']['detail'].get('sourceIPAddress'),
                'requestParameters': item['eventDetails']['detail'].get('requestParameters'),
                'accountId': account_id,
                'eventTime': item['eventDetails']['detail']['eventTime'],
                'BucketName': b_name,
                'Region': bucket_details.pop('Region'),  # Duplicated in top level and configuration for secondary index
                'Tags': bucket_details.pop('Tags', {}) or {},
                'eventSource': item['eventDetails']['detail']['eventSource'],
                'eventName': item['eventDetails']['detail']['eventName'],
                'version': VERSION
            }
            # Remove the fields we don't care about:
            del bucket_details['Arn']
            del bucket_details['GrantReferences']
            del bucket_details['_version']
            del bucket_details['Name']
            if not bucket_details.get('CreationDate'):
                bucket_details['CreationDate'] = item['creationDate']
            data['configuration'] = bucket_details
            current_revision = CurrentS3Model(**data)
            current_revision.save()
|
def output(s):
    """Parse ``s``, pretty-print the AST, transform it, and print the result.

    :return: the transformed structure (also printed as indented JSON).
    """
    parser = Parser()
    transformer = ExpressionsTransformer()
    tree = parser.parse(s)
    logging.debug(tree.pretty())
    print(tree.pretty())
    result = transformer.transform(tree)
    print(json.dumps(result, indent=4))
    return result
|
def _prepair ( self ) :
'''Try to connect to the given dbus services . If successful it will
return a callable dbus proxy and those arguments .'''
|
try :
sessionbus = dbus . SessionBus ( )
systembus = dbus . SystemBus ( )
except :
return ( None , None )
for dbus_props in self . DBUS_SHUTDOWN . values ( ) :
try :
if dbus_props [ 'bus' ] == SESSION_BUS :
bus = sessionbus
else :
bus = systembus
interface = bus . get_object ( dbus_props [ 'service' ] , dbus_props [ 'objectPath' ] )
proxy = interface . get_dbus_method ( dbus_props [ 'method' ] , dbus_props [ 'interface' ] )
return ( proxy , dbus_props [ 'arguments' ] )
except dbus . exceptions . DBusException :
continue
return ( None , None )
|
def tmppath(path=None, include_unix_username=True):
    """Generate a temporary location for ``path``.

    @param path: target path for which it is needed to generate temporary location
    @type path: str
    @type include_unix_username: bool
    @rtype: str

    Note that include_unix_username might work on windows too.
    """
    # Fix: randrange requires an int; 1e9 is a float, which newer Python 3
    # releases reject outright.
    addon = "luigitemp-%08d" % random.randrange(10 ** 9)
    temp_dir = '/tmp'  # default tmp dir if none is specified in config
    # 1. Figure out to which temporary directory to place
    configured_hdfs_tmp_dir = hdfs().tmp_dir
    if configured_hdfs_tmp_dir is not None:
        # config is superior
        base_dir = configured_hdfs_tmp_dir
    elif path is not None:
        # need to copy correct schema and network location
        parsed = urlparse(path)
        base_dir = urlunparse((parsed.scheme, parsed.netloc, temp_dir, '', '', ''))
    else:
        # just system temporary directory
        base_dir = temp_dir
    # 2. Figure out what to place
    if path is not None:
        if path.startswith(temp_dir + '/'):
            # Not 100%, but some protection from directories like /tmp/tmp/file
            subdir = path[len(temp_dir):]
        else:
            # Protection from /tmp/hdfs:/dir/file
            parsed = urlparse(path)
            subdir = parsed.path
        subdir = subdir.lstrip('/') + '-'
    else:
        # just return any random temporary location
        subdir = ''
    if include_unix_username:
        subdir = os.path.join(getpass.getuser(), subdir)
    return os.path.join(base_dir, subdir + addon)
|
def prevnode(edges, component):
    """Get the previous component(s) in the loop.

    Edges are (from, to) pairs; a tuple endpoint represents a node, any other
    endpoint a component. A predecessor is either a component that feeds
    ``component`` through a node (component -> node -> component), or one
    connected to it directly with no node in between.

    :param edges: iterable of (from, to) pairs.
    :param component: the component whose predecessors are wanted.
    :return: list of predecessor components. If any edge into ``component``
        has no matching component->node feeder, node-mediated results are
        discarded (only direct connections remain).
    """
    e = edges
    c = component
    # component -> node edges (the original also built an unused
    # node -> component list, removed here)
    c2n = [(a, b) for a, b in e if type(b) == tuple]
    # edges terminating at the component of interest
    node2cs = [(a, b) for a, b in e if b == c]
    c2nodes = []
    for node2c in node2cs:
        c2node = [(a, b) for a, b in c2n if b == node2c[0]]
        if len(c2node) == 0:
            # no feeder for this node: abandon all node-mediated paths
            c2nodes = []
            break
        c2nodes.append(c2node[0])
    cs = [a for a, b in c2nodes]
    # direct component-to-component connections (edges with no nodes)
    nonodes = [(a, b) for a, b in e if type(a) != tuple and type(b) != tuple]
    for a, b in nonodes:
        if b == component:
            cs.append(a)
    return cs
|
def set_screen(self, screen, overwrite=False):
    """Set a screen on this Pipeline.

    Parameters
    ----------
    filter : zipline.pipeline.Filter
        The filter to apply as a screen.
    overwrite : bool
        Whether to overwrite any existing screen. If overwrite is False
        and self.screen is not None, we raise an error.
    """
    if not overwrite and self._screen is not None:
        raise ValueError(
            "set_screen() called with overwrite=False and screen already "
            "set.\n"
            "If you want to apply multiple filters as a screen use "
            "set_screen(filter1 & filter2 & ...).\n"
            "If you want to replace the previous screen with a new one, "
            "use set_screen(new_filter, overwrite=True)."
        )
    self._screen = screen
|
def clearCanvas(self, fillColor=0):
    """Clear the canvas, filling it with ``fillColor`` in one pass.

    @param fillColor: a color value

    @note The accepted values depend on ``_buffer_color_mode``:
        * SS_COLOR_MODE_MONO ("1"): only 0 (black) and 1 (white)
        * SS_COLOR_MODE_RGB ("RGB"): RGB color values
    """
    width, height = self._display_size
    self.Canvas.rectangle((0, 0, width, height), outline=0, fill=fillColor)
|
def get_properties(obj):
    """Get values of all properties in specified object and its subobjects and returns them as a map.

    The object can be a user defined object, map or array.
    Returned properties correspondently are object properties,
    map key-pairs or array elements with their indexes.

    :param obj: an object to get properties from.
    :return: a map, containing the names of the object's properties and their values.
    """
    properties = {}
    # Fix: use the idiomatic `is not None` identity test instead of `!= None`,
    # which can be subverted by objects overriding __ne__.
    if obj is not None:
        cycle_detect = []  # guards against infinite recursion on cyclic objects
        RecursiveObjectReader._perform_get_properties(obj, None, properties, cycle_detect)
    return properties
|
def contains(self, key):
    '''Returns whether the object named by `key` exists.

    Optimized to only check whether the file object exists.

    Args:
      key: Key naming the object to check.

    Returns:
      boolean whether the object exists
    '''
    # os.path.isfile already returns False for nonexistent paths, so the
    # original extra os.path.exists call was redundant.
    return os.path.isfile(self.object_path(key))
|
def identify(self, req, resp, resource, uri_kwargs):
    """Identify user using Authenticate header with Basic auth.

    Returns a (username, password) tuple on success, ``None`` when the
    header is absent or not Basic, and raises ``HTTPBadRequest`` for a
    malformed header or undecodable credentials.
    """
    header = req.get_header("Authorization", False)
    if not header:
        return None
    parts = header.split(" ")
    if parts[0].lower() != 'basic':
        return None
    if len(parts) != 2:
        raise HTTPBadRequest(
            "Invalid Authorization header",
            "The Authorization header for Basic auth should be in form:\n"
            "Authorization: Basic <base64-user-pass>"
        )
    try:
        decoded = base64.b64decode(parts[1]).decode()
    except (TypeError, UnicodeDecodeError, binascii.Error):
        raise HTTPBadRequest(
            "Invalid Authorization header",
            "Credentials for Basic auth not correctly base64 encoded."
        )
    # Split on the first colon only: passwords may themselves contain colons.
    username, _, password = decoded.partition(":")
    return username, password
|
def to_bioul(tag_sequence: List[str], encoding: str = "IOB1") -> List[str]:
    """Given a tag sequence encoded with IOB1 labels, recode to BIOUL.

    In the IOB1 scheme, I is a token inside a span, O is a token outside
    a span and B is the beginning of a span immediately following another
    span of the same type.
    In the BIO scheme, I is a token inside a span, O is a token outside
    a span and B is the beginning of a span.

    Parameters
    ----------
    tag_sequence : ``List[str]``, required.
        The tag sequence encoded in IOB1, e.g. ["I-PER", "I-PER", "O"].
    encoding : `str`, optional, (default = ``IOB1``).
        The encoding type to convert from. Must be either "IOB1" or "BIO".

    Returns
    -------
    bioul_sequence : ``List[str]``
        The tag sequence re-encoded in BIOUL, e.g. ["B-PER", "L-PER", "O"].

    Raises
    ------
    ConfigurationError
        If ``encoding`` is not one of "IOB1" or "BIO".
    InvalidTagSequence
        If ``tag_sequence`` is not valid under the requested encoding.
    """
    if not encoding in {"IOB1", "BIO"}:
        raise ConfigurationError(f"Invalid encoding {encoding} passed to 'to_bioul'.")
    # pylint: disable=len-as-condition

    def replace_label(full_label, new_label):
        # Swap the position prefix while keeping the entity type,
        # e.g. full_label = 'I-PER', new_label = 'U' -> 'U-PER'.
        parts = list(full_label.partition('-'))
        parts[0] = new_label
        return ''.join(parts)

    def pop_replace_append(in_stack, out_stack, new_label):
        # Pop the last element from in_stack, replace its label prefix,
        # and append the result to out_stack.
        tag = in_stack.pop()
        new_tag = replace_label(tag, new_label)
        out_stack.append(new_tag)

    def process_stack(stack, out_stack):
        # Recode one accumulated span (a stack of labels of the same
        # entity type) into BIOUL and append it to out_stack.
        if len(stack) == 1:
            # Single-token span: just a U token.
            pop_replace_append(stack, out_stack, 'U')
        else:
            # Multi-token span: needs to be coded as B, I..., L.
            recoded_stack = []
            pop_replace_append(stack, recoded_stack, 'L')
            while len(stack) >= 2:
                pop_replace_append(stack, recoded_stack, 'I')
            pop_replace_append(stack, recoded_stack, 'B')
            recoded_stack.reverse()
            out_stack.extend(recoded_stack)

    # Process the tag_sequence one tag at a time, accumulating the current
    # span on a stack, and recoding the span whenever it ends.
    bioul_sequence = []
    stack: List[str] = []
    for label in tag_sequence:
        if label == 'O' and len(stack) == 0:
            # Outside any span with nothing pending: emit as-is.
            bioul_sequence.append(label)
        elif label == 'O' and len(stack) > 0:
            # A span just ended: recode the stacked entries, then emit the O.
            process_stack(stack, bioul_sequence)
            bioul_sequence.append(label)
        elif label[0] == 'I':
            # 'I' either continues the current span (same type) or, in IOB1
            # only, starts a brand new entity when the type differs.
            if len(stack) == 0:
                if encoding == "BIO":
                    # In BIO an 'I' may not start a span.
                    raise InvalidTagSequence(tag_sequence)
                stack.append(label)
            else:
                # Check if the previous type is the same as this one.
                this_type = label.partition('-')[2]
                prev_type = stack[-1].partition('-')[2]
                if this_type == prev_type:
                    stack.append(label)
                else:
                    if encoding == "BIO":
                        raise InvalidTagSequence(tag_sequence)
                    # A new entity starts here: flush the previous span first.
                    process_stack(stack, bioul_sequence)
                    stack.append(label)
        elif label[0] == 'B':
            if len(stack) > 0:
                # 'B' always starts a new span; flush any pending one.
                process_stack(stack, bioul_sequence)
            stack.append(label)
        else:
            raise InvalidTagSequence(tag_sequence)
    # Flush any span still pending at the end of the sequence.
    if len(stack) > 0:
        process_stack(stack, bioul_sequence)
    return bioul_sequence
|
def get_utt_regions(self):
    """Return the regions of all utterances, assuming they are concatenated.

    Utterances are assumed to be sorted in ascending order for
    concatenation. A region is defined by its offset (in chunks), its
    length (number of chunks) and a list of references to the utterance
    datasets in the containers.

    Returns:
        list: A tuple ``(offset, num_chunks, refs)`` for every utterance.

    Raises:
        ValueError: If an utterance has differing frame counts across
            the containers.
    """
    regions = []
    next_offset = 0
    for utt_idx in sorted(self.utt_ids):
        frame_counts = [cnt.get(utt_idx).shape[0] for cnt in self.containers]
        refs = [cnt.get(utt_idx, mem_map=True) for cnt in self.containers]
        # All containers must agree on the utterance length.
        if len(set(frame_counts)) != 1:
            raise ValueError('Utterance {} has not the same number of frames in all containers!'.format(utt_idx))
        num_chunks = math.ceil(frame_counts[0] / float(self.frames_per_chunk))
        regions.append((next_offset, num_chunks, refs))
        # The next utterance starts right after this one.
        next_offset += num_chunks
    return regions
|
def _update_raid_input_data(target_raid_config, raid_input):
    """Process raid input data.

    Merges the requested logical disks from ``target_raid_config`` into the
    eLCM RAID-adapter profile ``raid_input`` so it can be submitted to
    create the RAID configuration.

    :param target_raid_config: node raid info; expects a 'logical_disks'
        list where each entry has 'raid_level', 'size_gb' and optionally
        'physical_disks'.
    :param raid_input: raid information for creating via eLCM; expects
        raid_input['Server']['HWConfigurationIrmc']['Adapters']
        ['RAIDAdapter'][0] to exist.
    :raises ELCMValueError: raise msg if wrong input
    :return: raid_input: raid input data which creates the raid configuration
    """
    logical_disk_list = target_raid_config['logical_disks']
    # Mark the profile so the iRMC executes the configuration on apply.
    raid_input['Server']['HWConfigurationIrmc'].update({'@Processing': 'execute'})
    array_info = raid_input['Server']['HWConfigurationIrmc']['Adapters']['RAIDAdapter'][0]
    # Reset any previously present drive/array definitions.
    array_info['LogicalDrives'] = {'LogicalDrive': []}
    array_info['Arrays'] = {'Array': []}
    # NOTE: the loop relies on exactly one LogicalDrive entry being appended
    # per iteration, so index ``i`` addresses this iteration's entry below.
    for i, logical_disk in enumerate(logical_disk_list):
        physical_disks = logical_disk.get('physical_disks')
        # Auto create logical drive along with random physical disks.
        # Allow auto create along with raid 10 and raid 50
        # with specific physical drive.
        if not physical_disks or logical_disk['raid_level'] in ('10', '50'):
            array_info['LogicalDrives']['LogicalDrive'].append(
                {'@Action': 'Create', 'RaidLevel': logical_disk['raid_level'], 'InitMode': 'slow'})
            array_info['LogicalDrives']['LogicalDrive'][i].update({"@Number": i})
        else:
            # Create array disks with the specific physical drives requested.
            arrays = {"@Number": i, "@ConfigurationType": "Setting",
                      "PhysicalDiskRefs": {"PhysicalDiskRef": []}}
            lo_drive = {"@Number": i, "@Action": "Create", "RaidLevel": "",
                        "ArrayRefs": {"ArrayRef": []}, "InitMode": "slow"}
            array_info['Arrays']['Array'].append(arrays)
            array_info['LogicalDrives']['LogicalDrive'].append(lo_drive)
            lo_drive.update({'RaidLevel': logical_disk['raid_level']})
            # Tie the logical drive to its array, and the array to its disks.
            lo_drive['ArrayRefs']['ArrayRef'].append({"@Number": i})
            for element in logical_disk['physical_disks']:
                arrays['PhysicalDiskRefs']['PhysicalDiskRef'].append({'@Number': element})
        if logical_disk['size_gb'] != "MAX":
            # Ensure these items keep their order in the resulting document.
            size = collections.OrderedDict()
            size['@Unit'] = 'GB'
            size['#text'] = logical_disk['size_gb']
            array_info['LogicalDrives']['LogicalDrive'][i]['Size'] = size
    return raid_input
|
def get_template(self, template_name, **parameters):
    """Load *template_name* from the AWS templates folder and render it.

    Delegates to the module-level ``get_template`` with the resolved path.
    """
    full_path = pathlib.Path(self.template_dir) / template_name
    return get_template(full_path, **parameters)
|
def split_buffer(stream, splitter=None, decoder=lambda a: a):
    """Given a generator which yields strings and a splitter function,
    joins all input, splits on the separator and yields each chunk.

    Unlike string.split(), each chunk includes the trailing
    separator, except for the last one if none was found on the end
    of the input.
    """
    split = splitter or line_splitter
    pending = six.text_type('')
    for chunk in stream_as_text(stream):
        pending += chunk
        # Emit every complete item currently sitting in the buffer.
        result = split(pending)
        while result is not None:
            item, pending = result
            yield item
            result = split(pending)
    if pending:
        # Trailing data with no final separator: run it through the decoder.
        yield decoder(pending)
|
def present_active(self):
    """Present active forms of weak Old Norse verbs.

    Returns the six present-active forms, apparently in the order
    1sg, 2sg, 3sg, 1pl, 2pl, 3pl (matching the doctests below).

    I
    >>> verb = WeakOldNorseVerb()
    >>> verb.set_canonic_forms(["kalla", "kallaði", "kallaðinn"])
    >>> verb.present_active()
    ['kalla', 'kallar', 'kallar', 'köllum', 'kallið', 'kalla']

    II
    >>> verb = WeakOldNorseVerb()
    >>> verb.set_canonic_forms(["mæla", "mælti", "mæltr"])
    >>> verb.present_active()
    ['mæli', 'mælir', 'mælir', 'mælum', 'mælið', 'mæla']

    III
    >>> verb = WeakOldNorseVerb()
    >>> verb.set_canonic_forms(["telja", "taldi", "talinn"])
    >>> verb.present_active()
    ['tel', 'telr', 'telr', 'teljum', 'telið', 'telja']

    IV
    >>> verb = WeakOldNorseVerb()
    >>> verb.set_canonic_forms(["vaka", "vakti", "vakat"])
    >>> verb.present_active()
    ['vaki', 'vakir', 'vakir', 'vökum', 'vakið', 'vaka']

    :return: list of the six present active forms
    """
    forms = []
    # Stems whose infinitive ends in -ja or -va carry a glide (j/v) that is
    # stripped from the bare stem and re-inserted only before some endings.
    stem_ending_by_j = self.sng[-1] == "a" and self.sng[-2] == "j"
    stem_ending_by_v = self.sng[-1] == "a" and self.sng[-2] == "v"
    # Drop the infinitive -a ending (if present) to get the stem.
    stem = self.sng[:-1] if self.sng[-1] == "a" else self.sng
    if stem_ending_by_j or stem_ending_by_v:
        stem = stem[:-1]
    if self.subclass == 1:
        if stem_ending_by_v:
            forms.append(stem + "va")
            forms.append(stem + "r")
            forms.append(stem + "r")
            # u-umlaut applies before the -um (1pl) ending.
            forms.append(apply_u_umlaut(stem) + "um")
            forms.append(stem + "við")
            forms.append(stem + "va")
        elif stem_ending_by_j:
            forms.append(stem + "ja")
            forms.append(stem + "r")
            forms.append(stem + "r")
            # u-umlaut applies before the -um (1pl) ending.
            forms.append(apply_u_umlaut(stem) + "jum")
            forms.append(stem + "ið")
            forms.append(stem + "ja")
        else:
            forms.append(stem + "a")
            forms.append(stem + "ar")
            forms.append(stem + "ar")
            # u-umlaut applies before the -um (1pl) ending.
            forms.append(apply_u_umlaut(stem) + "um")
            forms.append(stem + "ið")
            forms.append(self.sng)
    elif self.subclass == 2:
        if stem_ending_by_v:
            forms.append(stem + "vi")
            forms.append(stem + "vir")
            forms.append(stem + "vir")
            # u-umlaut applies before the -um (1pl) ending.
            forms.append(apply_u_umlaut(stem) + "um")
            forms.append(stem + "við")
            forms.append(self.sng)
        elif stem_ending_by_j:
            forms.append(stem + "i")
            forms.append(stem + "ir")
            forms.append(stem + "ir")
            # u-umlaut applies before the -um (1pl) ending.
            forms.append(apply_u_umlaut(stem) + "jum")
            forms.append(stem + "ið")
            forms.append(self.sng)
        else:
            forms.append(stem + "i")
            forms.append(stem + "ir")
            forms.append(stem + "ir")
            # u-umlaut applies before the -um (1pl) ending.
            forms.append(apply_u_umlaut(stem) + "um")
            forms.append(stem + "ið")
            forms.append(self.sng)
    elif self.subclass == 3:
        if stem_ending_by_v:
            forms.append(stem)
            forms.append(stem + "r")
            forms.append(stem + "r")
            # u-umlaut applies before the -um (1pl) ending.
            forms.append(apply_u_umlaut(stem) + "um")
            forms.append(stem + "við")
            forms.append(self.sng)
        elif stem_ending_by_j:
            forms.append(stem)
            forms.append(stem + "r")
            forms.append(stem + "r")
            # u-umlaut applies before the -um (1pl) ending.
            forms.append(apply_u_umlaut(stem) + "jum")
            forms.append(stem + "ið")
            forms.append(self.sng)
        else:
            forms.append(stem)
            forms.append(stem + "r")
            forms.append(stem + "r")
            # u-umlaut applies before the -um (1pl) ending.
            forms.append(apply_u_umlaut(stem) + "um")
            forms.append(stem + "ið")
            forms.append(self.sng)
    elif self.subclass == 4:
        if stem_ending_by_v:
            forms.append(stem + "vi")
            forms.append(stem + "vir")
            forms.append(stem + "vir")
            # u-umlaut applies before the -um (1pl) ending.
            forms.append(apply_u_umlaut(stem) + "um")
            forms.append(stem + "við")
            forms.append(self.sng)
        elif stem_ending_by_j:
            forms.append(stem + "i")
            forms.append(stem + "ir")
            forms.append(stem + "ir")
            # u-umlaut applies before the -um (1pl) ending.
            forms.append(apply_u_umlaut(stem) + "jum")
            forms.append(stem + "ið")
            forms.append(self.sng)
        else:
            forms.append(stem + "i")
            forms.append(stem + "ir")
            forms.append(stem + "ir")
            # u-umlaut applies before the -um (1pl) ending.
            forms.append(apply_u_umlaut(stem) + "um")
            forms.append(stem + "ið")
            forms.append(self.sng)
    return forms
|
def _log_message(self, level, process_name, timeperiod, msg):
    """Record *msg* both in the Timetable's tree node and in the log file."""
    # Mirror the message into the Timetable first, then the regular logger.
    timetable_entry = (process_name, timeperiod, msg)
    self.timetable.add_log_entry(*timetable_entry)
    self.logger.log(level, msg)
|
def polygon(self):
    '''Return the fence as a list of (lat, lng) tuples.'''
    # The first stored point is skipped -- presumably the return point
    # rather than part of the fence outline; confirm against fence format.
    return [(fence_point.lat, fence_point.lng) for fence_point in self.points[1:]]
|
def help_func():
    """Print the help page (banner, version, and command usage).

    :return: None
    """
    tprint("art")
    tprint("v" + VERSION)
    print(DESCRIPTION + "\n")
    usage_lines = (
        "Webpage : http://art.shaghighi.ir\n",
        "Help : \n",
        " - list --> (list of arts)\n",
        " - fonts --> (list of fonts)\n",
        " - test --> (run tests)\n",
        " - text 'yourtext' 'font(optional)' --> (text art) Example : 'python -m art text exampletext block'\n",
        " - shape 'shapename' --> (shape art) Example : 'python -m art shape butterfly'\n",
        " - save 'yourtext' 'font(optional)' --> Example : 'python -m art save exampletext block'\n",
        " - all 'yourtext' --> Example : 'python -m art all exampletext'",
    )
    for usage_line in usage_lines:
        print(usage_line)
|
def delete_boot_script(self):
    """DELETE /:login/machines/:id/metadata/user-script

    Deletes any existing boot script on the machine, then clears the
    locally cached copy.
    """
    endpoint = self.path + '/metadata/user-script'
    _, response = self.datacenter.request('DELETE', endpoint)
    response.raise_for_status()
    # Keep the local attribute in sync with the remote state.
    self.boot_script = None
|
def agreement_weighted(ci, wts):
    '''Weighted agreement matrix of a set of partitions.

    Identical to AGREEMENT, except that each partition's contribution is
    weighted by the corresponding scalar in WTS. For example, if CI holds
    partitions from a modularity-maximizing heuristic, WTS could be the Q
    (Newman modularity) score of each, giving more weight to higher
    modularity partitions.

    NOTE: Unlike AGREEMENT, this function has no BUFFSZ argument.

    Parameters
    ----------
    ci : MxN np.ndarray
        set of M (possibly degenerate) partitions of N nodes
    wts : Mx1 np.ndarray
        relative weight of each partition

    Returns
    -------
    D : NxN np.ndarray
        weighted agreement matrix
    '''
    partitions = np.array(ci)
    num_partitions, n = partitions.shape
    # Normalize the weights so they sum to one.
    weights = np.array(wts) / np.sum(wts)
    D = np.zeros((n, n))
    for idx in range(num_partitions):
        indicator = dummyvar(partitions[idx, :].reshape(1, n))
        # Co-assignment matrix of this partition, scaled by its weight.
        D += np.dot(indicator, indicator.T) * weights[idx]
    return D
|
def action_spatial(self, action):
    """Given an Action, return the right spatial action for this surface."""
    surf_type = self.surf.surf_type
    if surf_type & SurfType.FEATURE:
        return action.action_feature_layer
    if surf_type & SurfType.RGB:
        return action.action_render
    # Surfaces must be FEATURE and/or RGB; anything else is a bug.
    assert surf_type & (SurfType.RGB | SurfType.FEATURE)
|
def put_settings(self, app=None, index=None, settings=None, es=None):
    """Modify index settings.

    The index must already exist. If every requested setting is already
    present on the index, this is a no-op; otherwise the index is closed,
    the settings applied, and the index reopened.

    :param app: application object; defaults to ``self.app``.
    :param index: name of the index to modify; defaults to ``self.index``.
    :param settings: settings body to apply; falsy means do nothing.
    :param es: Elasticsearch client to use; defaults to ``self.es``.
    """
    if not index:
        index = self.index
    if not app:
        app = self.app
    if not es:
        es = self.es
    if not settings:
        return
    # BUG FIX: the original queried ``self.es.indices.get_settings`` here
    # even when an explicit ``es`` client was passed; use the resolved
    # client consistently for all calls.
    for old_settings in es.indices.get_settings(index=index).values():
        try:
            if test_settings_contain(old_settings['settings']['index'], settings['settings']):
                # Settings already in effect: skip the close/put/open
                # cycle, which makes the index briefly unavailable.
                return
        except KeyError:
            pass
    # Index settings can only be changed while the index is closed.
    es.indices.close(index=index)
    es.indices.put_settings(index=index, body=settings)
    es.indices.open(index=index)
|
def check_payment_v3(state_engine, state_op_type, nameop, fee_block_id, token_address, burn_address, name_fee, block_id):
    """Verify that for a version-3 namespace (burn Stacks), the nameop paid the right amount of STACKs.

    Return {'status': True, 'tokens_paid': ..., 'token_units': ...} if so
    Return {'status': False} if not
    """
    # Names in a version-3 namespace are priced in STACKs only.
    epoch_features = get_epoch_features(block_id)
    fqn = nameop['name']
    namespace_id = get_namespace_from_name(fqn)
    name_without_namespace = get_name_from_fq_name(fqn)
    namespace = state_engine.get_namespace(namespace_id)
    assert namespace['version'] == NAMESPACE_VERSION_PAY_WITH_STACKS
    # STACKs must exist in this epoch for the payment to be possible.
    if EPOCH_FEATURE_NAMESPACE_PAY_WITH_STACKS not in epoch_features:
        log.warning("Name '{}' was created in namespace '{}', with version bits 0x{:x}, which is not supported in this epoch".format(fqn, namespace['namespace_id'], namespace['version']))
        return {'status': False}
    # Tokens must have been sent to the canonical burn address.
    if burn_address != BLOCKSTACK_BURN_ADDRESS:
        log.warning('Buyer of {} used the wrong burn address ({}): expected {}'.format(fqn, burn_address, BLOCKSTACK_BURN_ADDRESS))
        return {'status': False}
    # The preorder or renewal must have actually spent STACKs.
    stacks_payment_info = get_stacks_payment(state_engine, nameop, state_op_type)
    if not stacks_payment_info['status']:
        # Failed to query the payment, and Stacks are required.
        return {'status': False}
    # Price the name in STACKs and verify the payment covers it.
    stacks_price = price_name(name_without_namespace, namespace, fee_block_id)
    payment_check = check_token_payment(fqn, stacks_price, stacks_payment_info)
    if not payment_check['status']:
        # Invalid payment.
        return {'status': False}
    return {
        'status': True,
        'tokens_paid': stacks_payment_info['tokens_paid'],
        'token_units': stacks_payment_info['token_units'],
    }
|
def download(name, filenames):
    '''Download a file from the virtual folder to the current working directory.

    Files with the same names will be overwritten.

    NAME: Name of a virtual folder.
    FILENAMES: Paths of the files to be downloaded.
    '''
    with Session() as session:
        try:
            vfolder = session.VFolder(name)
            vfolder.download(filenames, show_progress=True)
            print_done('Done.')
        except Exception as e:
            # Surface the error and exit non-zero for scripting use.
            print_error(e)
            sys.exit(1)
|
def draw(self, surface):
    """Draw all sprites and the map onto the surface.

    :param surface: pygame surface to draw to
    :type surface: pygame.surface.Surface
    """
    offset_x, offset_y = self._map_layer.get_center_offset()
    queued = []
    for sprite in self.sprites():
        shifted_rect = sprite.rect.move(offset_x, offset_y)
        try:
            queued.append((sprite.image, shifted_rect,
                           self.get_layer_of_sprite(sprite), sprite.blendmode))
        except AttributeError:
            # Sprites without a blendmode fall back to the 3-tuple form.
            queued.append((sprite.image, shifted_rect,
                           self.get_layer_of_sprite(sprite)))
        self.spritedict[sprite] = shifted_rect
    self.lostsprites = []
    return self._map_layer.draw(surface, surface.get_rect(), queued)
|
def calcparams_desoto(effective_irradiance, temp_cell, alpha_sc, a_ref, I_L_ref, I_o_ref, R_sh_ref, R_s, EgRef=1.121, dEgdT=-0.0002677, irrad_ref=1000, temp_ref=25):
    '''
    Calculates five parameter values for the single diode equation at
    effective irradiance and cell temperature using the De Soto et al.
    model described in [1]. The five values returned by calcparams_desoto
    can be used by singlediode to calculate an IV curve.

    Parameters
    ----------
    effective_irradiance : numeric
        The irradiance (W/m2) that is converted to photocurrent.
    temp_cell : numeric
        The average cell temperature of cells within a module in C.
    alpha_sc : float
        The short-circuit current temperature coefficient of the
        module in units of A/C.
    a_ref : float
        The product of the usual diode ideality factor (n, unitless),
        number of cells in series (Ns), and cell thermal voltage at
        reference conditions, in units of V.
    I_L_ref : float
        The light-generated current (or photocurrent) at reference
        conditions, in amperes.
    I_o_ref : float
        The dark or diode reverse saturation current at reference
        conditions, in amperes.
    R_sh_ref : float
        The shunt resistance at reference conditions, in ohms.
    R_s : float
        The series resistance at reference conditions, in ohms.
    EgRef : float
        The energy bandgap at reference temperature in units of eV.
        1.121 eV for crystalline silicon. EgRef must be >0. For parameters
        from the SAM CEC module database, EgRef=1.121 is implicit for all
        cell types in the parameter estimation algorithm used by NREL.
    dEgdT : float
        The temperature dependence of the energy bandgap at reference
        conditions in units of 1/K. May be either a scalar value
        (e.g. -0.0002677 as in [1]) or a DataFrame. For parameters from
        the SAM CEC module database, dEgdT=-0.0002677 is implicit for all
        cell types in the parameter estimation algorithm used by NREL.
    irrad_ref : float (optional, default=1000)
        Reference irradiance in W/m^2.
    temp_ref : float (optional, default=25)
        Reference cell temperature in C.

    Returns
    -------
    photocurrent : numeric
        Light-generated current in amperes
    saturation_current : numeric
        Diode saturation current in amperes
    resistance_series : float
        Series resistance in ohms
    resistance_shunt : numeric
        Shunt resistance in ohms
    nNsVth : numeric
        The product of the usual diode ideality factor (n, unitless),
        number of cells in series (Ns), and cell thermal voltage at the
        specified effective irradiance and cell temperature.

    Raises
    ------
    ValueError
        If the deprecated dict-style fourth argument is passed but the
        module model parameters cannot be extracted from it.

    References
    ----------
    [1] W. De Soto et al., "Improvement and validation of a model for
    photovoltaic array performance", Solar Energy, vol 80, pp. 78-88, 2006.
    [2] System Advisor Model web page. https://sam.nrel.gov.
    [3] A. Dobos, "An Improved Coefficient Calculator for the California
    Energy Commission 6 Parameter Photovoltaic Module Model", Journal of
    Solar Energy Engineering, vol 134, 2012.
    [4] O. Madelung, "Semiconductors: Data Handbook, 3rd ed." ISBN
    3-540-40488-0

    See Also
    --------
    singlediode
    retrieve_sam

    Notes
    -----
    If the reference parameters are read from a database or library of
    parameters (e.g. the System Advisor Model library, created as described
    in [3]), it is important to use the same EgRef and dEgdT values that
    were used to generate those reference parameters (1.121 and -0.0002677
    for the SAM CEC database), regardless of the actual bandgap
    characteristics of the semiconductor. Typical values per technology:
    c-Si EgRef=1.121, dEgdT=-0.0002677 [1]; CdTe EgRef=1.475,
    dEgdT=-0.0003 [4]; CIS EgRef=1.010, dEgdT=-0.00011 [4];
    GaAs EgRef=1.424, dEgdT=-0.000433 [4].
    '''
    # Backwards compatibility: detect the pre-v0.6.0 API where the fourth
    # positional argument was a dict/Series of module parameters.
    if isinstance(a_ref, dict) or (isinstance(a_ref, pd.Series) and ('a_ref' in a_ref.keys())):
        import warnings
        warnings.warn('module_parameters detected as fourth positional'
                      ' argument of calcparams_desoto. calcparams_desoto'
                      ' will require one argument for each module model'
                      ' parameter in v0.7.0 and later', DeprecationWarning)
        try:
            module_parameters = a_ref
            a_ref = module_parameters['a_ref']
            I_L_ref = module_parameters['I_L_ref']
            I_o_ref = module_parameters['I_o_ref']
            R_sh_ref = module_parameters['R_sh_ref']
            R_s = module_parameters['R_s']
        except Exception as e:
            # BUG FIX: the original did ``raise e(...)``, i.e. it *called*
            # the caught exception instance, which itself raises TypeError
            # and hides the intended message. Raise a clear, chained error.
            raise ValueError(
                'Module parameters could not be extracted from fourth'
                ' positional argument of calcparams_desoto. Check that'
                ' parameters are from the CEC database and/or update'
                ' your code for the new API for calcparams_desoto') from e

    # Boltzmann constant in eV/K
    k = 8.617332478e-05
    # Reference and operating cell temperatures in Kelvin.
    Tref_K = temp_ref + 273.15
    Tcell_K = temp_cell + 273.15
    # Linear temperature dependence of the bandgap.
    E_g = EgRef * (1 + dEgdT * (Tcell_K - Tref_K))
    # Thermal voltage scales linearly with absolute temperature.
    nNsVth = a_ref * (Tcell_K / Tref_K)
    # In the equation for IL, the single factor effective_irradiance is
    # used in place of the product S*M in [1]. effective_irradiance is
    # equivalent to the product of S (irradiance reaching a module's cells)
    # and M (spectral adjustment factor) as described in [1].
    IL = effective_irradiance / irrad_ref * (I_L_ref + alpha_sc * (Tcell_K - Tref_K))
    I0 = (I_o_ref * ((Tcell_K / Tref_K) ** 3) *
          (np.exp(EgRef / (k * (Tref_K)) - (E_g / (k * (Tcell_K))))))
    # Note that the equation for Rsh differs from [1]. In [1] Rsh is given
    # as Rsh = Rsh_ref * (S_ref / S) where S is broadband irradiance
    # reaching the module's cells. If desired, this model behavior can be
    # duplicated by applying reflection and soiling losses to broadband
    # plane-of-array irradiance and not applying a spectral loss modifier,
    # i.e. spectral_modifier = 1.0.
    Rsh = R_sh_ref * (irrad_ref / effective_irradiance)
    Rs = R_s
    return IL, I0, Rs, Rsh, nNsVth
|
def reissueOverLongJobs(self):
    """Check each issued job; if one has run longer than desirable,
    issue a kill instruction.

    Wait for the job to die, then pass the job to processFinishedJob.
    """
    maxJobDuration = self.config.maxJobDuration
    jobsToKill = []
    # Durations of >= 10000000 seconds (~16 weeks) mean "never rescue".
    if maxJobDuration < 10000000:
        runningJobs = self.batchSystem.getRunningBatchJobIDs()
        for jobBatchSystemID, duration in list(runningJobs.items()):
            if duration > maxJobDuration:
                logger.warn("The job: %s has been running for: %s seconds, more than the "
                            "max job duration: %s, we'll kill it",
                            str(self.jobBatchSystemIDToIssuedJob[jobBatchSystemID].jobStoreID),
                            str(duration), str(maxJobDuration))
                jobsToKill.append(jobBatchSystemID)
    self.killJobs(jobsToKill)
|
def _signature_hash(self, tx_out_script, unsigned_txs_out_idx, hash_type):
    """Return the canonical hash for a transaction.

    We need to remove references to the signature, since it's a signature
    of the hash before the signature is applied.

    :param tx_out_script: the script the coins for unsigned_txs_out_idx are coming from
    :param unsigned_txs_out_idx: where to put the tx_out_script
    :param hash_type: one of SIGHASH_NONE, SIGHASH_SINGLE, SIGHASH_ALL,
        optionally bitwise or'ed with SIGHASH_ANYONECANPAY
    """
    # In case concatenating two scripts ends up with two codeseparators,
    # or an extra one at the end, this prevents all those possible
    # incompatibilities.
    tx_out_script = self.delete_subscript(tx_out_script, self.ScriptTools.compile("OP_CODESEPARATOR"))
    # Blank out other inputs' signatures.
    txs_in = [self._tx_in_for_idx(i, tx_in, tx_out_script, unsigned_txs_out_idx)
              for i, tx_in in enumerate(self.tx.txs_in)]
    txs_out = self.tx.txs_out
    # Blank out some of the outputs, depending on the low 5 bits of hash_type.
    if (hash_type & 0x1f) == SIGHASH_NONE:
        # Wildcard payee: no outputs are committed to at all.
        txs_out = []
        # Let the other inputs update at will (sequence zeroed).
        for i in range(len(txs_in)):
            if i != unsigned_txs_out_idx:
                txs_in[i].sequence = 0
    elif (hash_type & 0x1f) == SIGHASH_SINGLE:
        # This preserves the ability to validate existing legacy
        # transactions which followed a buggy path in Satoshi's
        # original code.
        if unsigned_txs_out_idx >= len(txs_out):
            # Out-of-range SIGHASH_SINGLE: legacy consensus quirk -- return
            # the sentinel value (1 << 248) instead of a real hash.
            return (1 << 248)
        # Only lock in the txout payee at same index as txin; delete
        # any outputs after this one and set all outputs before this
        # one to "null" (where "null" means an empty script and a
        # value of -1).
        txs_out = [self.tx.TxOut(0xffffffffffffffff, b'')] * unsigned_txs_out_idx
        txs_out.append(self.tx.txs_out[unsigned_txs_out_idx])
        # Let the other inputs update at will (sequence zeroed).
        for i in range(len(txs_in)):
            if i != unsigned_txs_out_idx:
                txs_in[i].sequence = 0
    # Blank out other inputs completely; not recommended for open transactions.
    if hash_type & SIGHASH_ANYONECANPAY:
        txs_in = [txs_in[unsigned_txs_out_idx]]
    # Hash a temporary copy of the transaction with the modified ins/outs.
    tmp_tx = self.tx.__class__(self.tx.version, txs_in, txs_out, self.tx.lock_time)
    return from_bytes_32(tmp_tx.hash(hash_type=hash_type))
|
def toposimplify(geojson, p):
    """Convert geojson to topojson and simplify its topology.

    Requires the ``geo2topo`` and ``toposimplify`` command-line tools to
    be available on PATH.

    :param geojson: a dict representing geojson.
    :param p: simplification threshold value between 0 and 1.
    :return: dict representing the simplified topojson, with its single
        object renamed to 'divisions'.
    """
    geo2topo_out = subprocess.run(
        ['geo2topo'],
        input=bytes(json.dumps(geojson), 'utf-8'),
        stdout=subprocess.PIPE)
    # BUG FIX: subprocess argument lists must contain strings; ``p`` is
    # documented as a numeric threshold, so convert it explicitly (a no-op
    # for callers that already pass a string).
    simplify_out = subprocess.run(
        ['toposimplify', '-P', str(p)],
        input=geo2topo_out.stdout,
        stdout=subprocess.PIPE,
        stderr=subprocess.DEVNULL)
    topojson = json.loads(simplify_out.stdout)
    # Standardize object name (geo2topo appears to name the stdin object '-').
    topojson['objects']['divisions'] = topojson['objects'].pop('-')
    return topojson
|
def process_request(self, req):
    '''Check whether data returned from the database is usable.

    Raises for non-2xx HTTP status codes, exits the process with the
    server-provided error message when the response carries one, and
    otherwise returns the decoded JSON payload.

    :param req: a requests-style response object.
    :return: the decoded JSON body.
    '''
    # Raise for status codes outside the 200s.
    req.raise_for_status()
    # Proper status code, but the server may still have returned a warning.
    try:
        output = req.json()
    except ValueError:
        # Server returned non-JSON (e.g. an HTML error page).
        # BUG FIX: was a bare ``except:`` -- narrowed to the JSON decode
        # error so unrelated failures are no longer silently converted.
        exit(req.text)
    # Try to find an error msg in the server response; the server has two
    # variations of errormsg (nested under 'data', or at the top level).
    try:
        error = output['data'].get('errormsg')
    except (KeyError, AttributeError, TypeError):
        error = output.get('errormsg')
    # BUG FIX: the original placed ``if error: exit(error)`` and the return
    # inside a ``finally`` block, which swallows in-flight exceptions and
    # can hit an unbound ``error``; plain sequential code is equivalent on
    # the success path and correct on the failure paths.
    if error:
        exit(error)
    return output
|
def prt_objdesc(self, prt):
    """Write a short description of this GoSubDag object to *prt*."""
    # GO IDs that are merely aliases of another record.
    alt_ids = {go: obj for go, obj in self.go2obj.items() if go != obj.id}
    header = "INITIALIZING GoSubDag: {N:3} sources in {M:3} GOs rcnt({R}). {A} alt GO IDs\n"
    prt.write(header.format(
        N=len(self.go_sources),
        M=len(self.go2obj),
        R=self.rcntobj is not None,
        A=len(alt_ids)))
    prt.write(" GoSubDag: namedtuple fields: {FLDS}\n".format(FLDS=" ".join(self.prt_attr['flds'])))
    prt.write(" GoSubDag: relationships: {RELS}\n".format(RELS=self.relationships))
|
def invoke_tool(namespace, tool_class=None):
    """Invoke a tool and exit.

    `namespace` is a namespace-type dict from which the tool is initialized.
    It should contain exactly one value that is a `Multitool` subclass, and
    this subclass will be instantiated and populated (see
    `Multitool.populate()`) using the other items in the namespace.
    Instances and subclasses of `Command` will therefore be registered with
    the `Multitool`. The tool is then invoked.

    `pwkit.cli.propagate_sigint()` and `pwkit.cli.unicode_stdio()` are
    called at the start of this function, so it should only be called
    immediately upon startup of the Python interpreter.

    This function always exits with an exception; the exception is
    SystemExit(0) in case of success.

    The intended invocation is `invoke_tool(globals())` in some module that
    defines a `Multitool` subclass and multiple `Command` subclasses.

    If `tool_class` is not None, it is used as the tool class rather than
    searching `namespace`, potentially avoiding problems with modules
    containing multiple `Multitool` implementations.
    """
    import sys
    from .. import cli
    # Standard CLI environment setup.
    cli.propagate_sigint()
    cli.unicode_stdio()
    cli.backtrace_on_usr1()
    if tool_class is None:
        # Search the namespace for exactly one strict Multitool subclass.
        for candidate in itervalues(namespace):
            if is_strict_subclass(candidate, Multitool):
                if tool_class is not None:
                    raise PKError('do not know which Multitool implementation to use')
                tool_class = candidate
    if tool_class is None:
        raise PKError('no Multitool implementation to use')
    tool = tool_class()
    tool.populate(itervalues(namespace))
    tool.commandline(sys.argv)
|
def ensure_unicode(str_):
    """Return *str_* as the text type, decoding UTF-8 bytes when needed.

    TODO: rob gp "isinstance\\(.*\\\\bstr\\\\b\\)"
    """
    if isinstance(str_, __STR__):
        return str_
    try:
        return __STR__(str_)
    except UnicodeDecodeError:
        # The bytes are not plain ASCII; if they begin with a UTF-8 BOM we
        # can safely drop the marker before decoding.
        # http://stackoverflow.com/questions/12561063/python-extract-data-from-file
        if str_.startswith(codecs.BOM_UTF8):
            str_ = str_[len(codecs.BOM_UTF8):]
        return str_.decode('utf-8')
|
def insert_taxon_in_new_fasta_file(self, aln):
    """Write a copy of the FASTA alignment with the taxon name in each record ID.

    primer4clades infers the codon usage table from taxon names enclosed in
    square brackets somewhere in each FASTA description, so the taxon is
    prepended to every record identifier.

    Returns:
        Filename of the modified FASTA file that includes the taxon name.
    """
    tagged_records = []
    for record in SeqIO.parse(aln, 'fasta'):
        tagged_id = "[{0}] {1}".format(self.taxon_for_codon_usage, record.id)
        tagged_records.append(SeqRecord(record.seq, id=tagged_id))

    root, ext = os.path.splitext(aln)
    out_filename = '{0}_modified{1}'.format(root, ext)
    SeqIO.write(tagged_records, out_filename, "fasta")
    return out_filename
|
def word_texts(self):
    """The list of raw texts of the ``words`` layer elements."""
    # Lazily tokenize on first access.
    if not self.is_tagged(WORDS):
        self.tokenize_words()
    words = self[WORDS]
    return [entry[TEXT] for entry in words]
|
def ng_save(self, request, *args, **kwargs):
    """Handle Angular's $save(): create or update an object via the modelform.

    Raises ValidationError with the form errors when validation fails.
    """
    form = self.get_form(self.get_form_class())
    if not form.is_valid():
        raise ValidationError(form.errors)
    saved_obj = form.save()
    return self.build_json_response(saved_obj)
|
def get_server_public(self, password_verifier, server_private):
    """Compute the SRP server public value: B = (k*v + g^b) % N.

    :param int password_verifier: v
    :param int server_private: b
    :rtype: int
    """
    kv = self._mult * password_verifier
    gb = pow(self._gen, server_private, self._prime)
    return (kv + gb) % self._prime
|
def forget(self, *keys):
    """Remove items from the collection by key.

    Keys are deleted in descending order so that integer indices remain
    valid as earlier deletions shift later elements.

    :param keys: The keys to remove
    :type keys: tuple
    :rtype: Collection
    """
    for key in sorted(keys, reverse=True):
        del self[key]
    return self
|
def connect_channels(self, channels):
    """Connect each of the provided channels to this object's socket."""
    log_info = self.log.info
    log_info(f"Connecting to channels...")
    sock = self.sock
    for channel in channels:
        channel.connect(sock)
        log_info(f"\t{channel.channel}")
|
def c_ideal_gas(T, k, MW):
    r'''Calculate the speed of sound `c` in an ideal gas at temperature T.

    .. math::
        c = \sqrt{k R_{specific} T}

    Parameters
    ----------
    T : float
        Temperature of fluid, [K]
    k : float
        Isentropic exponent of fluid, [-]
    MW : float
        Molecular weight of fluid, [g/mol]

    Returns
    -------
    c : float
        Speed of sound in fluid, [m/s]

    Notes
    -----
    Used in compressible flow calculations. The specific gas constant is
    derived from the universal one:

    .. math::
        R_{specific} = R\frac{1000}{MW}

    Examples
    --------
    >>> c_ideal_gas(T=303, k=1.4, MW=28.96)
    348.9820953185441

    References
    ----------
    .. [1] Green, Don, and Robert Perry. Perry's Chemical Engineers' Handbook,
       Eighth Edition. McGraw-Hill Professional, 2007.
    .. [2] Cengel, Yunus, and John Cimbala. Fluid Mechanics: Fundamentals and
       Applications. Boston: McGraw Hill Higher Education, 2006.
    '''
    # Convert the universal gas constant [J/(mol*K)] to a mass-specific
    # one [J/(kg*K)]; MW is in g/mol, hence the factor 1000.
    specific_gas_constant = R * 1000. / MW
    return (k * specific_gas_constant * T) ** 0.5
|
def extended_fade_in(self, segment, duration):
    """Add a fade-in to a segment that extends the beginning of the segment.

    :param segment: Segment to fade in
    :type segment: :py:class:`radiotool.composer.Segment`
    :param duration: Duration of fade-in (in seconds)
    :returns: The fade that has been added to the composition
    :rtype: :py:class:`Fade`

    :raises Exception: if extending the segment would run past the beginning
        of either the source track or the score.
    """
    dur = int(duration * segment.track.samplerate)

    # Validate both bounds BEFORE mutating the segment, so a failed check
    # does not leave the segment partially shifted (the original code
    # mutated segment.start before checking comp_location).
    if segment.start - dur < 0:
        raise Exception("Cannot create fade-in that extends "
                        "past the track's beginning")
    if segment.comp_location - dur < 0:
        raise Exception("Cannot create fade-in that extends "
                        "past the score's beginning")

    segment.start -= dur
    segment.comp_location -= dur
    segment.duration += dur

    f = Fade(segment.track, segment.comp_location_in_seconds,
             duration, 0.0, 1.0)
    self.add_dynamic(f)
    return f
|
def load(obj, env=None, silent=None, key=None):
    """Read and load into "settings" a single key or all keys from vault.

    :param obj: the settings instance
    :param env: settings env default='DYNACONF'
    :param silent: if errors should raise
    :param key: if defined load a single key, else load all in env
    :return: None (returns ``False`` when an error occurs and *silent* is
        truthy)
    """
    client = get_client(obj)
    env_list = _get_env_list(obj, env)
    for env in env_list:
        # Build the secret path for this env; collapse any accidental '//'
        # produced by joining.
        path = "/".join([obj.VAULT_PATH_FOR_DYNACONF, env]).replace("//", "/")
        data = client.read(path)
        if data:
            # There seems to be a data dict within a data dict,
            # extract the inner data
            data = data.get("data", {}).get("data", {})
        try:
            if data and key:
                # Single-key mode: parse just that entry (tomlfy converts
                # the raw string into a typed value).
                value = parse_conf_data(data.get(key), tomlfy=True)
                if value:
                    obj.logger.debug("vault_loader: loading by key: %s:%s (%s:%s)", key, "****", IDENTIFIER, path, )
                    obj.set(key, value)
            elif data:
                # Bulk mode: merge every key found under this path.
                obj.logger.debug("vault_loader: loading: %s (%s:%s)", list(data.keys()), IDENTIFIER, path, )
                obj.update(data, loader_identifier=IDENTIFIER, tomlfy=True)
        except Exception as e:
            if silent:
                # Best-effort mode: log (if possible) and bail out quietly.
                if hasattr(obj, "logger"):
                    obj.logger.error(str(e))
                return False
            raise
|
def _l2deriv ( self , l , n ) :
"""NAME :
_ l2deriv
PURPOSE :
evaluate the second derivative w . r . t . lambda for this potential
INPUT :
l - prolate spheroidal coordinate lambda
n - prolate spheroidal coordinate nu
OUTPUT :
second derivative w . r . t . lambda
HISTORY :
2015-02-15 - Written - Trick ( MPIA )"""
|
numer = - 3. * nu . sqrt ( l ) - nu . sqrt ( n )
denom = 4. * l ** 1.5 * ( nu . sqrt ( l ) + nu . sqrt ( n ) ) ** 3
return numer / denom
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.