signature
stringlengths 29
44.1k
| implementation
stringlengths 0
85.2k
|
---|---|
def get_weight(self, weight=operator.attrgetter('weight')):
    """Compute the total weight of the source model.

    :param weight: callable extracting the weight of a single source
    :returns: sum of ``weight(src)`` over all sources
    """
    total = 0
    for src in self.get_sources():
        total += weight(src)
    return total
|
def get_occupation(self, atom_index, orbital):
    """Returns the occupation for a particular orbital of a particular atom.

    Args:
        atom_index (int): Index of atom in the PROCAR. It should be noted
            that VASP uses 1-based indexing for atoms, but this is
            converted to 0-based indexing in this parser to be
            consistent with representation of structures in pymatgen.
        orbital (str): An orbital. If it is a single character, e.g., s,
            p, d or f, the sum of all s-type, p-type, d-type or f-type
            orbitals occupations are returned respectively. If it is a
            specific orbital, e.g., px, dxy, etc., only the occupation
            of that orbital is returned.

    Returns:
        dict mapping each spin to the k-point-weighted sum occupation of
        the orbital of the atom.
    """
    orb_idx = self.orbitals.index(orbital)
    occupations = {}
    for spin, projections in self.data.items():
        weighted = projections[:, :, atom_index, orb_idx] * self.weights[:, None]
        occupations[spin] = np.sum(weighted)
    return occupations
|
def listar_healthcheck_expect(self, id_ambiente):
    """List the healthcheck_expect entries of an environment.

    :param id_ambiente: environment identifier (positive int)
    :return: dict with the structure::

        {'healthcheck_expect': [{'id': <id_healthcheck_expect>,
                                 'expect_string': <expect_string>,
                                 'match_list': <match_list>,
                                 'id_ambiente': <id_ambiente>},
                                ...]}

    :raise InvalidParameterError: environment identifier is null or invalid.
    :raise DataBaseError: networkapi failed to access the database.
    :raise XMLError: networkapi failed to build the XML response.
    """
    if not is_valid_int_param(id_ambiente):
        raise InvalidParameterError(
            u'O identificador do ambiente é inválido ou não foi informado.')
    url = 'healthcheckexpect/ambiente/' + str(id_ambiente) + '/'
    code, xml = self.submit(None, 'GET', url)
    key = 'healthcheck_expect'
    response_map = self.response(code, xml, [key])
    return get_list_map(response_map, key)
|
def MergeFlags(self, args, unique=1, dict=None):
    """Merge the dict in args into the construction variables of this
    env, or the passed-in dict.  If args is not a dict, it is
    converted into a dict using ParseFlags.  If unique is not set,
    the flags are appended rather than merged.

    :param args: dict of flag lists, or a string to be run through ParseFlags
    :param unique: if falsy, simply Append() everything; otherwise merge
        with de-duplication (see *PATH handling below)
    :param dict: target dict for the merge (defaults to self)
    :returns: self, for chaining
    """
    # NOTE(review): `dict` (which shadows the builtin) is assigned below but
    # never referenced afterwards -- all reads/writes go through self.
    # Confirm whether merging into a passed-in dict was ever implemented.
    if dict is None:
        dict = self
    if not SCons.Util.is_Dict(args):
        args = self.ParseFlags(args)
    if not unique:
        # Non-unique mode: plain append, no de-duplication.
        self.Append(**args)
        return self
    for key, value in args.items():
        if not value:
            continue  # skip empty flag lists
        try:
            orig = self[key]
        except KeyError:
            # Variable not set yet: the new value becomes the whole thing.
            orig = value
        else:
            if not orig:
                orig = value
            elif value:
                # Add orig and value.  The logic here was lifted from
                # part of env.Append() (see there for a lot of comments
                # about the order in which things are tried) and is
                # used mainly to handle coercion of strings to CLVar to
                # "do the right thing" given (e.g.) an original CCFLAGS
                # string variable like '-pipe -Wall'.
                try:
                    orig = orig + value
                except (KeyError, TypeError):
                    try:
                        add_to_orig = orig.append
                    except AttributeError:
                        # orig is not list-like: prepend it to the new list.
                        value.insert(0, orig)
                        orig = value
                    else:
                        add_to_orig(value)
        t = []
        if key[-4:] == 'PATH':
            ### keep left-most occurence
            for v in orig:
                if v not in t:
                    t.append(v)
        else:
            ### keep right-most occurence
            orig.reverse()
            for v in orig:
                if v not in t:
                    t.insert(0, v)
        self[key] = t
    return self
|
def run_command(self, commands, timeout_sec=None, exception=None):
    """Executes the given commands and sends OVSDB messages.

    ``commands`` must be a list of
    :py:mod:`ryu.lib.ovs.vsctl.VSCtlCommand`.

    If ``timeout_sec`` is specified, raises exception after the given
    timeout [sec]. Additionally, if ``exception`` is specified, this
    function will wrap the timeout error using the given exception class.

    Returns ``None`` but fills the ``result`` attribute for each command
    instance.
    """
    if timeout_sec is None:
        self._run_command(commands)
        return
    with hub.Timeout(timeout_sec, exception):
        self._run_command(commands)
|
def _who_when(self, s, cmd, section, accept_just_who=False):
    """Parse who and when information from a string.

    :param s: bytes of the form ``name <email> datestr`` (fast-import style)
    :param cmd: command name, used only for error reporting
    :param section: section name, used only for error reporting
    :param accept_just_who: if True, accept a match with no timestamp and
        substitute the current time
    :return: a tuple of (name, email, timestamp, timezone). name may be
        the empty string if only an email address was given.
    """
    match = _WHO_AND_WHEN_RE.search(s)
    if match:
        datestr = match.group(3).lstrip()
        if self.date_parser is None:
            # auto-detect the date format from the first date seen and
            # cache the parser for subsequent calls
            if len(datestr.split(b' ')) == 2:
                date_format = 'raw'
            elif datestr == b'now':
                date_format = 'now'
            else:
                date_format = 'rfc2822'
            self.date_parser = dates.DATE_PARSERS_BY_NAME[date_format]
        try:
            when = self.date_parser(datestr, self.lineno)
        except ValueError:
            print("failed to parse datestr '%s'" % (datestr,))
            raise
        name = match.group(1).rstrip()
        email = match.group(2)
    else:
        match = _WHO_RE.search(s)
        if accept_just_who and match:
            # HACK around missing time
            # TODO: output a warning here
            when = dates.DATE_PARSERS_BY_NAME['now']('now')
            name = match.group(1)
            email = match.group(2)
        elif self.strict:
            # NOTE(review): assumes self.abort() raises; otherwise name,
            # email and when are unbound below -- confirm.
            self.abort(errors.BadFormat, cmd, section, s)
        else:
            # Lenient fallback: treat the whole string as the name.
            name = s
            email = None
            when = dates.DATE_PARSERS_BY_NAME['now']('now')
    if len(name) > 0:
        if name.endswith(b' '):
            name = name[:-1]  # drop a single trailing space
    # While it shouldn't happen, some datasets have email addresses
    # which contain unicode characters. See bug 338186. We sanitize
    # the data at this level just in case.
    if self.user_mapper:
        name, email = self.user_mapper.map_name_and_email(name, email)
    return Authorship(name, email, when[0], when[1])
|
def writeline(self, fmt, *args):
    """Write ``args`` packed with the given ``fmt`` as one Fortran-style
    record: the payload preceded and followed by its 4-byte length marker,
    using this file's endianness.
    """
    full_fmt = self.endian + fmt
    record = struct.pack(full_fmt, *args)
    marker = struct.pack(self.endian + 'i', struct.calcsize(full_fmt))
    self.write(marker)
    self.write(record)
    self.write(marker)
|
def _monitor(self):
    """Monitor the queue for items, and ask the handler to deal with them.

    This method runs on a separate, internal thread.
    The thread will terminate if it sees a sentinel object in the queue.
    """
    err_msg = ("invalid internal state:"
               " _stop_nowait can not be set if _stop is not set")
    assert self._stop.isSet() or not self._stop_nowait.isSet(), err_msg
    q = self.queue
    # task_done() only exists on queue.Queue-like objects; probe once so
    # other queue implementations still work.
    has_task_done = hasattr(q, 'task_done')
    # Main loop: block on dequeue until asked to stop or a sentinel arrives.
    while not self._stop.isSet():
        try:
            record = self.dequeue(True)
            if record is self._sentinel_item:
                break
            self.handle(record)
            if has_task_done:
                q.task_done()
        except queue.Empty:
            pass  # dequeue timed out; re-check the stop flag
    # There might still be records in the queue,
    # handle them unless _stop_nowait is set.
    while not self._stop_nowait.isSet():
        try:
            record = self.dequeue(False)  # non-blocking drain
            if record is self._sentinel_item:
                break
            self.handle(record)
            if has_task_done:
                q.task_done()
        except queue.Empty:
            break  # queue fully drained
|
def _forwards(apps, schema_editor):
    """Make sure that the MarkupItem model actually points
    to the correct proxy model, that implements the given language.
    """
    # Need to work on the actual models here, not migration snapshots.
    from fluent_contents.plugins.markup.models import LANGUAGE_MODEL_CLASSES
    from fluent_contents.plugins.markup.models import MarkupItem
    from django.contrib.contenttypes.models import ContentType

    base_ctype = ContentType.objects.get_for_model(MarkupItem)
    for language, proxy_model in LANGUAGE_MODEL_CLASSES.items():
        proxy_ctype = ContentType.objects.get_for_model(
            proxy_model, for_concrete_model=False)
        queryset = MarkupItem.objects.filter(
            polymorphic_ctype=base_ctype, language=language)
        queryset.update(polymorphic_ctype=proxy_ctype)
|
def _get_qe(self, key, obj):
    """Instantiate a query engine for ``key``, or retrieve a cached one."""
    try:
        return self._cached[key]
    except KeyError:
        engine = create_query_engine(obj, self._class)
        self._cached[key] = engine
        return engine
|
def xml_starttag(self, name, attrs=None):
    """Write an XML start tag, with optional attributes, and increase the
    indentation level for the tag's children."""
    self.write(self.indent * self.level)
    self.write(u"<%s" % xmlquote(name))
    if attrs:
        for attr_name, attr_value in attrs.items():
            quoted = (xmlquote(attr_name), xmlquoteattr(attr_value))
            self.write(u' %s="%s"' % quoted)
    self.writeln(u">")
    self.level += 1
|
def extract_opts(**opts):
    """Small utility to extract a set of one-char options from sys.argv.

    For each keyword ``opt=default``, looks for ``-opt`` in ``sys.argv``;
    when found, the token following it (if any) replaces the default and
    both tokens are removed from ``sys.argv``.

    :param opts: mapping of option letter to default value
    :return: the same mapping, updated with any values found
    """
    # Fix: dropped an unused `values = {}` local and the unused `init`
    # loop variable from the original -- results were always stored in
    # and returned via `opts`.
    for opt in opts:
        try:
            idx = sys.argv.index('-%s' % opt)
        except ValueError:
            continue  # option not present; keep its default
        if idx + 1 < len(sys.argv):
            opts[opt] = sys.argv.pop(idx + 1)
        sys.argv.pop(idx)
    return opts
|
def cacheable(self):
    """Return the cacheable attribute of the BFD file being processed.

    :raises BfdException: if the BFD pointer was never initialized.
    """
    if self._ptr:
        return _bfd.get_bfd_attribute(self._ptr, BfdAttributes.CACHEABLE)
    raise BfdException("BFD not initialized")
|
def attributes(cls, create=False, extra=None):
    """Build a dict of attribute values, respecting declaration order.

    The process is:
    - Handle 'orderless' attributes, overriding defaults with provided
      kwargs when applicable
    - Handle ordered attributes, overriding them with provided kwargs when
      applicable; the current list of computed attributes is available
      to the currently processed object.

    .. deprecated:: emits a DeprecationWarning on every call.
    """
    warnings.warn(
        "Usage of Factory.attributes() is deprecated.",
        DeprecationWarning,
        stacklevel=2,
    )
    decls = cls._meta.pre_declarations.as_dict()
    if extra:
        decls.update(extra)
    from . import helpers
    return helpers.make_factory(dict, **decls)
|
def addDarkCurrent(self, slope, intercept=None, date=None, info='', error=None):
    '''Register a dark-current calibration entry, kept sorted by date.

    Args:
        slope (np.array): dPx/dExposureTime [sec]
        intercept (np.array): optional offset; when given, data is stored
            as the tuple (slope, intercept)
        error (numpy.array): absolute error
        date (str): "DD Mon YY" e.g. "30 Nov 16"
        info (str): free-text description
    '''
    date = _toDate(date)
    self._checkShape(slope)
    # NOTE(review): _checkShape is also called when intercept is None --
    # confirm it tolerates a None argument.
    self._checkShape(intercept)
    d = self.coeffs['dark current']
    if intercept is None:
        data = slope
    else:
        data = (slope, intercept)
    # Insert at the position that keeps the list ordered by date.
    d.insert(_insertDateIndex(date, d), [date, info, data, error])
|
def add(self, *dic):
    '''Add one or more configs to StartCalendarInterval.

    Args:
        *dic (dict): dictionaries of the form {'Day': 12, 'Hour': 34}.
            Available keys are Month, Day, Weekday, Hour, Minute (note
            the uppercase). You can use gen(), genMix() to generate
            complex config dictionaries.
    '''
    # Flatten the arguments, then convert each dict into a DictSingle of
    # key/IntegerSingle pairs and append it to this interval's array.
    for config in list(flatten(dic)):
        pairs = []
        for key in config:
            pairs.append(Pair(key, IntegerSingle(config[key])))
        self._add([DictSingle(pairs)], self.l)
|
def decode_door(packet, channel=1):
    """Decode a door sensor.

    Returns True/False derived from the last digit of the 6-character
    payload when it starts with '46' and channel is 1; otherwise None.
    """
    payload = str(packet.get(QSDATA, ''))
    if channel == 1 and len(payload) == 6 and payload.startswith('46'):
        return payload[-1] == '0'
    return None
|
def create_upload_and_chunk_url(self, project_id, path_data, hash_data, remote_filename=None, storage_provider_id=None):
    """Create a non-chunked upload that returns upload id and upload url.

    This type of upload doesn't allow additional upload urls. For single
    chunk files this method is more efficient than
    create_upload/create_file_chunk_url.

    :param project_id: str: uuid of the project
    :param path_data: PathData: holds file system data about the file we are uploading
    :param hash_data: HashData: contains hash alg and value for the file we are uploading
    :param remote_filename: str: name to use for our remote file (defaults to path_data basename otherwise)
    :param storage_provider_id: str: optional storage provider id
    :return: str, dict: uuid for the upload, upload chunk url dict
    """
    response = self._create_upload(
        project_id, path_data, hash_data,
        remote_filename=remote_filename,
        storage_provider_id=storage_provider_id,
        chunked=False)
    return response['id'], response['signed_url']
|
def append(self, other):
    """Append a collection of Index options together.

    Parameters
    ----------
    other : Index or list/tuple of indices

    Returns
    -------
    appended : Index

    Raises
    ------
    TypeError
        If any input is not an Index.
    """
    if isinstance(other, (list, tuple)):
        to_concat = [self] + list(other)
    else:
        to_concat = [self, other]
    for obj in to_concat:
        if not isinstance(obj, Index):
            raise TypeError('all inputs must be Index')
    # Keep the shared name only when every piece agrees on it.
    names = {obj.name for obj in to_concat}
    name = self.name if len(names) <= 1 else None
    return self._concat(to_concat, name)
|
def table_r_node(self, node):
    """General pattern where the last node should
    get the text span attributes of the entire tree.

    Delegates to ``self.default``; when that prunes the traversal by
    raising GenericASTTraversalPruningException, the text emitted between
    entry and the prune is recorded as the position span of both ``node``
    and its last child, and the exception is re-raised so pruning still
    takes effect.
    """
    start = len(self.f.getvalue())
    try:
        self.default(node)
    except GenericASTTraversalPruningException:
        # Everything written to the buffer since `start` belongs to this
        # subtree; stamp it onto the node and its last child.
        final = len(self.f.getvalue())
        self.set_pos_info(node, start, final)
        self.set_pos_info(node[-1], start, final)
        raise GenericASTTraversalPruningException
|
def native_container(self):
    """Return the native container object, creating and caching it on
    first access."""
    cached = self.__native
    if cached is None:
        cached = self._get_container()
        self.__native = cached
    return cached
|
def delete_node(node_id, purge_data, **kwargs):
    """Remove node from DB completely.

    If there are attributes on the node, use purge_data to try to
    delete the data. If no other resources link to this data, it
    will be deleted.

    :param node_id: id of the node to delete
    :param purge_data: 'Y' to also purge datasets used only by this node
    :param kwargs: expects 'user_id' for the write-permission check
    :raises ResourceNotFoundError: if no node with node_id exists
    :returns: 'OK'
    """
    user_id = kwargs.get('user_id')
    try:
        node_i = db.DBSession.query(Node).filter(Node.id == node_id).one()
    except NoResultFound:
        raise ResourceNotFoundError("Node %s not found" % (node_id))
    # Remove group memberships referencing this node first.
    # NOTE(review): these deletes (and the optional purge) happen before
    # check_write_permission below; if that raises, the session is already
    # dirty -- confirm this runs inside a transaction that rolls back.
    group_items = db.DBSession.query(ResourceGroupItem).filter(
        ResourceGroupItem.node_id == node_id).all()
    for gi in group_items:
        db.DBSession.delete(gi)
    if purge_data == 'Y':
        _purge_datasets_unique_to_resource('NODE', node_id)
    log.info("Deleting node %s, id=%s", node_i.name, node_id)
    node_i.network.check_write_permission(user_id)
    db.DBSession.delete(node_i)
    db.DBSession.flush()
    return 'OK'
|
def _print_MatMul(self, expr):
    """Matrix multiplication printer.

    The sympy one turns everything into a dot product without
    type-checking; this version emits ``.dot(`` only between two matrix
    operands and a plain ``*`` otherwise.
    """
    from sympy import MatrixExpr
    connectors = []
    for rhs, lhs in zip(expr.args[1:], expr.args[:-1]):
        both_matrices = isinstance(rhs, MatrixExpr) and isinstance(lhs, MatrixExpr)
        connectors.append(').dot(' if both_matrices else '*')
    pieces = [self._print(arg) for arg in expr.args]
    rendered = [pieces[0]]
    for connector, piece in zip(connectors, pieces[1:]):
        rendered.append(connector)
        rendered.append(piece)
    return '({0})'.format(''.join(rendered))
|
def is_checkmate(position, input_color):
    """Finds if a particular King is checkmated.

    :type position: Board
    :type input_color: Color
    :rtype: bool
    """
    no_moves_left = position.no_moves(input_color)
    return no_moves_left and position.get_king(input_color).in_check(position)
|
def option(self, key, value):
    """Adds an output option for the underlying data source.

    You can set the following option(s) for writing files:
        * ``timeZone``: sets the string that indicates a timezone to be
          used to format timestamps in the JSON/CSV datasources or
          partition values. If it isn't set, it uses the default value,
          session local timezone.
    """
    converted = to_str(value)
    self._jwrite = self._jwrite.option(key, converted)
    return self
|
def learn(self, runs, dir=1, periodic=False, recurrent=True, randomSpeed=False, learnRecurrent=False, envelope=True, ):
    """Traverses a sinusoidal trajectory across the environment, learning during
    the process. A pair of runs across the environment (one in each direction)
    takes 10 seconds if in a periodic larger environment, and 4 seconds in a
    smaller nonperiodic environment.

    :param runs: How many runs across the environment to do. Each "run" is
        defined as a full sweep across the environment in each direction.
    :param dir: Which direction to move in first. Valid values are 1 and -1.
    :param periodic: Whether or not the learning environment should be
        periodic (toroidal).
    :param recurrent: Whether or not recurrent connections should be active
        during learning. Warning: True can lead to instability.
    :param randomSpeed: Whether or not to use a random maximum speed for each
        run, to better simulate real learning. Can degrade performance.
        Only supported in periodic environments.
    :param learnRecurrent: Whether or not to learn recurrent connections.
    :param envelope: Whether or not the envelope should be active in learning.
    """
    # Simulate for a second to get nice starting activation bumps.
    # Turn plotting off so as not to confuse the viewer.
    oldPlotting = self.plotting
    self.plotting = False
    self.simulate(10, 1, 1, 0, envelope=False, inputNoise=None, save=False)
    self.plotting = oldPlotting

    # Set up plotting.
    if self.plotting:
        self.fig = plt.figure()
        self.ax1 = self.fig.add_subplot(611)
        self.ax2 = self.fig.add_subplot(612)
        self.ax3 = self.fig.add_subplot(613)
        self.ax4 = self.fig.add_subplot(212)
        self.ax3.set_xlabel("Inhibitory-Inhibitory connections")
        plt.ion()
        self.fig.show()
        self.fig.canvas.draw()

    # Fix: residTime was only initialized in the periodic branch, which made
    # the plotting bookkeeping below a NameError for non-periodic runs.
    residTime = 0

    # Set up the trajectories and running times.
    if not periodic:
        time = 4. * runs
        # Fix: the sweep-time array was referenced via the undefined name
        # `times`; bind it explicitly before use.
        times = np.arange(0, time, self.dt)
        timings = [times]
        trajectories = [(np.sin(dir * (times * np.pi / 2 - np.pi / 2.)) + 1) / 2]
    else:
        # Space the starting points of the runs out. This tends to improve the
        # translation-invariance of the weight profiles, and thus gives better
        # overall path integration.
        startingPoint = 0
        trajectories = []
        timings = []
        time = 0
        for run in xrange(runs):
            if randomSpeed:
                speed = np.random.random() + 0.5
            else:
                speed = 1.
            length = 10. / speed
            runTimes = np.arange(0, length, self.dt)
            trajectory = (np.sin(dir * (runTimes * np.pi / (5 / speed) - np.pi / 2.)) + 1) * 2.5 + startingPoint
            trajectories.append(trajectory)
            timings.append(runTimes + time)
            time += length
            startingPoint += 1. / runs

    for trajectory, timing in zip(trajectories, timings):
        # Randomize activations and reset STDP history for each sweep.
        self.activationsI = np.random.random_sample(self.activationsI.shape)
        self.activationsER = np.random.random_sample(self.activationsER.shape)
        self.activationsEL = np.random.random_sample(self.activationsEL.shape)
        self.activationHistoryI = np.zeros(self.activationsI.shape)
        self.activationHistoryEL = np.zeros(self.activationsEL.shape)
        self.activationHistoryER = np.zeros(self.activationsER.shape)
        velocities = np.diff(trajectory) / self.dt
        for i, t in enumerate(timing[:-1]):
            x = trajectory[i] % self.roomSize
            v = velocities[i]
            # Gaussian place-cell input centered on the current position.
            self.activationsP = np.exp(-1. * (self.placeCode - x) ** 2 / (2 * self.sigmaLoc ** 2))
            self.update(0, 0, v, recurrent=recurrent, envelope=envelope, iSpeedTuning=periodic, enforceDale=True, )
            self.stdpUpdate(t, onlyPlace=not learnRecurrent)
            # Enforce Dale's law for place cells. Place cells must be excitatory.
            np.maximum(self.weightsPI, 0, self.weightsPI)
            np.maximum(self.weightsPEL, 0, self.weightsPEL)
            np.maximum(self.weightsPER, 0, self.weightsPER)
            # Also keep the place weights from being too large.
            np.minimum(self.weightsPI, 1., self.weightsPI)
            np.minimum(self.weightsPEL, 3., self.weightsPEL)
            np.minimum(self.weightsPER, 3., self.weightsPER)
            if self.plotting:
                residTime += self.dt
                if residTime > PLOT_INTERVAL:
                    residTime -= PLOT_INTERVAL
                    self.ax4.matshow(self.weightsPI, cmap=plt.cm.coolwarm)
                    self.plotActivation(position=x, time=t)
        # Flush the STDP buffer at the end of each sweep.
        self.stdpUpdate(t, onlyPlace=not learnRecurrent, clearBuffer=True)

    # Finally, enforce Dale's law for recurrent connections.
    # Inhibitory neurons must be inhibitory,
    # excitatory neurons must be excitatory.
    # This could be handled through update, but it's faster to do it here.
    np.minimum(self.weightsII, 0, self.weightsII)
    np.minimum(self.weightsIER, 0, self.weightsIER)
    np.minimum(self.weightsIEL, 0, self.weightsIEL)
    np.maximum(self.weightsELI, 0, self.weightsELI)
    np.maximum(self.weightsERI, 0, self.weightsERI)
|
def check_command(self, name, exclude_packages=None, exclude_command_class=None):
    """Uses get_command_class() to check for the presence of a command.

    :return: True when the command class can be resolved, False otherwise.
    """
    found = get_command_class(
        name,
        exclude_packages=exclude_packages,
        exclude_command_class=exclude_command_class,
    )
    return found is not None
|
def _mmComputeTransitionTraces(self):
    """Computes the transition traces, if necessary.

    Transition traces are the following:
        predicted => active cells
        predicted => inactive cells
        predicted => active columns
        predicted => inactive columns
        unpredicted => active columns
    """
    if not self._mmTransitionTracesStale:
        return  # traces are up to date; nothing to do
    self._mmData["predictedActiveCellsForSequence"] = defaultdict(set)
    # One trace per transition category; each accumulates a set per timestep.
    self._mmTraces["predictedActiveCells"] = IndicesTrace(self, "predicted => active cells (correct)")
    self._mmTraces["predictedInactiveCells"] = IndicesTrace(self, "predicted => inactive cells (extra)")
    self._mmTraces["predictedActiveColumns"] = IndicesTrace(self, "predicted => active columns (correct)")
    self._mmTraces["predictedInactiveColumns"] = IndicesTrace(self, "predicted => inactive columns (extra)")
    self._mmTraces["unpredictedActiveColumns"] = IndicesTrace(self, "unpredicted => active columns (bursting)")
    predictedCellsTrace = self._mmTraces["predictedCells"]
    for i, activeColumns in enumerate(self.mmGetTraceActiveColumns().data):
        predictedActiveCells = set()
        predictedInactiveCells = set()
        predictedActiveColumns = set()
        predictedInactiveColumns = set()
        for predictedCell in predictedCellsTrace.data[i]:
            predictedColumn = self.columnForCell(predictedCell)
            if predictedColumn in activeColumns:
                # Correct prediction: the cell's column did become active.
                predictedActiveCells.add(predictedCell)
                predictedActiveColumns.add(predictedColumn)
                sequenceLabel = self.mmGetTraceSequenceLabels().data[i]
                if sequenceLabel is not None:
                    self._mmData["predictedActiveCellsForSequence"][sequenceLabel].add(predictedCell)
            else:
                # Extra prediction: the column never became active.
                predictedInactiveCells.add(predictedCell)
                predictedInactiveColumns.add(predictedColumn)
        # Bursting: active columns that nothing predicted.
        unpredictedActiveColumns = set(activeColumns) - set(predictedActiveColumns)
        self._mmTraces["predictedActiveCells"].data.append(predictedActiveCells)
        self._mmTraces["predictedInactiveCells"].data.append(predictedInactiveCells)
        self._mmTraces["predictedActiveColumns"].data.append(predictedActiveColumns)
        self._mmTraces["predictedInactiveColumns"].data.append(predictedInactiveColumns)
        self._mmTraces["unpredictedActiveColumns"].data.append(unpredictedActiveColumns)
    self._mmTransitionTracesStale = False
|
def min_version(self):
    """Version with the fewest downloads.

    :return: (version, count) tuple; (None, 0) when no data is available.
    """
    counts = self.version_downloads
    if not counts:
        return (None, 0)
    best = None
    for entry in counts.items():
        if best is None or entry[1] < best[1]:
            best = entry
    return best
|
def from_range(cls, range_list, register_flag=True):
    """Core class method to create visible objects from a range (nested list)."""
    serializable = dict_from_range(range_list)
    return cls.from_serializable(serializable, register_flag)
|
def deserialize(cls, value, *args, **kwargs):
    """Deserialize a value just after importing it.

    ``cls.deserialize`` should always return a value of type ``cls.TYPE``
    or ``None``.
    """
    if isinstance(value, cls.TYPE):
        return value
    if is_null(value):
        return None
    return value
|
def get_dependencies_from_json(ireq):
    """Retrieves dependencies for the given install requirement from the json api.

    :param ireq: A single InstallRequirement
    :type ireq: :class:`~pip._internal.req.req_install.InstallRequirement`
    :return: A set of dependency lines for generating new InstallRequirements.
    :rtype: set(str) or None
    """
    if ireq.editable or not is_pinned_requirement(ireq):
        # Only exact (pinned) requirements map to a unique JSON document.
        return
    # It is technically possible to parse extras out of the JSON API's
    # requirement format, but it is such a chore let's just use the simple API.
    if ireq.extras:
        return
    session = requests.session()
    atexit.register(session.close)
    version = str(ireq.req.specifier).lstrip("=")

    def gen(ireq):
        # Yield formatted requirement lines from PyPI's JSON metadata.
        info = None
        try:
            info = session.get("https://pypi.org/pypi/{0}/{1}/json".format(ireq.req.name, version)).json()["info"]
        finally:
            session.close()
        requires_dist = info.get("requires_dist", info.get("requires"))
        if not requires_dist:
            # The API can return None for this.
            return
        for requires in requires_dist:
            i = pip_shims.shims.InstallRequirement.from_line(requires)
            # See above, we don't handle requirements with extras.
            if not _marker_contains_extra(i):
                yield format_requirement(i)

    if ireq not in DEPENDENCY_CACHE:
        try:
            # Cache the fully materialized list for next time.
            reqs = DEPENDENCY_CACHE[ireq] = list(gen(ireq))
        except JSONDecodeError:
            return
        req_iter = iter(reqs)
    else:
        # NOTE(review): the cache-hit branch calls gen() (a fresh network
        # fetch) instead of reading DEPENDENCY_CACHE[ireq] -- looks
        # unintended; confirm.
        req_iter = gen(ireq)
    return set(req_iter)
|
def _do_subread_set(flag, input_file, of, negative_filter, aligned):
    """Stream alignments via ``samtools view`` twice, keeping one best
    subread per molecule and writing the winners to ``of``.

    :param flag: extra samtools view flag string
    :param input_file: path to the alignment file
    :param of: output file handle receiving the selected SAM lines
    :param negative_filter: set of molecule ids to skip; the molecules
        written here are added to it before it is returned
    :param aligned: if True rank subreads by aligned base count,
        otherwise by query length
    :return: the updated negative_filter set
    """
    best = {}  # molecule id -> [subread name, best score]
    cmd = 'samtools view ' + flag + ' ' + input_file
    sys.stderr.write(cmd + "\n")
    p = Popen(cmd.split(), stdout=PIPE)
    z = 0
    # Pass 1: find the best-scoring subread name for every molecule.
    for line in p.stdout:
        z += 1
        if z % 10000 == 0:
            sys.stderr.write(str(z) + " subread alignment paths scanned for alignment length\r")
        pbname = PacBioReadName(_nameprog.match(line).group(1))
        mol = pbname.get_molecule()
        if mol in negative_filter:
            continue  # molecule already handled by an earlier call
        name = pbname.name()
        sam = SAM(line)
        c = 0
        # aligned base count if we are aligned, subread length if we are not aligned
        if aligned:
            c = sam.get_aligned_bases_count()
        else:
            c = sam.get_query_length()
        if mol not in best:
            best[mol] = [name, c]
        if c > best[mol][1]:
            best[mol] = [name, c]
    p.communicate()
    sys.stderr.write("\n")
    sys.stderr.write("Finished analyzing subread lengths\nWriting aligned subreads\n")
    """After getting all the best alignment counts we can traverse again
    to keep the best"""
    # Pass 2: re-stream the file and emit only each molecule's winner.
    cmd = 'samtools view ' + flag + ' ' + input_file
    sys.stderr.write(cmd + "\n")
    z = 0
    p = Popen(cmd.split(), stdout=PIPE)
    for line in p.stdout:
        z += 1
        if z % 10000 == 0:
            sys.stderr.write(str(z) + " subreads alignment paths scanned during selected for best\r")
        pbname = PacBioReadName(_nameprog.match(line).group(1))
        mol = pbname.get_molecule()
        name = pbname.name()
        if mol in negative_filter:
            continue
        if not best[mol][0] == name:
            continue  # not the winning subread for this molecule
        of.write(line)
    p.communicate()
    # Mark every molecule seen in this pass as done for subsequent calls.
    for mol in best:
        negative_filter.add(mol)
    sys.stderr.write("\n")
    sys.stderr.write("molecules written: " + str(len(negative_filter)) + "\n")
    return negative_filter
|
def _validate_nodes_with_data(self, names):
    """Validate NodeWithData pseudo-type.

    Accepts a dict (or list of dicts) with exactly the keys "name" and
    "data"; each name must be a separator-delimited string with no spaces
    and no empty components.

    :raises RuntimeError: when any entry fails validation.
    """
    node_dicts = names if isinstance(names, list) else [names]
    if not node_dicts:
        raise RuntimeError("Argument `nodes` is not valid")
    for node_dict in node_dicts:
        if not isinstance(node_dict, dict) or set(node_dict.keys()) != {"name", "data"}:
            raise RuntimeError("Argument `nodes` is not valid")
        node_name = node_dict["name"]
        if not isinstance(node_name, str):
            raise RuntimeError("Argument `nodes` is not valid")
        components = node_name.strip().split(self._node_separator)
        if (" " in node_name) or any(component.strip() == "" for component in components):
            raise RuntimeError("Argument `nodes` is not valid")
|
def __telnet_event_listener(self, ip, callback):
    """Creates a telnet connection to the lightpad and polls its event
    stream on port 2708 until self._telnet_running is cleared, invoking
    ``callback`` with each decoded JSON event that differs from the
    previous one.

    :param ip: address of the lightpad
    :param callback: callable invoked with each parsed event dict
    """
    tn = telnetlib.Telnet(ip, 2708)
    self._last_event = ""
    self._telnet_running = True
    while self._telnet_running:
        try:
            raw_string = tn.read_until(b'.\n', 5)
            if len(raw_string) >= 2 and raw_string[-2:] == b'.\n':
                # lightpad sends ".\n" at the end that we need to chop off
                json_string = raw_string.decode('ascii')[0:-2]
                # De-duplicate: only fire the callback on a new event.
                if json_string != self._last_event:
                    callback(json.loads(json_string))
                    self._last_event = json_string
        except:
            # NOTE(review): bare except keeps the listener alive through any
            # error (timeouts, JSON decode failures) -- confirm intended.
            pass
    tn.close()
|
def hashVariantAnnotation(cls, gaVariant, gaVariantAnnotation):
    """Produces an MD5 hash of the gaVariant and gaVariantAnnotation objects.

    :param gaVariant: object with reference_bases and alternate_bases
    :param gaVariantAnnotation: object whose transcript_effects carry ids
    :return: hex digest string identifying the (variant, annotation) pair
    """
    treffs = [treff.id for treff in gaVariantAnnotation.transcript_effects]
    summary = "{}\t{}\t{}\t".format(
        gaVariant.reference_bases, tuple(gaVariant.alternate_bases), treffs)
    # Fix: hashlib requires a bytes-like input; the original passed a str,
    # which raises TypeError on Python 3. Encoding ASCII content as UTF-8
    # yields the same digest on Python 2.
    return hashlib.md5(summary.encode("utf-8")).hexdigest()
|
def set_multivar(self, section, option, value=''):
    '''This function is unique to the GitConfigParser. It will add another
    value for the option if it already exists, converting the option's
    value to a list if applicable.

    If "value" is a list, then any existing values for the specified
    section and option will be replaced with the list being passed.
    '''
    self._string_check(value, allow_list=True)
    if not section or section == self.DEFAULTSECT:
        target = self._defaults
    else:
        try:
            target = self._sections[section]
        except KeyError:
            raise NoSectionError(  # pylint: disable=undefined-variable
                salt.utils.stringutils.to_str(section))
    normalized_key = self.optionxform(option)
    self._add_option(target, normalized_key, value)
|
def get_items_of_credit_note_per_page(self, credit_note_id, per_page=1000, page=1):
    """Get items of credit note per page.

    :param credit_note_id: the credit note id
    :param per_page: How many objects per page. Default: 1000
    :param page: Which page. Default: 1
    :return: list
    """
    params = {'credit_note_id': credit_note_id}
    return self._get_resource_per_page(
        resource=CREDIT_NOTE_ITEMS,
        per_page=per_page,
        page=page,
        params=params,
    )
|
def set_linestyle(self, ls):
    """Set the line style to be one of the styles known to
    GraphicsContextWx._dashd_wx; unknown styles fall back to wx.LONG_DASH.
    """
    DEBUG_MSG("set_linestyle()", 1, self)
    self.select()
    GraphicsContextBase.set_linestyle(self, ls)
    try:
        self._style = GraphicsContextWx._dashd_wx[ls]
    except KeyError:
        # Style not used elsewhere...
        self._style = wx.LONG_DASH
    # On MS Windows platform, only line width of 1 allowed for dash lines
    if wx.Platform == '__WXMSW__':
        self.set_linewidth(1)
    self._pen.SetStyle(self._style)
    self.gfx_ctx.SetPen(self._pen)
    self.unselect()
|
def update(self, gist, content):
    """Updates the contents of a file hosted inside a gist at GitHub.

    :param gist: (dict) gist parsed by GitHubTools._parse_gist()
    :param content: (str or bytes) to be written
    :return: (bool) indicating the success or failure of the update
    """
    # abort if content is False
    if content is False:
        return False
    # build and send the PATCH request
    url = self._api_url("gists", gist.get("id"))
    payload = {"files": {self.filename: {"content": content}}}
    self.output("Sending contents of {} to {}".format(self.file_path, url))
    response = self.requests.patch(url, data=dumps(payload))
    # error
    if response.status_code != 200:
        self.oops("Could not update " + gist.get("description"))
        self.oops("PATCH request returned " + str(response.status_code))
        return False
    # success
    self.yeah("Done!")
    self.hey("The URL to this Gist is: {}".format(gist["url"]))
    return True
|
def __analizar_errores(self, ret):
    "Check the XML response for errors and extract them if any are present"
    errores = list(ret.get('errores', []))
    if not errores:
        return
    self.Errores = ["%(codigo)s: %(descripcion)s" % err['error'][0]
                    for err in errores]
    self.errores = [
        {'codigo': err['error'][0]['codigo'],
         'descripcion': err['error'][0]['descripcion'].replace("\n", "").replace("\r", "")}
        for err in errores]
    self.ErrCode = ' '.join(self.Errores)
    self.ErrMsg = '\n'.join(self.Errores)
|
def slow_reduction_transfer(ii, j, idx, count, x, u, v, c):
    '''Perform the reduction transfer step from the Jonker-Volgenant algorithm.

    The data is input as a ragged array in terms of "i", structured as a
    vector of values for each i, j combination where:

    ii - the i to be reduced
    j - the j-index of every entry
    idx - the index of the first entry for each i
    count - the number of entries for each i
    x - the assignment of j to i
    u - the dual variable "u", updated in-place. It should be initialized
        to zero for the first reduction transfer.
    v - the dual variable "v", reduced in-place
    c - the cost for each entry.

    The code described in the paper is:

        for each assigned row i do
        begin
            j1 := x[i]; u = min {c[i,j] - v[j] | j = 1..n, j != j1};
            v[j1] := v[j1] - (u - u[i]);
            u[i] = u;
        end;

    The authors note that reduction transfer can be applied in later stages
    of the algorithm but does not seem to provide a substantial benefit
    in speed.
    '''
    for i in ii:
        assigned_j = x[i]
        start, stop = idx[i], idx[i] + count[i]
        candidates = j[start:stop]
        # Slack of every candidate column, excluding the assigned one.
        slack = c[start:stop] - v[candidates]
        min_other = np.min(slack[candidates != assigned_j])
        v[assigned_j] -= min_other - u[i]
        u[i] = min_other
|
def play_tone(self, frequency, duration, delay=0.0, volume=100, play_type=PLAY_WAIT_FOR_COMPLETE):
    """Play a single tone described by frequency, duration, volume and a trailing delay.

    :param int frequency: the tone frequency, in Hertz
    :param float duration: tone duration, in seconds
    :param float delay: delay after the tone, in seconds (useful when chaining
        calls to ``play_tone``)
    :param int volume: play volume, in percent of maximum volume
    :param play_type: the behavior of ``play_tone`` once playback has started
    :type play_type: ``Sound.PLAY_WAIT_FOR_COMPLETE``,
        ``Sound.PLAY_NO_WAIT_FOR_COMPLETE`` or ``Sound.PLAY_LOOP``
    :return: the PID of the underlying beep command when
        ``Sound.PLAY_NO_WAIT_FOR_COMPLETE`` is specified; ``None`` otherwise
    :raises ValueError: if any parameter is invalid
    """
    self._validate_play_type(play_type)
    if duration <= 0:
        raise ValueError('invalid duration (%s)' % duration)
    if delay < 0:
        raise ValueError('invalid delay (%s)' % delay)
    if not 0 < volume <= 100:
        raise ValueError('invalid volume (%s)' % volume)
    self.set_volume(volume)
    # tone() expects millisecond durations.
    tone_spec = (frequency, int(duration * 1000), int(delay * 1000))
    self.tone([tone_spec], play_type=play_type)
|
def _exit_session(self):
    """Log out of the Hetzner account session and clear it."""
    api = self.api[self.account]
    response = self._get(api['exit']['GET']['url'])
    # An empty DOM filter result means the logout page no longer shows
    # a logged-in session.
    exited = not Provider._filter_dom(response.text, api['filter'])
    if exited:
        LOGGER.info('Hetzner => Exit session')
    else:
        LOGGER.warning('Hetzner => Unable to exit session')
    self.session = None
    return True
|
def match_var(self, tokens, item):
    """Matches a variable."""
    setvar, = tokens
    # Wildcards match anything and bind nothing.
    if setvar == wildcard:
        return
    if setvar in self.names:
        # Already bound: require equality with the earlier binding.
        self.add_check(self.names[setvar] + " == " + item)
    else:
        # First occurrence: bind the name to the matched item.
        self.add_def(setvar + " = " + item)
        self.names[setvar] = item
|
def subprocess_manager(self, exec_args):
    '''Bro subprocess manager.

    Runs the bro executable with ``exec_args`` and checks its output.

    :param exec_args: argument list handed straight to Popen
    :raises RuntimeError: if bro cannot be run, writes to stderr,
        or exits with a non-zero return code
    '''
    try:
        sp = gevent.subprocess.Popen(exec_args, stdout=gevent.subprocess.PIPE, stderr=gevent.subprocess.PIPE)
    except OSError:
        raise RuntimeError('Could not run bro executable (either not installed or not in path): %s' % (exec_args))
    out, err = sp.communicate()
    if out:
        # Parenthesized single-argument print works under both Python 2
        # (prints the parenthesized expression) and Python 3.
        print('standard output of subprocess: %s' % out)
    if err:
        raise RuntimeError('%s\npcap_bro had output on stderr: %s' % (exec_args, err))
    if sp.returncode:
        raise RuntimeError('%s\npcap_bro had returncode: %d' % (exec_args, sp.returncode))
|
def filter_featured_apps(admin_apps, request):
    """Given a list of apps return a set of pseudo-apps considered featured.

    Apps are considered featured if they are defined in the settings
    property called `DASHBOARD_FEATURED_APPS`, which contains a list of the
    apps that are considered to be featured.

    :param admin_apps: A list of apps.
    :param request: Django request.
    :return: Subset of app-like objects that are listed in the
        `DASHBOARD_FEATURED_APPS` setting.
    """
    featured = []
    for spec_source in appsettings.DASHBOARD_FEATURED_APPS:
        # Work on a copy so deprecation fix-ups never mutate the settings.
        spec = spec_source.copy()
        if "verbose_name" in spec:
            warnings.warn("DASHBOARD_FEATURED_APPS[]['verbose_name'] = '%s' is deprecated. " "Use 'name' instead)" % spec['verbose_name'], DeprecationWarning, stacklevel=2)
            spec['name'] = spec['verbose_name']
        if hasattr(spec['models'], 'items'):
            warnings.warn("DASHBOARD_FEATURED_APPS[]['models'] for '%s' should now be a " "list of tuples, not a dict." % spec['name'], DeprecationWarning, stacklevel=2)
            spec['models'] = spec['models'].items()
        # Resolve the model names into actual model entries.
        spec['models'] = _build_app_models(request, admin_apps, spec['models'])
        # Only add the panel if at least one model is listed.
        if spec['models']:
            featured.append(spec)
    return featured
|
def parse_authn_request_response(self, xmlstr, binding, outstanding=None, outstanding_certs=None, conv_info=None):
    """Deal with an AuthnResponse.

    :param xmlstr: The reply as an xml string
    :param binding: Which binding was used for the transport
    :param outstanding: A dictionary with session IDs as keys and
        the original web request from the user before redirection
        as values.
    :param outstanding_certs: Certificates associated with outstanding
        requests (used for encrypted assertions).
    :param conv_info: Information about the conversation.
    :return: A response.AuthnResponse or None
    """
    # A configured entity id is mandatory to validate audience restrictions.
    if not getattr(self.config, 'entityid', None):
        raise SAMLError("Missing entity_id specification")
    if not xmlstr:
        return None
    # Collect every verification policy the response parser needs:
    # signature requirements, allowed return addresses, attribute handling.
    kwargs = {"outstanding_queries": outstanding, "outstanding_certs": outstanding_certs, "allow_unsolicited": self.allow_unsolicited, "want_assertions_signed": self.want_assertions_signed, "want_assertions_or_response_signed": self.want_assertions_or_response_signed, "want_response_signed": self.want_response_signed, "return_addrs": self.service_urls(binding=binding), "entity_id": self.config.entityid, "attribute_converters": self.config.attribute_converters, "allow_unknown_attributes": self.config.allow_unknown_attributes, 'conv_info': conv_info}
    try:
        resp = self._parse_response(xmlstr, AuthnResponse, "assertion_consumer_service", binding, **kwargs)
    except StatusError as err:
        # The IdP returned a non-success SAML status; propagate it.
        logger.error("SAML status error: %s", err)
        raise
    except UnravelError:
        # The message could not be unpacked at all.
        return None
    except Exception as err:
        logger.error("XML parse error: %s", err)
        raise
    if not isinstance(resp, AuthnResponse):
        logger.error("Response type not supported: %s", saml2.class_name(resp))
        return None
    # Cache subject information only for plain (non-encrypted) assertions
    # that carry a name id.
    if (resp.assertion and len(resp.response.encrypted_assertion) == 0 and resp.assertion.subject.name_id):
        self.users.add_information_about_person(resp.session_info())
        logger.info("--- ADDED person info ----")
    return resp
|
def new_wins(self, orig_criteria, orig_idx, new_criteria, new_idx):
    """Decide whether a new adversarial example beats the stored one.

    Returns a bool indicating whether a new adversarial example is better
    than the pre-existing one for the same clean example.

    :param orig_criteria: dict mapping names of criteria to their value
        for each example in the whole dataset
    :param orig_idx: the position of the pre-existing example within the
        whole dataset
    :param new_criteria: dict, like orig_criteria, but with values only
        on the latest batch of adversarial examples
    :param new_idx: the position of the new adversarial example within
        the batch
    """
    # Abstract hook: concrete subclasses must implement the comparison.
    raise NotImplementedError(str(type(self)) + " needs to implement new_wins.")
|
def show_data_file(fname):
    """shows a data file in CSV format - all files live in CORE folder"""
    heading = '<H2>' + fname + '</H2>'
    print(fname)
    # NOTE(review): a try/except fallback to web.read_csv_to_html_list was
    # previously disabled here; errors from read_csv_to_html_table propagate.
    # Using a table is fine here because this is actual tabular data.
    body = web.read_csv_to_html_table(fname, 'Y')
    return heading + body + '</div>\n'
|
def create_video(video_data):
    """Create a Video object in the database.

    create_video is used to create Video objects whose children are
    EncodedVideo objects, which are linked to Profile objects. This is an
    alternative to the HTTP requests so it can be used internally. The
    VideoSerializer is used to deserialize this object. If there are
    duplicate profile_names, the entire creation will be rejected. If the
    profile is not found in the database, the video will not be created.

    Args:
        video_data (dict):
            url: api url to the video
            edx_video_id: ID of the video
            duration: length of video in seconds
            client_video_id: client ID of video
            encoded_video: a list of EncodedVideo dicts
                url: url of the video
                file_size: size of the video in bytes
                profile: ID of the profile
            courses: courses associated with this video
            image: poster image file name for a particular course

    Raises:
        ValCannotCreateError: if the video cannot be created.

    Returns the successfully created Video object's edx_video_id.
    """
    serializer = VideoSerializer(data=video_data)
    if not serializer.is_valid():
        raise ValCannotCreateError(serializer.errors)
    serializer.save()
    return video_data.get("edx_video_id")
|
def generate_command(command=None, package=None, path="~", topic="mycommands"):
    """Generate the package and code for a sample cmd3 module.

    :param command: the name of the command
    :param package: name of the new package. Often this will be
        cloudmesh_COMMAND, which is used if not specified.
    :param path: path where to place the directory
    :param topic: the topic listed in cm
    :return: None
    """
    if command is None:
        Console.error("command not specified")
        return
    if topic is None:
        topic = "mycommands"
    if path is None:
        path = "."
    path = path_expand(path)
    if package is None:
        package = "cloudmesh_" + command
    data = {'command': command, 'package': package, 'path': path, 'topic': topic, 'dir': path_expand('~/.cloudmesh')}
    data["destination"] = "{path}/{package}".format(**data)
    print(data)
    # BUG FIX: the original checked os.path.exists("".format(**data)),
    # i.e. always the empty string, so the already-exists guard never fired.
    if os.path.exists(data["destination"]):
        Console.error("The directory {destination} already exists.".format(**data))
        return
    banner("Generating Cloudmesh Command")
    print("Command: %s" % data['command'])
    print("Package: %s" % data['package'])
    banner("Setup Directory with Package and Command")
    # mv {path}/{package}/shell_plugins.py.in {path}/{package}/shell_plugins.py
    script = """
    rm -rf {destination}
    cp -rf {dir}/etc/cmd3_template {destination}
    mv {destination}/cmd3_template {destination}/{package}
    mv {destination}/setup.py.in {destination}/setup.py
    mv {destination}/{package}/command_command.py.in {destination}/{package}/command_{command}.py
    mv {destination}/{package}/plugins/cm_shell_command.py.in {destination}/{package}/plugins/cm_shell_{command}.py
    rm -rf {destination}/command_command.py.in
    rm -rf {destination}/plugins
    """.format(**data)
    for line in script.split("\n"):
        line = line.strip()
        if line != "":
            print(line)
            os.system(line)
    banner("replacing command and package name in template files")
    # {path}/{package}/shell_plugins.py
    files = """
    {path}/{package}/Makefile
    {path}/{package}/{package}/plugins/cm_shell_{command}.py
    {path}/{package}/{package}/command_{command}.py
    {path}/{package}/setup.py
    """.format(**data)
    for filename in files.split("\n"):
        filename = filename.strip()
        if filename != "":
            replace_string(filename, data)
    banner("Command code created.")
|
def p_expression_noteql(self, p):
    'expression : expression NEL expression'
    # NOTE: the docstring above is the PLY grammar production for this
    # rule and must not be altered.
    # Build a NotEql AST node from the two operand expressions and carry
    # over the source line number of the left-hand operand.
    p[0] = NotEql(p[1], p[3], lineno=p.lineno(1))
    p.set_lineno(0, p.lineno(1))
|
def get_contents_debug_adapter_protocol(self, lst, fmt=None):
    '''This method is to be used in the case where the variables are all saved by its id (and as
    such don't need to have the `resolve` method called later on, so, keys don't need to
    embed the reference in the key).
    Note that the return should be ordered.
    :return list(tuple(name:str, value:object, evaluateName:str))
    '''
    l = len(lst)
    ret = []
    # Zero-pad the decimal index so entries sort lexicographically in
    # display order.
    format_str = '%0' + str(int(len(str(l - 1)))) + 'd'
    if fmt is not None and fmt.get('hex', False):
        # NOTE(review): lstrip('0x') strips any leading '0'/'x' characters,
        # not the literal '0x' prefix (e.g. hex(0) -> '0x0' -> ''), so the
        # computed width can be off for some lengths. Left as-is to
        # preserve behavior.
        format_str = '0x%0' + str(int(len(hex(l).lstrip('0x')))) + 'x'
    for i, item in enumerate(lst):
        ret.append((format_str % i, item, '[%s]' % i))
        if i > MAX_ITEMS_TO_HANDLE:
            # Truncate very large lists with a marker entry.
            ret.append((TOO_LARGE_ATTR, TOO_LARGE_MSG, None))
            break
    # Synthetic __len__ entry; its evaluate name wraps the parent expression.
    ret.append(('__len__', len(lst), partial(_apply_evaluate_name, evaluate_name='len(%s)')))
    # Needed in case the class extends the built-in type and has some additional fields.
    from_default_resolver = defaultResolver.get_contents_debug_adapter_protocol(lst, fmt=fmt)
    if from_default_resolver:
        ret = from_default_resolver + ret
    return ret
|
def _transform_local_field_to_expression(expression, node, context):
    """Transform a LocalField compiler expression into its SQLAlchemy representation.

    Args:
        expression: expression, LocalField compiler expression.
        node: SqlNode, the SqlNode the expression applies to.
        context: CompilationContext, global compilation state and metadata.

    Returns:
        Expression, SQLAlchemy expression.
    """
    return sql_context_helpers.get_column(expression.field_name, node, context)
|
def zone_create_or_update(name, resource_group, **kwargs):
    '''.. versionadded:: Fluorine

    Creates or updates a DNS zone. Does not modify DNS records within the zone.

    :param name: The name of the DNS zone to create (without a terminating dot).
    :param resource_group: The name of the resource group.

    CLI Example:

    .. code-block:: bash

        salt-call azurearm_dns.zone_create_or_update myzone testgroup
    '''
    # DNS zones are global objects
    kwargs['location'] = 'global'
    dnsconn = __utils__['azurearm.get_client']('dns', **kwargs)
    # Convert lists of ID strings to lists of dictionaries with an id key.
    for vnet_key in ('registration_virtual_networks', 'resolution_virtual_networks'):
        if isinstance(kwargs.get(vnet_key), list):
            kwargs[vnet_key] = [{'id': vnet} for vnet in kwargs[vnet_key]]
    try:
        zone_model = __utils__['azurearm.create_object_model']('dns', 'Zone', **kwargs)
    except TypeError as exc:
        return {'error': 'The object model could not be built. ({0})'.format(str(exc))}
    try:
        zone = dnsconn.zones.create_or_update(zone_name=name, resource_group_name=resource_group, parameters=zone_model, if_match=kwargs.get('if_match'), if_none_match=kwargs.get('if_none_match'))
        result = zone.as_dict()
    except CloudError as exc:
        __utils__['azurearm.log_cloud_error']('dns', str(exc), **kwargs)
        result = {'error': str(exc)}
    except SerializationError as exc:
        result = {'error': 'The object model could not be parsed. ({0})'.format(str(exc))}
    return result
|
def getLogger(name=None):
    """Return a logger with the specified name, creating it if necessary.

    If no name is specified, return the root logger.
    """
    if not name:
        return rootLogger
    return SLogger.manager.getLogger(name)
|
def track_time(self, name, description='', max_rows=None):
    """Create a Timer object in the Tracker.

    :param name: unique table name for the timer
    :param description: optional human-readable description
    :param max_rows: row cap for the table; defaults to the tracker-wide limit
    :raises TableConflictError: if a table with this name already exists
    """
    if name in self._tables:
        raise TableConflictError(name)
    row_limit = AnonymousUsageTracker.MAX_ROWS_PER_TABLE if max_rows is None else max_rows
    self.register_table(name, self.uuid, 'Timer', description)
    self._tables[name] = Timer(name, self, max_rows=row_limit)
|
def _complete_url(self, url_part, registration_prefix):
    """Build the final endpoint url, deferring construction so that an Api
    created with a Blueprint can include the blueprint's prefix.

    :param url_part: The part of the url the endpoint is registered with
    :param registration_prefix: The part of the url contributed by the
        blueprint. Generally speaking, BlueprintSetupState.url_prefix
    """
    segments = {'b': registration_prefix, 'a': self.prefix, 'e': url_part}
    # Concatenate the non-empty segments in the configured order.
    return ''.join(segments[key] for key in self.url_part_order if segments[key])
|
def op(name, images, max_outputs=3, display_name=None, description=None, collections=None):
    """Create a legacy image summary op for use in a TensorFlow graph.

    Arguments:
      name: A unique name for the generated summary node.
      images: A `Tensor` representing pixel data with shape `[k, h, w, c]`,
        where `k` is the number of images, `h` and `w` are the height and
        width of the images, and `c` is the number of channels, which
        should be 1, 3, or 4. Any of the dimensions may be statically
        unknown (i.e., `None`).
      max_outputs: Optional `int` or rank-0 integer `Tensor`. At most this
        many images will be emitted at each step. When more than
        `max_outputs` many images are provided, the first `max_outputs` many
        images will be used and the rest silently discarded.
      display_name: Optional name for this summary in TensorBoard, as a
        constant `str`. Defaults to `name`.
      description: Optional long-form description for this summary, as a
        constant `str`. Markdown is supported. Defaults to empty.
      collections: Optional list of graph collections keys. The new
        summary op is added to these collections. Defaults to
        `[Graph Keys.SUMMARIES]`.

    Returns:
      A TensorFlow summary op.
    """
    # TODO(nickfelt): remove on-demand imports once dep situation is fixed.
    import tensorflow.compat.v1 as tf
    if display_name is None:
        display_name = name
    summary_metadata = metadata.create_summary_metadata(display_name=display_name, description=description)
    # Validate inside the graph: images must be rank-4 uint8 and
    # max_outputs must be non-negative.
    with tf.name_scope(name), tf.control_dependencies([tf.assert_rank(images, 4), tf.assert_type(images, tf.uint8), tf.assert_non_negative(max_outputs)]):
        limited_images = images[:max_outputs]
        # PNG-encode each retained image individually.
        encoded_images = tf.map_fn(tf.image.encode_png, limited_images, dtype=tf.string, name='encode_each_image')
        image_shape = tf.shape(input=images)
        # The first two entries of the summary tensor carry the width and
        # height (as strings), followed by the encoded images.
        dimensions = tf.stack([tf.as_string(image_shape[2], name='width'), tf.as_string(image_shape[1], name='height')], name='dimensions')
        tensor = tf.concat([dimensions, encoded_images], axis=0)
        return tf.summary.tensor_summary(name='image_summary', tensor=tensor, collections=collections, summary_metadata=summary_metadata)
|
def get_queryset(self):
    """Return only plug events whose strain matches the breeding strain."""
    self.strain = get_object_or_404(Strain, Strain_slug__iexact=self.kwargs['slug'])
    return PlugEvents.objects.filter(Breeding__Strain=self.strain)
|
def camel_case_to_snake_case(name):
    """HelloWorld -> hello_world"""
    # Insert an underscore before each capitalized word boundary,
    # then lowercase the whole string.
    partially_converted = _FIRST_CAP_RE.sub(r'\1_\2', name)
    return _ALL_CAP_RE.sub(r'\1_\2', partially_converted).lower()
|
def load_configuration_file(self):
    """Load all configuration from file"""
    if not os.path.exists(self.config_file):
        return
    try:
        with open(self.config_file, 'r') as handle:
            reader = csv.reader(handle, delimiter='=', escapechar='\\', quoting=csv.QUOTE_NONE)
            for row in reader:
                if len(row) != 2:
                    # A malformed line invalidates the whole file.
                    self.config_dict = dict()
                    self.logger.warning("Malformed configuration file {0}, ignoring it.".format(self.config_file))
                    return
                key, value = row
                self.config_dict[key] = value
    except (OSError, IOError) as e:
        self.logger.warning("Could not load configuration file: {0}".format(utils.exc_as_decoded_string(e)))
|
def create_term(self, lemma, pos, morphofeat, tokens, id=None):
    """Create a new term and add it to the term layer.

    @type lemma: string
    @param lemma: the lemma of the term
    @type pos: string
    @param pos: the postag (first letter) of the POS attribute
    @type morphofeat: string
    @param morphofeat: the morphofeat (full morphological features) of the term
    @type tokens: sequence of L{Cwf}
    @param tokens: the token(s) that this term describes
    @type id: string
    @param id: the id of the term; if not given an id tXXX will be created
    """
    if id is None:
        # Next sequential id based on the current size of the term layer.
        n = 1 if self.term_layer is None else len(self.term_layer.idx) + 1
        id = "t{n}".format(**locals())
    term = Cterm(type=self.type)
    term.set_id(id)
    term.set_lemma(lemma)
    term.set_pos(pos)
    term.set_morphofeat(morphofeat)
    # The span links the term to the tokens it covers.
    span = Cspan()
    for token in tokens:
        span.add_target_id(token.get_id())
    term.set_span(span)
    self.add_term(term)
    return term
|
def remove_vertex(self, vertex):
    """Remove vertex from G"""
    try:
        self.vertices.pop(vertex)
        self.succ.pop(vertex)
    except KeyError:
        raise GraphInsertError("Vertex %s doesn't exist." % (vertex,))
    if vertex in self.nodes:
        self.nodes.pop(vertex)
    # Drop the vertex from every remaining adjacency list.
    for other in self.vertices:
        if vertex in self.vertices[other]:
            self.vertices[other].remove(vertex)
    # Collect, then delete, every edge that touches the vertex.
    doomed_edges = [edge for edge in self.edges if vertex in edge]
    for edge in doomed_edges:
        del self.edges[edge]
    for other in self.pred:
        if vertex in self.pred[other]:
            self.pred[other].remove(vertex)
    for other in self.succ:
        if vertex in self.succ[other]:
            self.succ[other].remove(vertex)
|
def __send_message(self, operation):
    """Send a query or getmore operation and handle the response.

    If operation is ``None`` this is an exhaust cursor, which reads
    the next result batch off the exhaust socket instead of
    sending getMore messages to the server.

    Can raise ConnectionFailure.
    """
    client = self.__collection.database.client
    try:
        response = client._run_operation_with_response(operation, self._unpack_response, exhaust=self.__exhaust, address=self.__address)
    except OperationFailure:
        self.__killed = True
        # Make sure exhaust socket is returned immediately, if necessary.
        self.__die()
        # If this is a tailable cursor the error is likely
        # due to capped collection roll over. Setting
        # self.__killed to True ensures Cursor.alive will be
        # False. No need to re-raise.
        if self.__query_flags & _QUERY_OPTIONS["tailable_cursor"]:
            return
        raise
    except NotMasterError:
        # Don't send kill cursors to another server after a "not master"
        # error. It's completely pointless.
        self.__killed = True
        # Make sure exhaust socket is returned immediately, if necessary.
        self.__die()
        raise
    except ConnectionFailure:
        # Don't try to send kill cursors on another socket
        # or to another server. It can cause a _pinValue
        # assertion on some server releases if we get here
        # due to a socket timeout.
        self.__killed = True
        self.__die()
        raise
    except Exception:
        # Close the cursor
        self.__die()
        raise
    self.__address = response.address
    if self.__exhaust and not self.__exhaust_mgr:
        # 'response' is an ExhaustResponse.
        self.__exhaust_mgr = _SocketManager(response.socket_info, response.pool)
    cmd_name = operation.name
    docs = response.docs
    if response.from_command:
        # Command-style replies embed the batch in a 'cursor' subdocument
        # (except explain, which returns the documents directly).
        if cmd_name != "explain":
            cursor = docs[0]['cursor']
            self.__id = cursor['id']
            if cmd_name == 'find':
                documents = cursor['firstBatch']
            else:
                documents = cursor['nextBatch']
            self.__data = deque(documents)
            self.__retrieved += len(documents)
        else:
            self.__id = 0
            self.__data = deque(docs)
            self.__retrieved += len(docs)
    else:
        # Legacy OP_REPLY: cursor id and count come from the wire message.
        self.__id = response.data.cursor_id
        self.__data = deque(docs)
        self.__retrieved += response.data.number_returned
    if self.__id == 0:
        self.__killed = True
        # Don't wait for garbage collection to call __del__, return the
        # socket and the session to the pool now.
        self.__die()
    if self.__limit and self.__id and self.__limit <= self.__retrieved:
        self.__die()
|
def session_check_name(session_name):
    """Raise an exception if the session name is invalid, modeled after the tmux function.

    tmux(1) session names may not be empty, or include periods or colons.
    These delimiters are reserved for noting session, window and pane.

    Parameters
    ----------
    session_name : str
        Name of session.

    Raises
    ------
    :exc:`exc.BadSessionName`
        Invalid session name.
    """
    if not session_name:
        raise exc.BadSessionName("tmux session names may not be empty.")
    if '.' in session_name:
        # BUG FIX: the "%s" placeholder was previously passed as a second
        # constructor argument and never interpolated into the message.
        raise exc.BadSessionName('tmux session name "%s" may not contain periods.' % session_name)
    if ':' in session_name:
        raise exc.BadSessionName('tmux session name "%s" may not contain colons.' % session_name)
|
def check_known_inconsistencies(bill_data, bond_data):
    """There are a couple quirks in the data provided by Bank of Canada.
    Check that no new quirks have been introduced in the latest download.

    :param bill_data: treasury-bill data indexed by date
    :param bond_data: treasury-bond data indexed by date
    :raises ValueError: if an unexpected date mismatch is found
    """
    # `Index.sym_diff` was deprecated and later removed from pandas; the
    # equivalent spelling is `symmetric_difference`.
    inconsistent_dates = bill_data.index.symmetric_difference(bond_data.index)
    known_inconsistencies = [
        # bill_data has an entry for 2010-02-15, which bond_data doesn't.
        # bond_data has an entry for 2006-09-04, which bill_data doesn't.
        # Both of these dates are bank holidays (Flag Day and Labor Day,
        # respectively).
        pd.Timestamp('2006-09-04', tz='UTC'),
        pd.Timestamp('2010-02-15', tz='UTC'),
        # 2013-07-25 comes back as "Not available" from the bills endpoint.
        # This date doesn't seem to be a bank holiday, but the previous
        # calendar implementation dropped this entry, so we drop it as well.
        # If someone cares deeply about the integrity of the Canadian trading
        # calendar, they may want to consider forward-filling here rather than
        # dropping the row.
        pd.Timestamp('2013-07-25', tz='UTC'),
    ]
    unexpected_inconsistences = inconsistent_dates.drop(known_inconsistencies)
    if len(unexpected_inconsistences):
        in_bills = bill_data.index.difference(bond_data.index).difference(known_inconsistencies)
        in_bonds = bond_data.index.difference(bill_data.index).difference(known_inconsistencies)
        raise ValueError("Inconsistent dates for Canadian treasury bills vs bonds. \n" "Dates with bills but not bonds: {in_bills}.\n" "Dates with bonds but not bills: {in_bonds}.".format(in_bills=in_bills, in_bonds=in_bonds, ))
|
def cluster_assignments(self):
    """Return an array of cluster assignments for the most recent set of
    instances clustered.

    :return: the cluster assignments
    :rtype: ndarray
    """
    assignments = javabridge.call(self.jobject, "getClusterAssignments", "()[D")
    if assignments is None:
        return None
    return javabridge.get_env().get_double_array_elements(assignments)
|
def get_square_axes_limits(coords, margin=0.05):
    """Return N-dimensional square limits enclosing all points.

    ## Arguments
    # 'coords': list of per-axis coordinate sequences of the points to plot
    # 'margin': fractional margin added from the boundaries of the square.
    #   - 'margin' can be negative if one wants to reduce the square size.

    ## Example
    If 'coords' was given as [x, y, z], the resulting limits are
    (xlim, ylim, zlim) where, e.g.,
        xlim == (x_mid - max_width, x_mid + max_width)
        x_mid == 0.5 * (min(x) + max(x))
        max_width == max of the per-axis half-ranges (with margin applied)

    :raises ValueError: if elements of 'coords' cannot be converted to
        numpy arrays
    """
    try:
        arrays = [np.asarray(coord) for coord in coords]
    except Exception:
        # Narrowed from a bare `except:`. ValueError is a subclass of the
        # previously-raised Exception, so existing callers still catch it.
        raise ValueError("Failed to convert elements of 'coords' into numpy.array")
    lims = [(arr.min(), arr.max()) for arr in arrays]
    mids = [0.5 * (lo + hi) for lo, hi in lims]
    half_widths = [0.5 * (hi - lo) for lo, hi in lims]
    # All axes share the largest half-width so the region is square.
    max_width = max(half_widths)
    max_width += max_width * margin
    return tuple((mid - max_width, mid + max_width) for mid in mids)
|
def format_item(self, item, defaults=None, stencil=None):
    """Format an item for console output.

    :param item: the item to format
    :param defaults: default field values passed to the formatter
    :param stencil: optional column widths used to justify fields
    :return: the formatted (and possibly shell-escaped / justified) text
    """
    from pyrobase.osutil import shell_escape
    try:
        item_text = fmt.to_console(formatting.format_item(self.options.output_format, item, defaults))
    except (NameError, ValueError, TypeError) as exc:
        # MODERNIZED: `except X, exc` is Python-2-only syntax; the `as`
        # form works on Python 2.6+ and Python 3.
        self.fatal("Trouble with formatting item %r\n\n FORMAT = %r\n\n REASON =" % (item, self.options.output_format), exc)
        raise
    # in --debug mode
    if self.options.shell:
        item_text = '\t'.join(shell_escape(i) for i in item_text.split('\t'))
    # Justify headers according to stencil
    if stencil:
        item_text = '\t'.join(i.ljust(len(s)) for i, s in zip(item_text.split('\t'), stencil))
    return item_text
|
def version(self, value):
    """Setter for **self.__version** attribute.

    :param value: Attribute value.
    :type value: unicode
    """
    if value is not None:
        # isinstance is preferred over an exact type() comparison; it also
        # accepts unicode subclasses (backward compatible relaxation).
        assert isinstance(value, unicode), "'{0}' attribute: '{1}' type is not 'unicode'!".format("version", value)
    self.__version = value
|
def nla_reserve(msg, attrtype, attrlen):
    """Reserve space for an attribute.

    https://github.com/thom311/libnl/blob/libnl3_2_25/lib/attr.c#L456

    Reserves room for an attribute in the specified Netlink message and fills
    in the attribute header (type, length).
    Returns None if there is insufficient space for the attribute.
    Any padding between payload and the start of the next attribute is zeroed out.

    Positional arguments:
    msg -- Netlink message (nl_msg class instance).
    attrtype -- attribute type (integer).
    attrlen -- length of payload (integer).

    Returns:
    nlattr class instance allocated to the new space or None on failure.
    """
    # Total message length once this attribute (header + aligned payload)
    # has been appended.
    tlen = NLMSG_ALIGN(msg.nm_nlh.nlmsg_len) + nla_total_size(attrlen)
    if tlen > msg.nm_size:
        # Not enough room left in the message buffer.
        return None
    # Place the new attribute header at the current tail of the message.
    nla = nlattr(nlmsg_tail(msg.nm_nlh))
    nla.nla_type = attrtype
    nla.nla_len = nla_attr_size(attrlen)
    if attrlen:
        # Zero out the padding between the payload and the next attribute.
        padlen = nla_padlen(attrlen)
        nla.bytearray[nla.nla_len:nla.nla_len + padlen] = bytearray(b'\0') * padlen
    msg.nm_nlh.nlmsg_len = tlen
    _LOGGER.debug('msg 0x%x: attr <0x%x> %d: Reserved %d (%d) bytes at offset +%d nlmsg_len=%d', id(msg), id(nla), nla.nla_type, nla_total_size(attrlen), attrlen, nla.bytearray.slice.start - nlmsg_data(msg.nm_nlh).slice.start, msg.nm_nlh.nlmsg_len)
    return nla
|
def update_model ( self ) :
'''a method to update model with latest training data
: return : True'''
|
import requests
url = self . endpoint_public + '/calculate'
params = { 'group' : self . group_name }
response = requests . get ( url , params = params )
response_details = response . json ( )
return response_details [ 'success' ]
|
def append_data ( self , len_tag , val_tag , data , header = False ) :
"""Append raw data , possibly including a embedded SOH .
: param len _ tag : Tag number for length field .
: param val _ tag : Tag number for value field .
: param data : Raw data byte string .
: param header : Append to header if True ; default to body .
Appends two pairs : a length pair , followed by a data pair ,
containing the raw data supplied . Example fields that should
use this method include : 95/96 , 212/213 , 354/355 , etc ."""
|
self . append_pair ( len_tag , len ( data ) , header = header )
self . append_pair ( val_tag , data , header = header )
return
|
def liouvillian(H, Ls=None):
    r"""Return the Liouvillian super-operator associated with `H` and `Ls`

    The Liouvillian :math:`\mathcal{L}` generates the Markovian-dynamics of a
    system via the Master equation:

    .. math::
        \dot{\rho} = \mathcal{L}\rho
        = -i[H,\rho] + \sum_{j=1}^n \mathcal{D}[L_j] \rho

    Args:
        H (Operator): The associated Hamilton operator
        Ls (sequence or Matrix): A sequence of Lindblad operators.

    Returns:
        SuperOperator: The Liouvillian super-operator.
    """
    # Normalize the Lindblad-operator argument to a plain list.
    if Ls is None:
        lindblad_ops = []
    elif isinstance(Ls, Matrix):
        lindblad_ops = Ls.matrix.ravel().tolist()
    else:
        lindblad_ops = Ls
    # Coherent part -i[H, .] plus one dissipator per Lindblad operator.
    terms = [-I * commutator(H)]
    terms += [lindblad(L) for L in lindblad_ops]
    return SuperOperatorPlus.create(*terms)
|
def clusterStatus(self):
    """Returns a dict of cluster nodes and their status information

    Builds three sections from the ``rhumba.*`` keys in the backing store:
    ``workers`` (per-server heartbeat/status/uuid), ``crons`` (per-queue
    method timestamps and master), and ``queues`` (length + message stats).

    NOTE: every store access is ``yield``-ed, so this must run under a
    Twisted inlineCallbacks-style driver; the result is delivered via
    ``defer.returnValue``.
    """
    servers = yield self.getClusterServers()
    d = {'workers': {}, 'crons': {}, 'queues': {}}
    now = time.time()
    # uuid -> server name, used to label the cron master below.
    reverse_map = {}
    for sname in servers:
        last = yield self.get('rhumba.server.%s.heartbeat' % sname)
        status = yield self.get('rhumba.server.%s.status' % sname)
        uuid = yield self.get('rhumba.server.%s.uuid' % sname)
        reverse_map[uuid] = sname
        if not last:
            last = 0
        last = float(last)
        # A "ready" server whose heartbeat is older than 5s is treated as gone.
        if (status == 'ready') and (now - last > 5):
            status = 'offline'
        if sname not in d['workers']:
            d['workers'][sname] = []
        d['workers'][sname].append({'lastseen': last, 'status': status, 'id': uuid})
    # Crons
    # Raw string: '\.' is an invalid escape sequence in a normal string
    # literal (SyntaxWarning on modern Python); the pattern bytes sent to
    # the store are unchanged.
    crons = yield self.keys(r'rhumba\.crons\.*')
    for key in crons:
        segments = key.split('.')
        queue = segments[2]
        if queue not in d['crons']:
            d['crons'][queue] = {'methods': {}}
        if len(segments) == 4:
            # rhumba.crons.<queue>.<method> -> last run timestamp
            last = yield self.get(key)
            d['crons'][queue]['methods'][segments[3]] = float(last)
        else:
            # rhumba.crons.<queue> -> uuid of the server mastering this cron
            uid = yield self.get(key)
            d['crons'][queue]['master'] = '%s:%s' % (uid, reverse_map[uid])
    # Queues
    queue_keys = yield self.keys('rhumba.qstats.*')
    for key in queue_keys:
        qname = key.split('.')[2]
        if qname not in d['queues']:
            qlen = yield self.queueSize(qname)
            stats = yield self.getQueueMessageStats(qname)
            d['queues'][qname] = {'waiting': qlen, 'messages': stats}
    defer.returnValue(d)
|
def log10(x):
    """Base-10 logarithm"""
    # Plain numbers/arrays go straight through numpy.
    if not isinstance(x, UncertainFunction):
        return np.log10(x)
    # For uncertain values, transform every Monte-Carlo sample point.
    return UncertainFunction(np.log10(x._mcpts))
|
def add_minrmsd_to_ref ( self , ref , ref_frame = 0 , atom_indices = None , precentered = False ) :
r"""Adds the minimum root - mean - square - deviation ( minrmsd ) with respect to a reference structure to the feature list .
Parameters
ref :
Reference structure for computing the minrmsd . Can be of two types :
1 . : py : obj : ` mdtraj . Trajectory ` object
2 . filename for mdtraj to load . In this case , only the : py : obj : ` ref _ frame ` of that file will be used .
ref _ frame : integer , default = 0
Reference frame of the filename specified in : py : obj : ` ref ` .
This parameter has no effect if : py : obj : ` ref ` is not a filename .
atom _ indices : array _ like , default = None
Atoms that will be used for :
1 . aligning the target and reference geometries .
2 . computing rmsd after the alignment .
If left to None , all atoms of : py : obj : ` ref ` will be used .
precentered : bool , default = False
Use this boolean at your own risk to let mdtraj know that the target conformations are already
centered at the origin , i . e . , their ( uniformly weighted ) center of mass lies at the origin .
This will speed up the computation of the rmsd ."""
|
from . misc import MinRmsdFeature
f = MinRmsdFeature ( ref , ref_frame = ref_frame , atom_indices = atom_indices , topology = self . topology , precentered = precentered )
self . __add_feature ( f )
|
def read_string ( cls , string ) :
"""Decodes a given bencoded string or bytestring .
Returns decoded structure ( s ) .
: param str string :
: rtype : list"""
|
if PY3 and not isinstance ( string , byte_types ) :
string = string . encode ( )
return cls . decode ( string )
|
def ekopw(fname):
    """Open an existing E-kernel file for writing.

    http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/ekopw_c.html

    :param fname: Name of EK file.
    :type fname: str
    :return: Handle attached to EK file.
    :rtype: int
    """
    # Convert the Python string into the C char pointer CSPICE expects.
    fname = stypes.stringToCharP(fname)
    # Output parameter: ekopw_c writes the file handle into this c_int.
    handle = ctypes.c_int()
    libspice.ekopw_c(fname, ctypes.byref(handle))
    return handle.value
|
def sort_file_tabs_alphabetically ( self ) :
"""Sort open tabs alphabetically ."""
|
while self . sorted ( ) is False :
for i in range ( 0 , self . tabs . tabBar ( ) . count ( ) ) :
if ( self . tabs . tabBar ( ) . tabText ( i ) > self . tabs . tabBar ( ) . tabText ( i + 1 ) ) :
self . tabs . tabBar ( ) . moveTab ( i , i + 1 )
|
def intersection(a, b, scale=1):
    '''Intersection between two segments.

    Each segment is either a 2-item iterable ``(start, stop)`` or an object
    with ``start``/``stop`` attributes (e.g. a slice). Returns a ``slice``
    covering the overlap (endpoints multiplied by ``scale``), or ``None``
    when the segments do not overlap.
    '''
    # Accept both (start, stop) pairs and slice-like objects.
    try:
        a_start, a_stop = a
    except TypeError:
        a_start, a_stop = a.start, a.stop
    try:
        b_start, b_stop = b
    except TypeError:
        b_start, b_stop = b.start, b.stop
    # Disjoint (or merely touching) segments have no intersection.
    if a_stop <= b_start or a_start >= b_stop:
        return None
    # Overlap runs from the later start to the earlier stop.
    return slice(max(a_start, b_start) * scale, min(a_stop, b_stop) * scale)
|
def get_parameter_action ( action ) :
"""To foster a general schema that can accomodate multiple parsers , the general behavior here is described
rather than the specific language of a given parser . For instance , the ' append ' action of an argument
is collapsing each argument given to a single argument . It also returns a set of actions as well , since
presumably some actions can impact multiple parameter options"""
|
actions = set ( )
if isinstance ( action , argparse . _AppendAction ) :
actions . add ( SPECIFY_EVERY_PARAM )
return actions
|
def extract_log_level_from_environment ( k , default ) :
"""Gets the log level from the environment variable ."""
|
return LOG_LEVELS . get ( os . environ . get ( k ) ) or int ( os . environ . get ( k , default ) )
|
def validate_instance ( instance ) :
"""Validating if the instance should be logged , or is excluded"""
|
excludes = settings . AUTOMATED_LOGGING [ 'exclude' ] [ 'model' ]
for excluded in excludes :
if ( excluded in [ instance . _meta . app_label . lower ( ) , instance . __class__ . __name__ . lower ( ) ] or instance . __module__ . lower ( ) . startswith ( excluded ) ) :
return False
return True
|
def _validate_configuration ( self ) :
"""Validates that required parameters are present ."""
|
if not self . access_token :
raise ConfigurationException ( 'You will need to initialize a client with an Access Token' )
if not self . api_url :
raise ConfigurationException ( 'The client configuration needs to contain an API URL' )
if not self . default_locale :
raise ConfigurationException ( 'The client configuration needs to contain a Default Locale' )
if not self . api_version or self . api_version < 1 :
raise ConfigurationException ( 'The API Version must be a positive number' )
|
def _is_cache_dir_appropriate ( cache_dir , cache_file ) :
"""Determine if a directory is acceptable for building .
A directory is suitable if any of the following are true :
- it doesn ' t exist
- it is empty
- it contains an existing build cache"""
|
if os . path . exists ( cache_dir ) :
files = os . listdir ( cache_dir )
if cache_file in files :
return True
return not bool ( files )
return True
|
def removeIndexOnAttribute(self, attributeName):
    '''removeIndexOnAttribute - Remove an attribute from indexing (for getElementsByAttr function) and remove indexed data.

    @param attributeName <lowercase str> - An attribute name. Will be lowercased. "name" and "id" will have no effect.'''
    key = attributeName.lower()
    # pop() with a default removes the entry when present and is a no-op
    # otherwise -- same effect as the membership-checked `del`.
    self.otherAttributeIndexFunctions.pop(key, None)
    self._otherAttributeIndexes.pop(key, None)
|
def fire_running ( self , running ) :
'''Pass in a state " running " dict , this is the return dict from a state
call . The dict will be processed and fire events .
By default yellows and reds fire events on the master and minion , but
this can be configured .'''
|
load = { 'id' : self . opts [ 'id' ] , 'events' : [ ] , 'cmd' : '_minion_event' }
for stag in sorted ( running , key = lambda k : running [ k ] . get ( '__run_num__' , 0 ) ) :
if running [ stag ] [ 'result' ] and not running [ stag ] [ 'changes' ] :
continue
tag = 'state_{0}_{1}' . format ( six . text_type ( running [ stag ] [ 'result' ] ) , 'True' if running [ stag ] [ 'changes' ] else 'False' )
load [ 'events' ] . append ( { 'tag' : tag , 'data' : running [ stag ] , } )
channel = salt . transport . client . ReqChannel . factory ( self . opts )
try :
channel . send ( load )
except Exception :
pass
finally :
channel . close ( )
return True
|
def initialize_environment ( app ) :
"""Perform initializations needed before the build process starts ."""
|
env = app . builder . env
# Assure ` ` traceability _ all _ items ` ` will always be there .
if not hasattr ( env , 'traceability_all_items' ) :
env . traceability_all_items = { }
update_available_item_relationships ( app )
|
def load_network_from_file ( filename ) :
import cPickle
"""Load the complete configuration of a previously stored network ."""
|
network = NeuralNet ( { "n_inputs" : 1 , "layers" : [ [ 0 , None ] ] } )
with open ( filename , 'rb' ) as file :
store_dict = cPickle . load ( file )
network . n_inputs = store_dict [ "n_inputs" ]
network . n_weights = store_dict [ "n_weights" ]
network . layers = store_dict [ "layers" ]
network . weights = store_dict [ "weights" ]
return network
|
def optional_file_like ( path ) :
"""Validator that ensures that if a file exists it regular , a fifo , or a
character device . The file is not required to exist .
This includes character special devices like / dev / null ."""
|
if ( os . path . exists ( path ) and not ( os . path . isfile ( path ) or stat . S_ISFIFO ( os . stat ( path ) . st_mode ) or stat . S_ISCHR ( os . stat ( path ) . st_mode ) ) ) :
raise ValidationFailed ( '{} is not a valid file, character device, or fifo.' . format ( path ) )
|
def from_center_of_mass ( cls , inputs , window_length , center_of_mass , ** kwargs ) :
"""Convenience constructor for passing ` decay _ rate ` in terms of center of
mass .
Forwards ` decay _ rate ` as ` 1 - ( 1 / 1 + center _ of _ mass ) ` . This provides
behavior equivalent to passing ` center _ of _ mass ` to pandas . ewma .
Examples
. . code - block : : python
# Equivalent to :
# my _ ewma = EWMA (
# inputs = [ EquityPricing . close ] ,
# window _ length = 30,
# decay _ rate = ( 1 - ( 1 / 15.0 ) ) ,
my _ ewma = EWMA . from _ center _ of _ mass (
inputs = [ EquityPricing . close ] ,
window _ length = 30,
center _ of _ mass = 15,
Notes
This classmethod is provided by both
: class : ` ExponentialWeightedMovingAverage ` and
: class : ` ExponentialWeightedMovingStdDev ` ."""
|
return cls ( inputs = inputs , window_length = window_length , decay_rate = ( 1.0 - ( 1.0 / ( 1.0 + center_of_mass ) ) ) , ** kwargs )
|
def _load_github_repo ( ) :
"""Loads the GitHub repository from the users config ."""
|
if 'TRAVIS' in os . environ :
raise RuntimeError ( 'Detected that we are running in Travis. ' 'Stopping to prevent infinite loops.' )
try :
with open ( os . path . join ( config_dir , 'repo' ) , 'r' ) as f :
return f . read ( )
except ( OSError , IOError ) :
raise RuntimeError ( 'Could not find your repository. ' 'Have you ran `trytravis --repo`?' )
|
def get_instance ( self , payload ) :
"""Build an instance of DayInstance
: param dict payload : Payload response from the API
: returns : twilio . rest . preview . bulk _ exports . export . day . DayInstance
: rtype : twilio . rest . preview . bulk _ exports . export . day . DayInstance"""
|
return DayInstance ( self . _version , payload , resource_type = self . _solution [ 'resource_type' ] , )
|
def MatrixTriangularSolve(a, rhs, lower, adj):
    """Matrix triangular solve op.

    Solves the (possibly batched) triangular system for each leading index
    of `a`/`rhs`; with `adj` set, the conjugate-transposed system is solved.
    Returns a 1-tuple containing the solution array.
    """
    # trans=2 asks solve_triangular for the conjugate-transposed system.
    transpose_flag = 2 if adj else 0
    out = np.empty(rhs.shape).astype(a.dtype)
    # Iterate every batch index; (Ellipsis,) selects the trailing matrix.
    for batch_index in np.ndindex(a.shape[:-2]):
        idx = batch_index + (Ellipsis,)
        lhs = np.conj(a[idx]) if adj else a[idx]
        out[idx] = sp.linalg.solve_triangular(lhs, rhs[idx],
                                              trans=transpose_flag,
                                              lower=lower)
    return out,
|
def from_fptr ( cls , label , type_ , fptr ) :
"""Return ` ` FSEntry ` ` object ."""
|
return FSEntry ( label = label , type = type_ , path = fptr . path , use = fptr . use , file_uuid = fptr . file_uuid , derived_from = fptr . derived_from , checksum = fptr . checksum , checksumtype = fptr . checksumtype , )
|
def _load_input_data_port_models ( self ) :
"""Reloads the input data port models directly from the the state"""
|
if not self . state_copy_initialized :
return
self . input_data_ports = [ ]
for input_data_port_m in self . state_copy . input_data_ports :
new_ip_m = deepcopy ( input_data_port_m )
new_ip_m . parent = self
new_ip_m . data_port = input_data_port_m . data_port
self . input_data_ports . append ( new_ip_m )
|
def _glob1 ( self , pattern , ondisk = True , source = False , strings = False ) :
"""Globs for and returns a list of entry names matching a single
pattern in this directory .
This searches any repositories and source directories for
corresponding entries and returns a Node ( or string ) relative
to the current directory if an entry is found anywhere .
TODO : handle pattern with no wildcard"""
|
search_dir_list = self . get_all_rdirs ( )
for srcdir in self . srcdir_list ( ) :
search_dir_list . extend ( srcdir . get_all_rdirs ( ) )
selfEntry = self . Entry
names = [ ]
for dir in search_dir_list : # We use the . name attribute from the Node because the keys of
# the dir . entries dictionary are normalized ( that is , all upper
# case ) on case - insensitive systems like Windows .
node_names = [ v . name for k , v in dir . entries . items ( ) if k not in ( '.' , '..' ) ]
names . extend ( node_names )
if not strings : # Make sure the working directory ( self ) actually has
# entries for all Nodes in repositories or variant dirs .
for name in node_names :
selfEntry ( name )
if ondisk :
try :
disk_names = os . listdir ( dir . _abspath )
except os . error :
continue
names . extend ( disk_names )
if not strings : # We ' re going to return corresponding Nodes in
# the local directory , so we need to make sure
# those Nodes exist . We only want to create
# Nodes for the entries that will match the
# specified pattern , though , which means we
# need to filter the list here , even though
# the overall list will also be filtered later ,
# after we exit this loop .
if pattern [ 0 ] != '.' :
disk_names = [ x for x in disk_names if x [ 0 ] != '.' ]
disk_names = fnmatch . filter ( disk_names , pattern )
dirEntry = dir . Entry
for name in disk_names : # Add ' . / ' before disk filename so that ' # ' at
# beginning of filename isn ' t interpreted .
name = './' + name
node = dirEntry ( name ) . disambiguate ( )
n = selfEntry ( name )
if n . __class__ != node . __class__ :
n . __class__ = node . __class__
n . _morph ( )
names = set ( names )
if pattern [ 0 ] != '.' :
names = [ x for x in names if x [ 0 ] != '.' ]
names = fnmatch . filter ( names , pattern )
if strings :
return names
return [ self . entries [ _my_normcase ( n ) ] for n in names ]
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.