def run(self, graminit_h, graminit_c):
    """Load the grammar tables from the text files written by pgen."""
    self.parse_graminit_h(graminit_h)
    self.parse_graminit_c(graminit_c)
    self.finish_off()
|
async def reconnect(self):
    """Force a reconnection."""
    monitor = self.monitor
    if monitor.reconnecting.locked() or monitor.close_called.is_set():
        return
    async with monitor.reconnecting:
        await self.close()
        await self._connect_with_login([(self.endpoint, self.cacert)])
|
def decode(data, encoding='utf-8', errors='strict'):
    """Recursively convert a structure whose keys and values are bytes
    into one of unicode strings. This happens when a Python 2 worker
    sends a dictionary back to a Python 3 master."""
    data_type = type(data)
    if data_type == bytes:
        return bytes2unicode(data, encoding, errors)
    if data_type in (dict, list, tuple):
        if data_type == dict:
            data = data.items()
        return data_type(map(decode, data))
    return data
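
# Usage sketch for the recursive decoder above. bytes2unicode is stood in by a
# hypothetical one-liner here, purely for illustration; the real helper is
# supplied elsewhere in the project.
def bytes2unicode(value, encoding='utf-8', errors='strict'):
    return value.decode(encoding, errors)

payload = {b'status': b'ok', b'files': [b'a.txt', b'b.txt']}
print(decode(payload))  # {'status': 'ok', 'files': ['a.txt', 'b.txt']}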
|
def string_chain(text, filters):
    """Apply several filters to the entire string, one after another.
    :param text: string to format
    :param filters: sequence of filters to apply to the string
    :return: the formatted string"""
    if filters is None:
        return text
    for filter_function in filters:
        text = filter_function(text)
    return text
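
# Usage sketch, assuming string_chain() above is in scope. Each filter is any
# callable that takes and returns a string; they are applied left to right.
filters = [str.strip, str.lower, lambda s: s.replace(" ", "_")]
print(string_chain("  Hello World  ", filters))  # 'hello_world'
print(string_chain("unchanged", None))           # filters=None returns the text as-is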
|
def add(self, item):
    """Adds the specified item to this queue if there is available space.
    :param item: (object), the specified item.
    :return: (bool), ``true`` if element is successfully added, ``false`` otherwise."""
    def result_fnc(f):
        if f.result():
            return True
        raise Full("Queue is full!")

    return self.offer(item).continue_with(result_fnc)
|
def entries(self):
    """Return the entries that are published under this node."""
    # Since there is currently no filtering in place, return all entries.
    EntryModel = get_entry_model()
    qs = get_entry_model().objects.order_by('-publication_date')
    # Only limit to current language when this makes sense.
    if issubclass(EntryModel, TranslatableModel):
        admin_form_language = self.get_current_language()
        # The page object is in the current language tab.
        qs = qs.active_translations(admin_form_language).language(admin_form_language)
    return qs
|
def complete_upload(self):
    """Complete the MultiPart Upload operation. This method should
    be called when all parts of the file have been successfully
    uploaded to S3.
    :rtype: :class:`boto.s3.multipart.CompletedMultiPartUpload`
    :returns: An object representing the completed upload."""
    xml = self.to_xml()
    return self.bucket.complete_multipart_upload(self.key_name, self.id, xml)
|
def get_aliases(self):
    """RETURN LIST OF {"alias": a, "index": i} PAIRS
    ALL INDEXES INCLUDED, EVEN IF NO ALIAS {"alias": Null}"""
    for index, desc in self.get_metadata().indices.items():
        if not desc["aliases"]:
            yield wrap({"index": index})
        elif desc['aliases'][0] == index:
            Log.error("should not happen")
        else:
            for a in desc["aliases"]:
                yield wrap({"index": index, "alias": a})
|
def index_sets(self, as_set=False):
    """Return the series as list of index set tuples."""
    indexes = frozenset if as_set else tuple
    return [indexes(b.iter_set()) for b in self]
|
def remove_sni_cert(self, hostname):
    """Remove the SSL Server Name Indicator (SNI) certificate configuration for
    the specified *hostname*.

    .. warning::
        This method will raise a :py:exc:`RuntimeError` if either the SNI
        extension is not available in the :py:mod:`ssl` module or if SSL was
        not enabled at initialization time through the use of arguments to
        :py:meth:`~.__init__`.

    .. versionadded:: 2.2.0
    :param str hostname: The hostname to delete the SNI configuration for."""
    if not g_ssl_has_server_sni:
        raise RuntimeError('the ssl server name indicator extension is unavailable')
    if self._ssl_sni_entries is None:
        raise RuntimeError('ssl was not enabled on initialization')
    sni_entry = self._ssl_sni_entries.pop(hostname, None)
    if sni_entry is None:
        raise ValueError('the specified hostname does not have an sni certificate configuration')
|
def download(self, data, um_update=False):
    """Used to download firmware or a filter set.
    :param data: binary string to push via serial
    :param um_update: flag whether to update uManager"""
    self.open_umanager()
    self.ser.write(''.join((self.cmd_download, self.cr)))
    if self.read_loop(lambda x: x.endswith(self.xmodem_crc), self.timeout):
        if self.xmodem.send(StringIO.StringIO(data)):
            log.info("Data sent")
        else:
            raise Dam1021Error(4, "Error during file download")
    else:
        raise Dam1021Error(3, "uManager is not ready to accept data")
    if self.read_loop(lambda x: x.lower().find(self.reprogram_ack) != -1, self.timeout):
        skr_sum = hashlib.sha1(data).hexdigest()
        log.info("File downloaded. Data SHA-1 checksum: {}".format(skr_sum))
    else:
        raise Dam1021Error(5, "uManager accepted data but did not reprogram")
    if um_update:
        self.ser.write(''.join((self.cmd_update, self.cr)))
        if self.read_loop(lambda x: x.lower().find(self.update_confirmation) != -1, self.timeout * self.umanager_waitcoeff):
            self.ser.write(self.update_ack)
        else:
            raise Dam1021Error(13, "Error during update command invocation")
        if self.read_loop(lambda x: x.lower().find(self.update_reset) != -1, self.timeout * self.umanager_waitcoeff):
            log.info("uManager updated")
        else:
            raise Dam1021Error(14, "Update failed")
    else:
        self.close_umanager()
    return skr_sum
|
def get_plot(self, zero_to_efermi=True, ylim=None, smooth=False, vbm_cbm_marker=False, smooth_tol=None):
    """Get a matplotlib object for the bandstructure plot.
    Blue lines are up spin, red lines are down spin.
    Args:
        zero_to_efermi: Automatically subtract off the Fermi energy from
            the eigenvalues and plot (E - Ef).
        ylim: Specify the y-axis (energy) limits; by default None lets
            the code choose. It is vbm - 4 and cbm + 4 if insulator,
            efermi - 10 and efermi + 10 if metal.
        smooth: interpolates the bands by a cubic spline.
        smooth_tol (float): tolerance for fitting spline to band data.
            Default is None such that no tolerance will be used."""
|
plt = pretty_plot ( 12 , 8 )
from matplotlib import rc
import scipy . interpolate as scint
# main internal config options
e_min = - 4
e_max = 4
if self . _bs . is_metal ( ) :
e_min = - 10
e_max = 10
# band _ linewidth = 3
band_linewidth = 1
data = self . bs_plot_data ( zero_to_efermi )
if not smooth :
for d in range ( len ( data [ 'distances' ] ) ) :
for i in range ( self . _nb_bands ) :
plt . plot ( data [ 'distances' ] [ d ] , [ data [ 'energy' ] [ d ] [ str ( Spin . up ) ] [ i ] [ j ] for j in range ( len ( data [ 'distances' ] [ d ] ) ) ] , 'b-' , linewidth = band_linewidth )
if self . _bs . is_spin_polarized :
plt . plot ( data [ 'distances' ] [ d ] , [ data [ 'energy' ] [ d ] [ str ( Spin . down ) ] [ i ] [ j ] for j in range ( len ( data [ 'distances' ] [ d ] ) ) ] , 'r--' , linewidth = band_linewidth )
else : # Interpolation failure can be caused by trying to fit an entire
# band with one spline rather than fitting with piecewise splines
# ( splines are ill - suited to fit discontinuities ) .
# The number of splines used to fit a band is determined by the
# number of branches ( high symmetry lines ) defined in the
# BandStructureSymmLine object ( see BandStructureSymmLine . _ branches ) .
warning = "WARNING! Distance / branch {d}, band {i} cannot be " + "interpolated.\n" + "See full warning in source.\n" + "If this is not a mistake, try increasing " + "smooth_tol.\nCurrent smooth_tol is {s}."
for d in range ( len ( data [ 'distances' ] ) ) :
for i in range ( self . _nb_bands ) :
tck = scint . splrep ( data [ 'distances' ] [ d ] , [ data [ 'energy' ] [ d ] [ str ( Spin . up ) ] [ i ] [ j ] for j in range ( len ( data [ 'distances' ] [ d ] ) ) ] , s = smooth_tol )
step = ( data [ 'distances' ] [ d ] [ - 1 ] - data [ 'distances' ] [ d ] [ 0 ] ) / 1000
xs = [ x * step + data [ 'distances' ] [ d ] [ 0 ] for x in range ( 1000 ) ]
ys = [ scint . splev ( x * step + data [ 'distances' ] [ d ] [ 0 ] , tck , der = 0 ) for x in range ( 1000 ) ]
for y in ys :
if np . isnan ( y ) :
print ( warning . format ( d = str ( d ) , i = str ( i ) , s = str ( smooth_tol ) ) )
break
plt . plot ( xs , ys , 'b-' , linewidth = band_linewidth )
if self . _bs . is_spin_polarized :
tck = scint . splrep ( data [ 'distances' ] [ d ] , [ data [ 'energy' ] [ d ] [ str ( Spin . down ) ] [ i ] [ j ] for j in range ( len ( data [ 'distances' ] [ d ] ) ) ] , s = smooth_tol )
step = ( data [ 'distances' ] [ d ] [ - 1 ] - data [ 'distances' ] [ d ] [ 0 ] ) / 1000
xs = [ x * step + data [ 'distances' ] [ d ] [ 0 ] for x in range ( 1000 ) ]
ys = [ scint . splev ( x * step + data [ 'distances' ] [ d ] [ 0 ] , tck , der = 0 ) for x in range ( 1000 ) ]
for y in ys :
if np . isnan ( y ) :
print ( warning . format ( d = str ( d ) , i = str ( i ) , s = str ( smooth_tol ) ) )
break
plt . plot ( xs , ys , 'r--' , linewidth = band_linewidth )
self . _maketicks ( plt )
# Main X and Y Labels
plt . xlabel ( r'$\mathrm{Wave\ Vector}$' , fontsize = 30 )
ylabel = r'$\mathrm{E\ -\ E_f\ (eV)}$' if zero_to_efermi else r'$\mathrm{Energy\ (eV)}$'
plt . ylabel ( ylabel , fontsize = 30 )
# Draw Fermi energy , only if not the zero
if not zero_to_efermi :
ef = self . _bs . efermi
plt . axhline ( ef , linewidth = 2 , color = 'k' )
# X range ( K )
# last distance point
x_max = data [ 'distances' ] [ - 1 ] [ - 1 ]
plt . xlim ( 0 , x_max )
if ylim is None :
if self . _bs . is_metal ( ) : # Plot A Metal
if zero_to_efermi :
plt . ylim ( e_min , e_max )
else :
plt . ylim ( self . _bs . efermi + e_min , self . _bs . efermi + e_max )
else :
if vbm_cbm_marker :
for cbm in data [ 'cbm' ] :
plt . scatter ( cbm [ 0 ] , cbm [ 1 ] , color = 'r' , marker = 'o' , s = 100 )
for vbm in data [ 'vbm' ] :
plt . scatter ( vbm [ 0 ] , vbm [ 1 ] , color = 'g' , marker = 'o' , s = 100 )
plt . ylim ( data [ 'vbm' ] [ 0 ] [ 1 ] + e_min , data [ 'cbm' ] [ 0 ] [ 1 ] + e_max )
else :
plt . ylim ( ylim )
if not self . _bs . is_metal ( ) and vbm_cbm_marker :
for cbm in data [ 'cbm' ] :
plt . scatter ( cbm [ 0 ] , cbm [ 1 ] , color = 'r' , marker = 'o' , s = 100 )
for vbm in data [ 'vbm' ] :
plt . scatter ( vbm [ 0 ] , vbm [ 1 ] , color = 'g' , marker = 'o' , s = 100 )
plt . tight_layout ( )
return plt
|
def get_main_app(argv=[]):
    """Standard boilerplate Qt application code.
    Do everything but app.exec_() -- so that we can test the application in one thread."""
    app = QApplication(argv)
    app.setApplicationName(__appname__)
    app.setWindowIcon(newIcon("app"))
    # Tzutalin 201705+: Accept extra arguments to change the predefined class file
    # Usage: labelImg.py image predefClassFile saveDir
    win = MainWindow(argv[1] if len(argv) >= 2 else None,
                     argv[2] if len(argv) >= 3 else os.path.join(os.path.dirname(sys.argv[0]), 'data', 'predefined_classes.txt'),
                     argv[3] if len(argv) >= 4 else None)
    win.show()
    return app, win
|
def top2_reduced(votes):
    """Description:
        Calculates the values of the 12 moment conditions for the top-2 alternatives.
    Parameters:
        votes: ordinal preference data (numpy ndarray of integers)"""
|
res = np . zeros ( 12 )
for vote in votes : # the top ranked alternative is in vote [ 0 ] [ 0 ] , second in vote [ 1 ] [ 0]
if vote [ 0 ] [ 0 ] == 0 : # i . e . the first alt is ranked first
res [ 0 ] += 1
if vote [ 1 ] [ 0 ] == 2 :
res [ 4 ] += 1
elif vote [ 1 ] [ 0 ] == 3 :
res [ 5 ] += 1
elif vote [ 0 ] [ 0 ] == 1 :
res [ 1 ] += 1
if vote [ 1 ] [ 0 ] == 0 :
res [ 6 ] += 1
elif vote [ 1 ] [ 0 ] == 3 :
res [ 7 ] += 1
elif vote [ 0 ] [ 0 ] == 2 :
res [ 2 ] += 1
if vote [ 1 ] [ 0 ] == 0 :
res [ 8 ] += 1
elif vote [ 1 ] [ 0 ] == 1 :
res [ 9 ] += 1
elif vote [ 0 ] [ 0 ] == 3 :
res [ 3 ] += 1
if vote [ 1 ] [ 0 ] == 1 :
res [ 10 ] += 1
elif vote [ 1 ] [ 0 ] == 2 :
res [ 11 ] += 1
res /= len ( votes )
return res
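
# Usage sketch under the assumption (inferred from the indexing above) that each
# vote is an ordered array of (alternative, rank) pairs, most-preferred first,
# over four alternatives 0-3, and that top2_reduced() above is in scope.
import numpy as np

votes = np.array([
    [[0, 1], [2, 2], [1, 3], [3, 4]],   # ranks alternative 0 first, 2 second
    [[1, 1], [3, 2], [0, 3], [2, 4]],   # ranks alternative 1 first, 3 second
])
print(top2_reduced(votes))  # 12 moment-condition frequencies, e.g. res[0] == 0.5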
|
def GetArtifactsDependenciesClosure(name_list, os_name=None):
    """For all the artifacts in the list, returns them and their dependencies."""
    artifacts = set(REGISTRY.GetArtifacts(os_name=os_name, name_list=name_list))
    dependencies = set()
    for art in artifacts:
        dependencies.update(GetArtifactDependencies(art, recursive=True))
    if dependencies:
        artifacts.update(set(REGISTRY.GetArtifacts(os_name=os_name, name_list=list(dependencies))))
    return artifacts
|
def _call(self, path, method, body=None, headers=None):
    """Wrapper around http.do_call that transforms some HTTPError into
    our own exceptions."""
    try:
        resp = self.http.do_call(path, method, body, headers)
    except http.HTTPError as err:
        if err.status == 401:
            raise PermissionError('Insufficient permissions to query ' + '%s with user %s :%s' % (path, self.user, err))
        raise
    return resp
|
def defrise_ellipses(ndim, nellipses=8, alternating=False):
    """Ellipses for the standard Defrise phantom in 2 or 3 dimensions.

    Parameters
    ----------
    ndim : {2, 3}
        Dimension of the space for the ellipses/ellipsoids.
    nellipses : int, optional
        Number of ellipses. If more ellipses are used, each ellipse becomes
        thinner.
    alternating : bool, optional
        True if the ellipses should have alternating densities (+1, -1),
        otherwise all ellipses have value +1.

    See Also
    --------
    odl.phantom.geometric.ellipsoid_phantom :
        Function for creating arbitrary ellipsoid phantoms
    odl.phantom.transmission.shepp_logan_ellipsoids"""
|
ellipses = [ ]
if ndim == 2 :
for i in range ( nellipses ) :
if alternating :
value = ( - 1.0 + 2.0 * ( i % 2 ) )
else :
value = 1.0
axis_1 = 0.5
axis_2 = 0.5 / ( nellipses + 1 )
center_x = 0.0
center_y = - 1 + 2.0 / ( nellipses + 1.0 ) * ( i + 1 )
rotation = 0
ellipses . append ( [ value , axis_1 , axis_2 , center_x , center_y , rotation ] )
elif ndim == 3 :
for i in range ( nellipses ) :
if alternating :
value = ( - 1.0 + 2.0 * ( i % 2 ) )
else :
value = 1.0
axis_1 = axis_2 = 0.5
axis_3 = 0.5 / ( nellipses + 1 )
center_x = center_y = 0.0
center_z = - 1 + 2.0 / ( nellipses + 1.0 ) * ( i + 1 )
rotation_phi = rotation_theta = rotation_psi = 0
ellipses . append ( [ value , axis_1 , axis_2 , axis_3 , center_x , center_y , center_z , rotation_phi , rotation_theta , rotation_psi ] )
return ellipses
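
# Usage sketch, assuming defrise_ellipses() above is in scope. In 2D each row is
# [value, axis_1, axis_2, center_x, center_y, rotation].
for row in defrise_ellipses(2, nellipses=2, alternating=True):
    print(row)
# [-1.0, 0.5, 0.166..., 0.0, -0.333..., 0]
# [ 1.0, 0.5, 0.166..., 0.0,  0.333..., 0]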
|
def _estimate_param_scan_worker ( estimator , params , X , evaluate , evaluate_args , failfast , return_exceptions ) :
"""Method that runs estimation for several parameter settings .
Defined as a worker for parallelization"""
|
# run estimation
model = None
try : # catch any exception
estimator . estimate ( X , ** params )
model = estimator . model
except KeyboardInterrupt : # we want to be able to interactively interrupt the worker , no matter of failfast = False .
raise
except :
e = sys . exc_info ( ) [ 1 ]
if isinstance ( estimator , Loggable ) :
estimator . logger . warning ( "Ignored error during estimation: %s" % e )
if failfast :
raise
# re - raise
elif return_exceptions :
model = e
else :
pass
# just return model = None
# deal with results
res = [ ]
# deal with result
if evaluate is None : # we want full models
res . append ( model )
# we want to evaluate function ( s ) of the model
elif _types . is_iterable ( evaluate ) :
values = [ ]
# the function values the model
for ieval , name in enumerate ( evaluate ) : # get method / attribute name and arguments to be evaluated
# name = evaluate [ ieval ]
args = ( )
if evaluate_args is not None :
args = evaluate_args [ ieval ]
# wrap single arguments in an iterable again to pass them .
if _types . is_string ( args ) :
args = ( args , )
# evaluate
try : # try calling method / property / attribute
value = _call_member ( estimator . model , name , failfast , * args )
# couldn ' t find method / property / attribute
except AttributeError as e :
if failfast :
raise e
# raise an AttributeError
else :
value = None
# we just ignore it and return None
values . append ( value )
# if we only have one value , unpack it
if len ( values ) == 1 :
values = values [ 0 ]
res . append ( values )
else :
raise ValueError ( 'Invalid setting for evaluate: ' + str ( evaluate ) )
if len ( res ) == 1 :
res = res [ 0 ]
return res
|
def read_stream ( self , stream_id , size = None ) :
"""Read data from the given stream .
By default ( ` size = None ` ) , this returns all data left in current HTTP / 2
frame . In other words , default behavior is to receive frame by frame .
If size is given a number above zero , method will try to return as much
bytes as possible up to the given size , block until enough bytes are
ready or stream is remotely closed .
If below zero , it will read until the stream is remotely closed and
return everything at hand .
` size = 0 ` is a special case that does nothing but returns ` b ' ' ` . The
same result ` b ' ' ` is also returned under other conditions if there is
no more data on the stream to receive , even under ` size = None ` and peer
sends an empty frame - you can use ` b ' ' ` to safely identify the end of
the given stream .
Flow control frames will be automatically sent while reading clears the
buffer , allowing more data to come in .
: param stream _ id : Stream to read
: param size : Expected size to read , ` - 1 ` for all , default frame .
: return : Bytes read or empty if there is no more to expect ."""
|
rv = [ ]
try :
with ( yield from self . _get_stream ( stream_id ) . rlock ) :
if size is None :
rv . append ( ( yield from self . _get_stream ( stream_id ) . read_frame ( ) ) )
self . _flow_control ( stream_id )
elif size < 0 :
while True :
rv . extend ( ( yield from self . _get_stream ( stream_id ) . read_all ( ) ) )
self . _flow_control ( stream_id )
else :
while size > 0 :
bufs , count = yield from self . _get_stream ( stream_id ) . read ( size )
rv . extend ( bufs )
size -= count
self . _flow_control ( stream_id )
except StreamClosedError :
pass
except _StreamEndedException as e :
try :
self . _flow_control ( stream_id )
except StreamClosedError :
pass
rv . extend ( e . bufs )
return b'' . join ( rv )
|
def is_parseable(self):
    """Check if content is parseable for recursion.
    @return: True if content is parseable
    @rtype: bool"""
    if not self.valid:
        return False
    # some content types must be validated with the page content
    if self.content_type in ("application/xml", "text/xml"):
        rtype = mimeutil.guess_mimetype_read(self.get_content)
        if rtype is not None:
            # XXX side effect
            self.content_type = rtype
    if self.content_type not in self.ContentMimetypes:
        log.debug(LOG_CHECK, "URL with content type %r is not parseable", self.content_type)
        return False
    return True
|
def from_qubo(cls, Q, offset=0.0):
    """Create a binary quadratic model from a QUBO model.

    Args:
        Q (dict):
            Coefficients of a quadratic unconstrained binary optimization
            (QUBO) problem. Should be a dict of the form `{(u, v): bias, ...}`
            where `u`, `v`, are binary-valued variables and `bias` is their
            associated coefficient.
        offset (optional, default=0.0):
            Constant offset applied to the model.

    Returns:
        :class:`.BinaryQuadraticModel`: Binary quadratic model with vartype set to
        :class:`.Vartype.BINARY`.

    Examples:
        This example creates a binary quadratic model from a QUBO model.

        >>> import dimod
        >>> Q = {(0, 0): -1, (1, 1): -1, (0, 1): 2}
        >>> model = dimod.BinaryQuadraticModel.from_qubo(Q, offset=0.0)
        >>> model.linear  # doctest: +SKIP
        {0: -1, 1: -1}
        >>> model.vartype
        <Vartype.BINARY: frozenset({0, 1})>"""
    linear = {}
    quadratic = {}
    for (u, v), bias in iteritems(Q):
        if u == v:
            linear[u] = bias
        else:
            quadratic[(u, v)] = bias
    return cls(linear, quadratic, offset, Vartype.BINARY)
|
def get_binary(self):
    """Return a binary buffer containing the file content."""
    content_disp = 'Content-Disposition: form-data; name="file"; filename="{}"'
    stream = io.BytesIO()
    stream.write(_string_to_binary('--{}'.format(self.boundary)))
    stream.write(_crlf())
    stream.write(_string_to_binary(content_disp.format(self.file_name)))
    stream.write(_crlf())
    stream.write(_crlf())
    stream.write(self.body)
    stream.write(_crlf())
    stream.write(_string_to_binary('--{}--'.format(self.boundary)))
    stream.write(_crlf())
    return stream.getvalue()
|
def contraction_conical_Crane(Di1, Di2, l=None, angle=None):
    r'''Returns loss coefficient for a conical pipe contraction
    as shown in Crane TP 410M [1]_ between 0 and 180 degrees.

    If :math:`\theta < 45^{\circ}`:

    .. math::
        K_2 = 0.8 \sin\frac{\theta}{2}\left(1 - \beta^2\right)

    otherwise:

    .. math::
        K_2 = 0.5 \sqrt{\sin\frac{\theta}{2}}\left(1 - \beta^2\right)

    .. math::
        \beta = d_2/d_1

    Parameters
    ----------
    Di1 : float
        Inside pipe diameter of the larger, upstream, pipe, [m]
    Di2 : float
        Inside pipe diameter of the smaller, downstream, pipe, [m]
    l : float
        Length of the contraction, optional [m]
    angle : float
        Angle of contraction, optional [degrees]

    Returns
    -------
    K : float
        Loss coefficient in terms of the following (smaller) pipe [-]

    Notes
    -----
    Cheap and has substantial impact on pressure drop. Note that the
    nomenclature in [1]_ is somewhat different - the smaller pipe is called 1,
    and the larger pipe is called 2; and so the beta ratio is reversed, and the
    fourth power of beta used in their equation is not necessary.

    Examples
    --------
    >>> contraction_conical_Crane(Di1=0.0779, Di2=0.0525, l=0)
    0.2729017979998056

    References
    ----------
    .. [1] Crane Co. Flow of Fluids Through Valves, Fittings, and Pipe. Crane,
       2009.'''
|
if l is None and angle is None :
raise Exception ( 'One of `l` or `angle` must be specified' )
beta = Di2 / Di1
beta2 = beta * beta
if angle is not None :
angle = radians ( angle )
# l = ( Di1 - Di2 ) / ( 2.0 * tan ( 0.5 * angle ) ) # L is not needed in this calculation
elif l is not None :
try :
angle = 2.0 * atan ( ( Di1 - Di2 ) / ( 2.0 * l ) )
except ZeroDivisionError :
angle = pi
if angle < 0.25 * pi : # Formula 1
K2 = 0.8 * sin ( 0.5 * angle ) * ( 1.0 - beta2 )
else : # Formula 2
K2 = 0.5 * ( sin ( 0.5 * angle ) ** 0.5 * ( 1.0 - beta2 ) )
return K2
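
# The docstring example can be reproduced with a few lines of standard-library
# math. This is only a numerical check of Formula 2 (the l=0 case degenerates
# to angle=pi), not part of the library.
from math import sin, pi

beta2 = (0.0525 / 0.0779) ** 2
K2 = 0.5 * (sin(0.5 * pi) ** 0.5 * (1.0 - beta2))  # Formula 2, theta = pi
print(K2)  # ~0.2729017979998056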
|
def impute_using_statistics(df, method='min'):
    """Imputes the missing values by the selected statistical property of each column.
    :param df: The input dataframe that contains missing values
    :param method: The imputation method (min by default)
        "zero": fill missing entries with zeros
        "mean": fill with column means
        "median": fill with column medians
        "min": fill with min value per column
        "random": fill with gaussian noise according to mean/std of column
    :return: the imputed dataframe"""
    sf = SimpleFill(method)
    imputed_matrix = sf.complete(df.values)
    imputed_df = pd.DataFrame(imputed_matrix, df.index, df.columns)
    return imputed_df
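
# Usage sketch, assuming the SimpleFill-style imputer imported by this module is
# available and impute_using_statistics() above is in scope.
import numpy as np
import pandas as pd

df = pd.DataFrame({"a": [1.0, np.nan, 3.0], "b": [np.nan, 5.0, 6.0]})
imputed = impute_using_statistics(df, method='mean')  # NaNs replaced by column means
print(imputed)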
|
def is_upstart(conn):
    """This helper should only be used as a fallback (last resort) as it is not
    guaranteed that it will be absolutely correct."""
    # it may be possible that we may be systemd and the caller never checked
    # before so lets do that
    if is_systemd(conn):
        return False
    # get the initctl executable, if it doesn't exist we can't proceed so we
    # are probably not upstart
    initctl = conn.remote_module.which('initctl')
    if not initctl:
        return False
    # finally, try and get output from initctl that might hint this is an upstart
    # system. On an Ubuntu 14.04.2 system this would look like:
    # $ initctl version
    # init (upstart 1.12.1)
    stdout, stderr, _ = remoto.process.check(conn, [initctl, 'version'])
    result_string = b' '.join(stdout)
    if b'upstart' in result_string:
        return True
    return False
|
def attribute_labels(self, attribute_id, params=None):
    """Gets the security labels from an attribute.
    Yields: Security label json"""
    if params is None:
        params = {}
    if not self.can_update():
        self._tcex.handle_error(910, [self.type])
    for al in self.tc_requests.attribute_labels(self.api_type, self.api_sub_type, self.unique_id, attribute_id, owner=self.owner, params=params):
        yield al
|
def create_id(self, prefix="guid"):
    """Create an ID.
    Note that if `prefix` is not provided, it will be `guid`, even if the
    `method` is `METHOD_INT`."""
    if self.method == IDGenerator.METHOD_UUID:
        id_ = str(uuid.uuid4())
    elif self.method == IDGenerator.METHOD_INT:
        id_ = self.next_int
        self.next_int += 1
    else:
        raise InvalidMethodError(self.method)
    return "%s:%s-%s" % (self.namespace.prefix, prefix, id_)
|
def lock(self, block=True):
    """Lock connection from being used elsewhere."""
    self._locked = True
    return self._lock.acquire(block)
|
async def container_load ( self , container_type , params = None , container = None ) :
"""Loads container of elements from the reader . Supports the container ref .
Returns loaded container .
: param container _ type :
: param params :
: param container :
: param field _ archiver :
: return :"""
|
raw_container = container_is_raw ( container_type , params )
c_len = await load_uvarint ( self . iobj )
elem_ver = await load_uvarint ( self . iobj ) if not raw_container else 0
# if container and c _ len ! = len ( container ) :
# raise ValueError ( ' Size mismatch ' )
elem_type = x . container_elem_type ( container_type , params )
res = container if container else [ ]
for i in range ( c_len ) :
try :
self . tracker . push_index ( i )
fvalue = await self . _load_field ( elem_type , params [ 1 : ] if params else None , x . eref ( res , i ) if container else None )
self . tracker . pop ( )
except Exception as e :
raise helpers . ArchiveException ( e , tracker = self . tracker ) from e
if not container :
res . append ( fvalue )
return res
|
def error(self, interface_id, errorcode, msg):
    """When some error occurs the CCU / Homegear will send its error message here."""
    LOG.debug("RPCFunctions.error: interface_id = %s, errorcode = %i, message = %s" % (interface_id, int(errorcode), str(msg)))
    if self.systemcallback:
        self.systemcallback('error', interface_id, errorcode, msg)
    return True
|
def addCustomOptions(parser):
    """Adds custom options to a parser.
    :param parser: the parser.
    :type parser: argparse.parser"""
    parser.add_argument("--format", type=str, metavar="FORMAT", default="png", choices=["png", "ps", "pdf", "X11"], help="The output file format (png, ps, pdf, or X11 formats are available). [default: %(default)s]")
    parser.add_argument("--title", type=str, metavar="STRING", default="C2 in function of C1 - MDS", help="The title of the MDS plot. [default: %(default)s]")
    parser.add_argument("--xlabel", type=str, metavar="STRING", default="C1", help="The label of the X axis. [default: %(default)s]")
    parser.add_argument("--ylabel", type=str, metavar="STRING", default="C2", help="The label of the Y axis. [default: %(default)s]")
|
def read_cifar10 ( filename_queue ) :
"""Reads and parses examples from CIFAR10 data files .
Recommendation : if you want N - way read parallelism , call this function
N times . This will give you N independent Readers reading different
files & positions within those files , which will give better mixing of
examples .
Args :
filename _ queue : A queue of strings with the filenames to read from .
Returns :
An object representing a single example , with the following fields :
height : number of rows in the result ( 32)
width : number of columns in the result ( 32)
depth : number of color channels in the result ( 3)
key : a scalar string Tensor describing the filename & record number
for this example .
label : an int32 Tensor with the label in the range 0 . . 9.
uint8image : a [ height , width , depth ] uint8 Tensor with the image data"""
|
class CIFAR10Record ( object ) :
pass
result = CIFAR10Record ( )
# Dimensions of the images in the CIFAR - 10 dataset .
# See http : / / www . cs . toronto . edu / ~ kriz / cifar . html for a description of the
# input format .
label_bytes = 1
# 2 for CIFAR - 100
result . height = 32
result . width = 32
result . depth = 3
image_bytes = result . height * result . width * result . depth
# Every record consists of a label followed by the image , with a
# fixed number of bytes for each .
record_bytes = label_bytes + image_bytes
# Read a record , getting filenames from the filename _ queue . No
# header or footer in the CIFAR - 10 format , so we leave header _ bytes
# and footer _ bytes at their default of 0.
reader = tf . FixedLengthRecordReader ( record_bytes = record_bytes )
result . key , value = reader . read ( filename_queue )
# Convert from a string to a vector of uint8 that is record _ bytes long .
record_bytes = tf . decode_raw ( value , tf . uint8 )
# The first bytes represent the label , which we convert from uint8 - > int32.
result . label = tf . cast ( tf . strided_slice ( record_bytes , [ 0 ] , [ label_bytes ] ) , tf . int32 )
# The remaining bytes after the label represent the image , which we reshape
# from [ depth * height * width ] to [ depth , height , width ] .
depth_major = tf . reshape ( tf . strided_slice ( record_bytes , [ label_bytes ] , [ label_bytes + image_bytes ] ) , [ result . depth , result . height , result . width ] )
# Convert from [ depth , height , width ] to [ height , width , depth ] .
result . uint8image = tf . transpose ( depth_major , [ 1 , 2 , 0 ] )
return result
|
def is_valid_pentameter(self, scanned_line: str) -> bool:
    """Determine if a scansion pattern is one of the valid pentameter metrical patterns.
    :param scanned_line: a line containing a sequence of stressed and unstressed syllables
    :return bool: whether or not the scansion is a valid pentameter

    >>> print(MetricalValidator().is_valid_pentameter('-UU-UU--UU-UUX'))
    True"""
    line = scanned_line.replace(self.constants.FOOT_SEPARATOR, "")
    line = line.replace(" ", "")
    if len(line) < 10:
        return False
    line = line[:-1] + self.constants.OPTIONAL_ENDING
    return self.VALID_PENTAMETERS.__contains__(line)
|
def format_value(value):
    """Convert a list into a comma separated string, for displaying
    select multiple values in emails."""
    if isinstance(value, list):
        value = ", ".join([v.strip() for v in value])
    return value
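
# Usage sketch, assuming format_value() above is in scope.
print(format_value(["  red", "green ", "blue"]))  # 'red, green, blue'
print(format_value("unchanged"))                  # non-lists pass through untouched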
|
def _check_ising_linear_ranges(linear_ranges, graph):
    """Check correctness / populate defaults for ising_linear_ranges."""
    if linear_ranges is None:
        linear_ranges = {}
    for v in graph:
        if v in linear_ranges:
            # check the user-provided range
            linear_ranges[v] = Specification._check_range(linear_ranges[v])
        else:
            # set default
            linear_ranges[v] = [-2, 2]
    return linear_ranges
|
def maturity_date(self):
    """[datetime] Futures expiration date. For dominant continuous contracts and index
    continuous contracts this is datetime(2999, 12, 31). (Futures only.)"""
    try:
        return self.__dict__["maturity_date"]
    except (KeyError, ValueError):
        raise AttributeError("Instrument(order_book_id={}) has no attribute 'maturity_date' ".format(self.order_book_id))
|
def browse_dailydeviations(self):
    """Retrieves Daily Deviations."""
    response = self._req('/browse/dailydeviations')
    deviations = []
    for item in response['results']:
        d = Deviation()
        d.from_dict(item)
        deviations.append(d)
    return deviations
|
def get_parent_object ( self ) :
"""Lookup a parent object . If parent _ field is None
this will return None . Otherwise this will try to
return that object .
The filter arguments are found by using the known url
parameters of the bundle , finding the value in the url keyword
arguments and matching them with the arguments in
` self . parent _ lookups ` . The first argument in parent _ lookups
matched with the value of the last argument in the list of bundle
url parameters , the second with the second last and so forth .
For example let ' s say the parent _ field attribute is ' gallery '
and the current bundle knows about these url parameters :
* adm _ post
* adm _ post _ gallery
And the current value for ' self . kwargs ' is :
* adm _ post = 2
* adm _ post _ gallery = 3
if parent _ lookups isn ' t set the filter for the queryset
on the gallery model will be :
* pk = 3
if parent _ lookups is ( ' pk ' , ' post _ _ pk ' ) then the filter
on the queryset will be :
* pk = 3
* post _ _ pk = 2
The model to filter on is found by finding the relationship
in self . parent _ field and filtering on that model .
If a match is found , ' self . queryset ` is changed to
filter on the parent as described above and the parent
object is returned . If no match is found , a Http404 error
is raised ."""
|
if self . parent_field : # Get the model we are querying on
if getattr ( self . model . _meta , 'init_name_map' , None ) : # pre - django - 1.8
cache = self . model . _meta . init_name_map ( )
field , mod , direct , m2m = cache [ self . parent_field ]
else : # 1.10
if DJANGO_VERSION [ 1 ] >= 10 :
field = self . model . _meta . get_field ( self . parent_field )
m2m = field . is_relation and field . many_to_many
direct = not field . auto_created or field . concrete
else : # 1.8 and 1.9
field , mod , direct , m2m = self . model . _meta . get_field ( self . parent_field )
to = None
field_name = None
if self . parent_lookups is None :
self . parent_lookups = ( 'pk' , )
url_params = list ( self . bundle . url_params )
if url_params and getattr ( self . bundle , 'delegated' , False ) :
url_params = url_params [ : - 1 ]
offset = len ( url_params ) - len ( self . parent_lookups )
kwargs = { }
for i in range ( len ( self . parent_lookups ) - 1 ) :
k = url_params [ offset + i ]
value = self . kwargs [ k ]
kwargs [ self . parent_lookups [ i + 1 ] ] = value
main_arg = self . kwargs [ url_params [ - 1 ] ]
main_key = self . parent_lookups [ 0 ]
if m2m :
rel = getattr ( self . model , self . parent_field )
kwargs [ main_key ] = main_arg
if direct :
to = rel . field . rel . to
field_name = self . parent_field
else :
try :
from django . db . models . fields . related import ( ForeignObjectRel )
if isinstance ( rel . rel , ForeignObjectRel ) :
to = rel . rel . related_model
else :
to = rel . rel . model
except ImportError :
to = rel . rel . model
field_name = rel . rel . field . name
else :
to = field . rel . to
if main_key == 'pk' :
to_field = field . rel . field_name
if to_field == 'vid' :
to_field = 'object_id'
else :
to_field = main_key
kwargs [ to_field ] = main_arg
# Build the list of arguments
try :
obj = to . objects . get ( ** kwargs )
if self . queryset is None :
if m2m :
self . queryset = getattr ( obj , field_name )
else :
self . queryset = self . model . objects . filter ( ** { self . parent_field : obj } )
return obj
except to . DoesNotExist :
raise http . Http404
return None
|
def __driver_stub ( self , text , state ) :
"""Display help messages or invoke the proper completer .
The interface of helper methods and completer methods are documented in
the helper ( ) decorator method and the completer ( ) decorator method ,
respectively .
Arguments :
text : A string , that is the current completion scope .
state : An integer .
Returns :
A string used to replace the given text , if any .
None if no completion candidates are found .
Raises :
This method is called via the readline callback . If this method
raises an error , it is silently ignored by the readline library .
This behavior makes debugging very difficult . For this reason ,
non - driver methods are run within try - except blocks . When an error
occurs , the stack trace is printed to self . stderr ."""
|
origline = readline . get_line_buffer ( )
line = origline . lstrip ( )
if line and line [ - 1 ] == '?' :
self . __driver_helper ( line )
else :
toks = shlex . split ( line )
return self . __driver_completer ( toks , text , state )
|
def words(self, fileids=None) -> Generator[str, str, None]:
    """Provide the words of the corpus, skipping any paragraphs flagged by keywords passed
    to the main class constructor.
    :param fileids:
    :return: words, including punctuation, one by one"""
    if not fileids:
        fileids = self.fileids()
    for para in self.paras(fileids):
        flat_para = flatten(para)
        skip = False
        if self.skip_keywords:
            for keyword in self.skip_keywords:
                if keyword in flat_para:
                    skip = True
        if not skip:
            for word in flat_para:
                yield word
|
def update(self, checked=False, labels=None, custom_labels=None):
    """Use this function when we make changes to the list of labels or when
    we load a new dataset.

    Parameters
    ----------
    checked : bool
        argument from clicked.connect
    labels : list of str
        list of labels in the dataset (default)
    custom_labels : list of str
        list of labels from a file"""
|
if labels is not None :
self . setEnabled ( True )
self . chan_name = labels
self . table . blockSignals ( True )
self . table . clearContents ( )
self . table . setRowCount ( len ( self . chan_name ) )
for i , label in enumerate ( self . chan_name ) :
old_label = QTableWidgetItem ( label )
old_label . setFlags ( Qt . ItemIsSelectable | Qt . ItemIsEnabled )
if custom_labels is not None and i < len ( custom_labels ) and custom_labels [ i ] : # it ' s not empty string or None
label_txt = custom_labels [ i ]
else :
label_txt = label
new_label = QTableWidgetItem ( label_txt )
self . table . setItem ( i , 0 , old_label )
self . table . setItem ( i , 1 , new_label )
self . table . blockSignals ( False )
|
def _get_bounds ( self , layers ) :
"""Return the bounds of all data layers involved in a cartoframes map .
Args :
layers ( list ) : List of cartoframes layers . See ` cartoframes . layers `
for all types .
Returns :
dict : Dictionary of northern , southern , eastern , and western bounds
of the superset of data layers . Keys are ` north ` , ` south ` ,
` east ` , and ` west ` . Units are in WGS84."""
|
extent_query = ( 'SELECT ST_EXTENT(the_geom) AS the_geom ' 'FROM ({query}) AS t{idx}\n' )
union_query = 'UNION ALL\n' . join ( [ extent_query . format ( query = layer . orig_query , idx = idx ) for idx , layer in enumerate ( layers ) if not layer . is_basemap ] )
extent = self . sql_client . send ( utils . minify_sql ( ( 'SELECT' , ' ST_XMIN(ext) AS west,' , ' ST_YMIN(ext) AS south,' , ' ST_XMAX(ext) AS east,' , ' ST_YMAX(ext) AS north' , 'FROM (' , ' SELECT ST_Extent(the_geom) AS ext' , ' FROM ({union_query}) AS _wrap1' , ') AS _wrap2' , ) ) . format ( union_query = union_query ) , do_post = False )
return extent [ 'rows' ] [ 0 ]
|
def analyze ( self , text ) :
"""Return the sentiment as a tuple of the form :
` ` ( polarity , subjectivity ) ` `
: param str text : A string .
. . todo : :
Figure out best format to be passed to the analyzer .
There might be a better format than a string of space separated
lemmas ( e . g . with pos tags ) but the parsing / tagging
results look rather inaccurate and a wrong pos
might prevent the lexicon lookup of an otherwise correctly
lemmatized word form ( or would it not ? ) - further checks needed ."""
|
if self . lemmatize :
text = self . _lemmatize ( text )
return self . RETURN_TYPE ( * pattern_sentiment ( text ) )
|
def token_cli ( self , eauth , load ) :
'''Create the token from the CLI and request the correct data to
authenticate via the passed authentication mechanism'''
|
load [ 'cmd' ] = 'mk_token'
load [ 'eauth' ] = eauth
tdata = self . _send_token_request ( load )
if 'token' not in tdata :
return tdata
try :
with salt . utils . files . set_umask ( 0o177 ) :
with salt . utils . files . fopen ( self . opts [ 'token_file' ] , 'w+' ) as fp_ :
fp_ . write ( tdata [ 'token' ] )
except ( IOError , OSError ) :
pass
return tdata
|
def _draw_label ( self , obj , count ) :
"""Helper method to draw on the current label . Not intended for external use ."""
|
# Start a drawing for the whole label .
label = Drawing ( float ( self . _lw ) , float ( self . _lh ) )
label . add ( self . _clip_label )
# And one for the available area ( i . e . , after padding ) .
available = Drawing ( float ( self . _dw ) , float ( self . _dh ) )
available . add ( self . _clip_drawing )
# Call the drawing function .
self . drawing_callable ( available , float ( self . _dw ) , float ( self . _dh ) , obj )
# Render the contents on the label .
available . shift ( float ( self . _lp ) , float ( self . _bp ) )
label . add ( available )
# Draw the border if requested .
if self . border :
label . add ( self . _border )
# Add however many copies we need to .
for i in range ( count ) : # Find the next available label .
self . _next_unused_label ( )
# Have we been told to skip this page ?
if self . pages_to_draw and self . page_count not in self . pages_to_draw :
continue
# Add the label to the page . ReportLab stores the added drawing by
# reference so we have to copy it N times .
thislabel = copy ( label )
thislabel . shift ( * self . _calculate_edges ( ) )
self . _current_page . add ( thislabel )
|
def dump_zone_file ( self ) :
"""Generate a zoneinfo compatible zone description table .
Returns :
list : zoneinfo descriptions"""
|
data = [ ]
for zone in sorted ( self , key = attrgetter ( 'country' ) ) :
text = [ '%s %s %s' % ( zone . country , utils . to_iso6709 ( zone . latitude , zone . longitude , format = 'dms' ) [ : - 1 ] , zone . zone ) , ]
if zone . comments :
text . append ( ' %s' % ', ' . join ( zone . comments ) )
data . append ( '' . join ( text ) )
return data
|
def _eval_wrapper ( self , fit_key , q , chiA , chiB , ** kwargs ) :
"""Evaluates the surfinBH3dq8 model ."""
|
chiA = np . array ( chiA )
chiB = np . array ( chiB )
# Warn / Exit if extrapolating
allow_extrap = kwargs . pop ( 'allow_extrap' , False )
self . _check_param_limits ( q , chiA , chiB , allow_extrap )
self . _check_unused_kwargs ( kwargs )
x = [ q , chiA [ 2 ] , chiB [ 2 ] ]
if fit_key == 'mf' or fit_key == 'all' :
mf , mf_err = self . _evaluate_fits ( x , 'mf' )
if fit_key == 'mf' :
return mf , mf_err
if fit_key == 'chif' or fit_key == 'all' :
chifz , chifz_err = self . _evaluate_fits ( x , 'chifz' )
chif = np . array ( [ 0 , 0 , chifz ] )
chif_err = np . array ( [ 0 , 0 , chifz_err ] )
if fit_key == 'chif' :
return chif , chif_err
if fit_key == 'vf' or fit_key == 'all' :
vfx , vfx_err = self . _evaluate_fits ( x , 'vfx' )
vfy , vfy_err = self . _evaluate_fits ( x , 'vfy' )
vf = np . array ( [ vfx , vfy , 0 ] )
vf_err = np . array ( [ vfx_err , vfy_err , 0 ] )
if fit_key == 'vf' :
return vf , vf_err
if fit_key == 'all' :
return mf , chif , vf , mf_err , chif_err , vf_err
|
async def emit(self, name):
    """Run all callbacks registered for the event named 'name',
    awaiting any that are awaitable."""
    for cb in self._event_list[name]:
        if isawaitable(cb):
            await cb
        else:
            cb()
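
# Self-contained sketch of the same emit pattern. The original stores whatever
# was registered and awaits it if awaitable; this is a common variant that calls
# each callback and awaits coroutine results. Class and method names here are
# illustrative, not the original library's.
import asyncio
from inspect import isawaitable

class TinyEmitter:
    def __init__(self):
        self._event_list = {}

    def on(self, name, cb):
        self._event_list.setdefault(name, []).append(cb)
        return self  # allow chained 'on' calls

    async def emit(self, name):
        for cb in self._event_list.get(name, []):
            result = cb()
            if isawaitable(result):  # await coroutine callbacks
                await result

emitter = TinyEmitter()
emitter.on("ready", lambda: print("ready fired"))
asyncio.run(emitter.emit("ready"))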
|
def forum_topic(self):
    """| Comment: The topic this ticket originated from, if any"""
    if self.api and self.forum_topic_id:
        return self.api._get_topic(self.forum_topic_id)
|
def remove_permission(self, file_id, permission_id):
    """Deletes a permission from a file.
    :param file_id: a spreadsheet ID (aka file ID.)
    :type file_id: str
    :param permission_id: an ID for the permission.
    :type permission_id: str"""
    url = '{0}/{1}/permissions/{2}'.format(DRIVE_FILES_API_V2_URL, file_id, permission_id)
    self.request('delete', url)
|
def _fill_auto_fields(model, values):
    """Given a list of models, fill in auto_now and auto_now_add fields
    for upserts. Since django-manager-utils bypasses Django's ORM, these values
    have to be constructed automatically."""
    auto_field_names = [f.attname for f in model._meta.fields if getattr(f, 'auto_now', False) or getattr(f, 'auto_now_add', False)]
    now = timezone.now()
    for value in values:
        for f in auto_field_names:
            setattr(value, f, now)
    return values
|
def exclude_invert(self):  # type: () -> None
    """Inverts the values in self.exclude.

    .. code-block:: python

        >>> import pydarksky
        >>> darksky = pydarksky.DarkSky('0' * 32)
        >>> darksky.EXCLUDES
        ('currently', 'minutely', 'hourly', 'daily', 'alerts', 'flags')
        >>> darksky.exclude = ["alerts", "flags"]
        >>> darksky.exclude
        ['alerts', 'flags']
        >>> darksky.exclude_invert()
        >>> darksky.exclude
        ['currently', 'minutely', 'hourly', 'daily']"""
    tmp = self.exclude
    self._exclude = []
    for i in self.EXCLUDES:
        if i not in tmp:
            self._exclude.append(i)
|
def compress_and_upload ( data , compressed_file , s3_path , multipart_chunk_size_mb = 500 , method = 'gz' , delete = False , access_key = None , secret_key = None ) :
'''Compresses data and uploads to S3.
S3 upload uses ` ` s3cmd ` ` , so you must either :
1 ) Manually configure ` ` s3cmd ` ` prior to use ( typically using ` ` s3cmd - - configure ` ` ) .
2 ) Configure ` ` s3cmd ` ` using ` ` s3 . configure ( ) ` ` .
3 ) Pass your access key and secret key to ` ` compress _ and _ upload ` ` , which will automatically configure s3cmd .
. . note :
` ` s3cmd ` ` configuration only needs to be done once per computer ,
which means that relaunching a cloud instance or Docker image will
require re - configuration of ` ` s3cmd ` ` .
Args :
data : Can be one of three things :
1 ) Path to a single file
2 ) Path to a directory
3 ) A list of one or more paths to files or directories
compressed _ file ( str ) : Path to the compressed file . Required .
s3 _ path ( str ) : The S3 path , with the filename omitted . The S3 filename
will be the basename of the ` ` compressed _ file ` ` . For example : :
compress _ and _ upload ( data = ' / path / to / data ' ,
compressed _ file = ' / path / to / compressed . tar . gz ' ,
s3 _ path = ' s3 : / / my _ bucket / path / to / ' )
will result in an uploaded S3 path of ` ` s3 : / / my _ bucket / path / to / compressed . tar . gz ` `
method ( str ) : Compression method . Options are ` ` ' gz ' ` ` ( gzip ) or ` ` ' bz2 ' ` ` ( bzip2 ) .
Default is ` ` ' gz ' ` ` .
delete ( bool ) : If ` ` True ` ` , the ` ` compressed _ file ` ` will be deleted after upload
to S3 . Default is ` ` False ` ` .
access _ key ( str ) : AWS access key .
secret _ key ( str ) : AWS secret key .'''
|
logger = log . get_logger ( 's3' )
if all ( [ access_key , secret_key ] ) :
configure ( access_key = access_key , secret_key = secret_key , logger = logger )
compress ( data , compressed_file , fmt = method , logger = logger )
put ( compressed_file , s3_path , multipart_chunk_size_mb = multipart_chunk_size_mb , logger = logger )
if delete :
os . unlink ( compressed_file )
|
def _setup ( module , extras ) :
"""Install common submodules"""
|
Qt . __binding__ = module . __name__
for name in list ( _common_members ) + extras :
try :
submodule = _import_sub_module ( module , name )
except ImportError :
try : # For extra modules like sip and shiboken that may not be
# children of the binding .
submodule = __import__ ( name )
except ImportError :
continue
setattr ( Qt , "_" + name , submodule )
if name not in extras : # Store reference to original binding ,
# but don ' t store speciality modules
# such as uic or QtUiTools
setattr ( Qt , name , _new_module ( name ) )
|
def get_confirmed_blockhash(self):
    """Gets the block CONFIRMATION_BLOCKS in the past and returns its block hash."""
    confirmed_block_number = self.web3.eth.blockNumber - self.default_block_num_confirmations
    if confirmed_block_number < 0:
        confirmed_block_number = 0
    return self.blockhash_from_blocknumber(confirmed_block_number)
|
def from_clock_time(cls, clock_time, epoch):
    """Convert from a ClockTime relative to a given epoch."""
    try:
        clock_time = ClockTime(*clock_time)
    except (TypeError, ValueError):
        raise ValueError("Clock time must be a 2-tuple of (s, ns)")
    else:
        ordinal = clock_time.seconds // 86400
        return Date.from_ordinal(ordinal + epoch.date().to_ordinal())
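
# The ordinal arithmetic above amounts to whole days since the epoch. A plain
# standard-library sketch of the same conversion, without the ClockTime/Date types:
from datetime import date, timedelta

seconds, nanoseconds = 200000, 0      # the (s, ns) 2-tuple of a clock time
epoch = date(1970, 1, 1)
print(epoch + timedelta(days=seconds // 86400))  # 1970-01-03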
|
def done(self, warn=True):
    """Is the subprocess done?"""
    if not self.process:
        raise Exception("Not implemented yet or process not started yet, make sure to overload the done() method in your Experiment class")
    self.process.poll()
    if self.process.returncode is None:
        return False
    elif self.process.returncode > 0:
        raise ProcessFailed()
    else:
        self.endtime = datetime.datetime.now()
        return True
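
# Standalone sketch of the underlying poll-and-check pattern with the standard
# library's subprocess module (illustrative only; the Experiment class wires
# this up through self.process).
import subprocess
import sys
import time

proc = subprocess.Popen([sys.executable, "-c", "import time; time.sleep(0.5)"])
while True:
    proc.poll()                       # refresh returncode without blocking
    if proc.returncode is None:
        time.sleep(0.1)               # still running
    elif proc.returncode > 0:
        raise RuntimeError("process failed")
    else:
        break                         # finished cleanly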
|
def is_timeseries ( nc , variable ) :
'''Returns true if the variable is a time series feature type .
: param netCDF4 . Dataset nc : An open netCDF dataset
: param str variable : name of the variable to check'''
|
# x , y , z , t ( o )
# X ( o )
dims = nc . variables [ variable ] . dimensions
cmatrix = coordinate_dimension_matrix ( nc )
for req in ( 'x' , 'y' , 't' ) :
if req not in cmatrix :
return False
if len ( cmatrix [ 'x' ] ) != 0 :
return False
if len ( cmatrix [ 'y' ] ) != 0 :
return False
if 'z' in cmatrix and len ( cmatrix [ 'z' ] ) != 0 :
return False
timevar = get_time_variable ( nc )
# time has to be a coordinate variable in this case
if cmatrix [ 't' ] != ( timevar , ) :
return False
if dims == cmatrix [ 't' ] :
return True
return False
|
def virtual_networks_list_all(**kwargs):
    '''.. versionadded:: 2019.2.0

    List all virtual networks within a subscription.

    CLI Example:

    .. code-block:: bash

        salt-call azurearm_network.virtual_networks_list_all'''
    result = {}
    netconn = __utils__['azurearm.get_client']('network', **kwargs)
    try:
        vnets = __utils__['azurearm.paged_object_to_list'](netconn.virtual_networks.list_all())
        for vnet in vnets:
            result[vnet['name']] = vnet
    except CloudError as exc:
        __utils__['azurearm.log_cloud_error']('network', str(exc), **kwargs)
        result = {'error': str(exc)}
    return result
|
def step2 ( self , pub_key , salt ) :
"""Second authentication step ."""
|
self . _check_initialized ( )
pk_str = binascii . hexlify ( pub_key ) . decode ( )
salt = binascii . hexlify ( salt ) . decode ( )
self . client_session_key , _ , _ = self . session . process ( pk_str , salt )
_LOGGER . debug ( 'Client session key: %s' , self . client_session_key )
# Generate client public and session key proof .
client_public = self . session . public
client_session_key_proof = self . session . key_proof
_LOGGER . debug ( 'Client public: %s, proof: %s' , client_public , client_session_key_proof )
if not self . session . verify_proof ( self . session . key_proof_hash ) :
raise AuthenticationError ( 'proofs do not match (mitm?)' )
return client_public , client_session_key_proof
|
def get_lldp_status(cls, intf):
    """Retrieves the LLDP status."""
    if intf not in cls.topo_intf_obj_dict:
        LOG.error("Interface %s not configured at all", intf)
        return False
    intf_obj = cls.topo_intf_obj_dict.get(intf)
    return intf_obj.get_lldp_status()
|
def parse_vote_info(protobuf: bytes) -> dict:
    '''Decode vote init tx op_return protobuf message and validate it.'''
    vote = pavoteproto.Vote()
    vote.ParseFromString(protobuf)
    assert vote.version > 0, {"error": "Vote info incomplete, version can't be 0."}
    assert vote.start_block < vote.end_block, {"error": "vote can't end in the past."}
    return {"version": vote.version, "description": vote.description, "count_mode": vote.MODE.Name(vote.count_mode), "choices": vote.choices, "start_block": vote.start_block, "end_block": vote.end_block, "vote_metainfo": vote.vote_metainfo}
|
def parse_content ( self , text ) :
"""parse section to formal format
raw _ content : { title : section ( with title ) } . For ` help ` access .
formal _ content : { title : section } but the section has been dedented
without title . For parse instance"""
|
raw_content = self . raw_content
raw_content . clear ( )
formal_collect = { }
with warnings . catch_warnings ( ) :
warnings . simplefilter ( "ignore" )
try :
split = self . visible_empty_line_re . split ( text )
except ValueError : # python > = 3.5
split = [ text ]
option_split_re = self . option_split_re
name = re . compile ( re . escape ( self . option_name ) , re . IGNORECASE )
for text in filter ( lambda x : x and x . strip ( ) , split ) : # logger . warning ( ' get options group : \ n % r ' , text )
with warnings . catch_warnings ( ) :
warnings . simplefilter ( "ignore" )
try :
split_options = option_split_re . split ( text )
except ValueError : # python > = 3.5
continue
split_options . pop ( 0 )
for title , section in zip ( split_options [ : : 2 ] , split_options [ 1 : : 2 ] ) :
prefix , end = name . split ( title )
prefix = prefix . strip ( )
section = section . rstrip ( )
if end . endswith ( '\n' ) :
formal = section
else :
formal = ' ' * len ( title ) + section
formal_collect . setdefault ( prefix , [ ] ) . append ( formal )
# logger . error ( ( title , section ) )
if prefix in raw_content : # TODO : better handling way ?
if self . namedoptions :
log = logger . warning
else :
log = logger . debug
log ( 'duplicated options section %s' , prefix )
raw_content [ prefix ] += '\n%s%s' % ( title , section )
else :
raw_content [ prefix ] = title + section
if formal_collect :
for each_title , values in formal_collect . items ( ) :
value = '\n' . join ( map ( textwrap . dedent , values ) )
formal_collect [ each_title ] = value
self . formal_content = formal_collect
|
def _get_fields ( self , table_name , ** kwargs ) :
"""return all the fields for the given table"""
|
ret = { }
query_str = 'PRAGMA table_info({})' . format ( self . _normalize_table_name ( table_name ) )
fields = self . _query ( query_str , ** kwargs )
# pout . v ( [ dict ( d ) for d in fields ] )
query_str = 'PRAGMA foreign_key_list({})' . format ( self . _normalize_table_name ( table_name ) )
fks = { f [ "from" ] : f for f in self . _query ( query_str , ** kwargs ) }
# pout . v ( [ dict ( d ) for d in fks . values ( ) ] )
pg_types = { "INTEGER" : int , "BIGINT" : long , "DOUBLE PRECISION" : float , "FLOAT" : float , "REAL" : float , "NUMERIC" : decimal . Decimal , "BOOLEAN" : bool , "DATE" : datetime . date , "TIMESTAMP" : datetime . datetime , "CHARACTER" : str , "VARCHAR" : str , "TEXT" : str , "BLOB" : bytearray , }
# the rows we can set : field _ type , name , field _ required , min _ size , max _ size ,
# size , unique , pk , < foreign key info >
# These keys will roughly correspond with schema . Field
# TODO - - we could actually use " type " to get the size because SQLite returns
# a value like VARCHAR [ 32]
for row in fields :
field = { "name" : row [ "name" ] , "field_required" : bool ( row [ "notnull" ] ) or bool ( row [ "pk" ] ) , "pk" : bool ( row [ "pk" ] ) , }
for tname , ty in pg_types . items ( ) :
if row [ "type" ] . startswith ( tname ) :
field [ "field_type" ] = ty
break
if field [ "pk" ] and field [ "field_type" ] is int : # we compensate for SQLite internally setting pk to int
field [ "field_type" ] = long
if row [ "name" ] in fks :
field [ "schema_table_name" ] = fks [ row [ "name" ] ] [ "table" ]
field [ "ref_table_name" ] = fks [ row [ "name" ] ] [ "table" ]
ret [ field [ "name" ] ] = field
return ret
|
def read ( path , corpus = True , index_by = 'uri' , follow_links = False , ** kwargs ) :
"""Read bibliographic data from Zotero RDF .
Examples
Assuming that the Zotero collection was exported to the directory
` ` / my / working / dir ` ` with the name ` ` myCollection ` ` , a subdirectory should
have been created at ` ` / my / working / dir / myCollection ` ` , and an RDF file
should exist at ` ` / my / working / dir / myCollection / myCollection . rdf ` ` .
. . code - block : : python
> > > from tethne . readers . zotero import read
> > > myCorpus = read ( ' / my / working / dir / myCollection ' )
> > > myCorpus
< tethne . classes . corpus . Corpus object at 0x10047e350 >
Parameters
path : str
Path to the output directory created by Zotero . Expected to contain a
file called ` ` [ directory _ name ] . rdf ` ` .
corpus : bool
( default : True ) If True , returns a : class : ` . Corpus ` \ . Otherwise ,
returns a list of : class : ` . Paper ` \ s .
index _ by : str
( default : ` ` ' identifier ' ` ` ) : class : ` . Paper ` attribute name to use as
the primary indexing field . If the field is missing on a
: class : ` . Paper ` \ , a unique identifier will be generated based on the
title and author names .
follow _ links : bool
If ` ` True ` ` , attempts to load full - text content from attached files
( e . g . PDFs with embedded text ) . Default : False .
kwargs : kwargs
Passed to the : class : ` . Corpus ` constructor .
Returns
corpus : : class : ` . Corpus `"""
|
# TODO : is there a case where ` from _ dir ` would make sense ?
parser = ZoteroParser ( path , index_by = index_by , follow_links = follow_links )
papers = parser . parse ( )
if corpus :
c = Corpus ( papers , index_by = index_by , ** kwargs )
if c . duplicate_papers :
warnings . warn ( "Duplicate papers detected. Use the 'duplicate_papers' attribute of the corpus to get the list" , UserWarning )
for fset_name , fset_values in parser . full_text . iteritems ( ) :
c . features [ fset_name ] = StructuredFeatureSet ( fset_values )
return c
return papers
|
def check_perms ( perms , user , slug , raise_exception = False ) :
"""a helper user to check if a user has the permissions
for a given slug"""
|
if isinstance ( perms , string_types ) :
perms = { perms }
else :
perms = set ( perms )
allowed_users = ACLRule . get_users_for ( perms , slug )
if allowed_users :
return user in allowed_users
if perms . issubset ( set ( WALIKI_ANONYMOUS_USER_PERMISSIONS ) ) :
return True
if is_authenticated ( user ) and perms . issubset ( set ( WALIKI_LOGGED_USER_PERMISSIONS ) ) :
return True
# First check if the user has the permission ( even anon users )
if user . has_perms ( [ 'waliki.%s' % p for p in perms ] ) :
return True
# In case the 403 handler should be called raise the exception
if raise_exception :
raise PermissionDenied
# As the last resort , show the login form
return False
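The precedence of the checks reduces to a chain of set comparisons; a framework-free sketch of just that chain (hypothetical permission names standing in for the WALIKI_* settings, and omitting the slug-specific ACLRule lookup):

ANONYMOUS_PERMS = {'view_page'}              # stand-in for WALIKI_ANONYMOUS_USER_PERMISSIONS
LOGGED_PERMS = {'view_page', 'add_page'}     # stand-in for WALIKI_LOGGED_USER_PERMISSIONS

def check_perms_sketch(perms, user_is_authenticated, user_perms):
    perms = {perms} if isinstance(perms, str) else set(perms)
    if perms.issubset(ANONYMOUS_PERMS):                          # anyone, even anonymous
        return True
    if user_is_authenticated and perms.issubset(LOGGED_PERMS):   # any logged-in user
        return True
    return perms.issubset(user_perms)                            # explicit per-user grants

check_perms_sketch('view_page', False, set())             # True
check_perms_sketch(['add_page'], True, set())             # True
check_perms_sketch('delete_page', True, {'delete_page'})  # True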
|
def generate_srt_from_sjson ( sjson_subs ) :
"""Generate transcripts from sjson to SubRip ( * . srt ) .
Arguments :
sjson _ subs ( dict ) : ` sjson ` subs .
Returns :
Subtitles in SRT format ."""
|
output = ''
equal_len = len ( sjson_subs [ 'start' ] ) == len ( sjson_subs [ 'end' ] ) == len ( sjson_subs [ 'text' ] )
if not equal_len :
return output
for i in range ( len ( sjson_subs [ 'start' ] ) ) :
item = SubRipItem ( index = i , start = SubRipTime ( milliseconds = sjson_subs [ 'start' ] [ i ] ) , end = SubRipTime ( milliseconds = sjson_subs [ 'end' ] [ i ] ) , text = sjson_subs [ 'text' ] [ i ] )
output += ( six . text_type ( item ) )
output += '\n'
return output
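A small usage sketch, assuming this module is importable and ``pysrt``/``six`` are installed; the timestamps are made-up values:

sjson_subs = {
    'start': [0, 2000],
    'end': [1500, 3500],
    'text': ['Hello', 'World'],
}
print(generate_srt_from_sjson(sjson_subs))
# roughly:
# 0
# 00:00:00,000 --> 00:00:01,500
# Hello
#
# 1
# 00:00:02,000 --> 00:00:03,500
# World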
|
def run ( self ) :
"""Continually write images to the filename specified by a command queue ."""
|
if not self . camera . is_running :
self . camera . start ( )
while True :
if not self . cmd_q . empty ( ) :
cmd = self . cmd_q . get ( )
if cmd [ 0 ] == 'stop' :
self . out . close ( )
self . recording = False
elif cmd [ 0 ] == 'start' :
filename = cmd [ 1 ]
self . out = si . FFmpegWriter ( filename )
self . recording = True
self . count = 0
if self . recording :
if self . count == 0 :
image , _ , _ = self . camera . frames ( )
if self . data_buf is None :
self . data_buf = np . zeros ( [ 1 , image . height , image . width , image . channels ] )
self . data_buf [ 0 , ... ] = image . raw_data
self . out . writeFrame ( self . data_buf )
self . count += 1
if self . count == self . rate :
self . count = 0
|
def _densMoments_approx_higherorder_gaussxpolyInts ( self , ll , ul , maxj ) :
"""Calculate all of the polynomial x Gaussian integrals occuring
in the higher - order terms , recursively"""
|
gaussxpolyInt = numpy . zeros ( ( maxj , len ( ul ) ) )
gaussxpolyInt [ - 1 ] = 1. / numpy . sqrt ( numpy . pi ) * ( numpy . exp ( - ll ** 2. ) - numpy . exp ( - ul ** 2. ) )
gaussxpolyInt [ - 2 ] = 1. / numpy . sqrt ( numpy . pi ) * ( numpy . exp ( - ll ** 2. ) * ll - numpy . exp ( - ul ** 2. ) * ul ) + 0.5 * ( special . erf ( ul ) - special . erf ( ll ) )
for jj in range ( maxj - 2 ) :
gaussxpolyInt [ - jj - 3 ] = 1. / numpy . sqrt ( numpy . pi ) * ( numpy . exp ( - ll ** 2. ) * ll ** ( jj + 2 ) - numpy . exp ( - ul ** 2. ) * ul ** ( jj + 2 ) ) + 0.5 * ( jj + 2 ) * gaussxpolyInt [ - jj - 1 ]
return gaussxpolyInt
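As a cross-check, the first two assignments and the loop appear to tabulate the normalized integrals $I_n = \frac{2}{\sqrt{\pi}} \int_{\ell}^{u} t^{n} e^{-t^{2}} dt$ via the integration-by-parts recursion $I_n = \frac{1}{\sqrt{\pi}} \left( \ell^{\,n-1} e^{-\ell^{2}} - u^{\,n-1} e^{-u^{2}} \right) + \frac{n-1}{2} I_{n-2}$, so that ``gaussxpolyInt[-n]`` holds $I_n$ for $n = 1, \dots, \mathrm{maxj}$.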
|
def parent ( self ) :
"""Cache a instance of self parent class .
: return object : instance of self . Meta . parent class"""
|
if not self . _meta . parent :
return None
if not self . __parent__ :
self . __parent__ = self . _meta . parent ( )
return self . __parent__
|
def getFreeEnergyDifferences ( self , compute_uncertainty = True , uncertainty_method = None , warning_cutoff = 1.0e-10 , return_theta = False ) :
"""Get the dimensionless free energy differences and uncertainties among all thermodynamic states .
Parameters
compute _ uncertainty : bool , optional
If False , the uncertainties will not be computed ( default : True )
uncertainty _ method : string , optional
Choice of method used to compute asymptotic covariance method ,
or None to use default . See help for computeAsymptoticCovarianceMatrix ( )
for more information on various methods . ( default : svd )
warning _ cutoff : float , optional
Warn if squared - uncertainty is negative and larger in magnitude
than this number ( default : 1.0e - 10)
return _ theta : bool , optional
Whether or not to return the theta matrix . Can be useful for complicated differences .
Returns
Deltaf _ ij : np . ndarray , float , shape = ( K , K )
Deltaf _ ij [ i , j ] is the estimated free energy difference
dDeltaf _ ij : np . ndarray , float , shape = ( K , K )
dDeltaf _ ij [ i , j ] is the estimated statistical uncertainty
( one standard deviation ) in Deltaf _ ij [ i , j ]
Notes
Computation of the covariance matrix may take some time for large K .
The reported statistical uncertainty should , in the asymptotic limit , reflect one standard deviation for the normal distribution of the estimate .
The true free energy difference should fall within the interval [ - df , + df ] centered on the estimate 68 % of the time , and within
the interval [ - 2 df , + 2 df ] centered on the estimate 95 % of the time .
This will break down in cases where the number of samples is not large enough to reach the asymptotic normal limit .
See Section III of Reference [ 1 ] .
Examples
> > > from pymbar import testsystems
> > > ( x _ n , u _ kn , N _ k , s _ n ) = testsystems . HarmonicOscillatorsTestCase ( ) . sample ( mode = ' u _ kn ' )
> > > mbar = MBAR ( u _ kn , N _ k )
> > > [ Deltaf _ ij , dDeltaf _ ij ] = mbar . getFreeEnergyDifferences ( )"""
|
# Compute free energy differences .
f_i = np . matrix ( self . f_k )
Deltaf_ij = f_i - f_i . transpose ( )
# zero out numerical error for thermodynamically identical states
self . _zerosamestates ( Deltaf_ij )
returns = [ ]
returns . append ( np . array ( Deltaf_ij ) )
if compute_uncertainty or return_theta : # Compute asymptotic covariance matrix .
Theta_ij = self . _computeAsymptoticCovarianceMatrix ( np . exp ( self . Log_W_nk ) , self . N_k , method = uncertainty_method )
if compute_uncertainty : # compute the covariance component without doing the double loop .
# d2DeltaF = Theta _ ij [ i , i ] + Theta _ ij [ j , j ] - 2.0 * Theta _ ij [ i , j ]
diag = Theta_ij . diagonal ( )
d2DeltaF = diag + diag . transpose ( ) - 2 * Theta_ij
# zero out numerical error for thermodynamically identical states
self . _zerosamestates ( d2DeltaF )
# check for any numbers below zero .
if np . any ( d2DeltaF < 0.0 ) :
    if np . any ( d2DeltaF < - warning_cutoff ) : # warn if a squared uncertainty is negative beyond the cutoff
        print ( "A squared uncertainty is negative. d2DeltaF = %e" % np . min ( d2DeltaF ) )
    else : # small negative values are numerical noise; clamp them to zero
        d2DeltaF [ d2DeltaF < 0.0 ] = 0.0
# take the square root of the entries of the matrix
dDeltaf_ij = np . sqrt ( d2DeltaF )
# Return matrix of free energy differences and uncertainties .
returns . append ( np . array ( dDeltaf_ij ) )
if ( return_theta ) :
returns . append ( np . array ( Theta_ij ) )
return returns
|
def rename ( self , source_col : str , dest_col : str ) :
"""Renames a column in the main dataframe
: param source _ col : name of the column to rename
: type source _ col : str
: param dest _ col : new name of the column
: type dest _ col : str
: example : ` ` ds . rename ( " Col 1 " , " New col " ) ` `"""
|
try :
self . df = self . df . rename ( columns = { source_col : dest_col } )
except Exception as e :
self . err ( e , self . rename , "Can not rename column" )
return
self . ok ( "Column" , source_col , "renamed" )
|
def to_binary ( value , encoding = 'utf-8' ) :
"""Convert value to binary string , default encoding is utf - 8
: param value : Value to be converted
: param encoding : Desired encoding"""
|
if not value :
return b''
if isinstance ( value , six . binary_type ) :
return value
if isinstance ( value , six . text_type ) :
return value . encode ( encoding )
return to_text ( value ) . encode ( encoding )
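A quick usage sketch (assuming ``six`` is installed and ``to_text`` is the companion text-coercion helper from the same module):

to_binary(u'héllo')   # b'h\xc3\xa9llo'
to_binary(b'raw')     # b'raw' -- already bytes, returned unchanged
to_binary('')         # b''    -- falsy values short-circuit to empty bytes
to_binary(42)         # falls back to to_text(42).encode('utf-8'), i.e. b'42' if to_text stringifies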
|
def get ( self , item ) :
"""Returns the container whose name is provided as ' item ' . If ' item ' is
not a string , the original item is returned unchanged ."""
|
if isinstance ( item , six . string_types ) :
item = super ( StorageClient , self ) . get ( item )
return item
|
def make_tm_request ( method , uri , ** kwargs ) :
"""Make a request to TextMagic REST APIv2.
: param str method : " POST " , " GET " , " PUT " or " DELETE "
: param str uri : URI to process request .
: return : : class : ` Response `"""
|
headers = kwargs . get ( "headers" , { } )
user_agent = "textmagic-python/%s (Python %s)" % ( __version__ , platform . python_version ( ) )
headers [ "User-agent" ] = user_agent
headers [ "Accept-Charset" ] = "utf-8"
if "Accept-Language" not in headers :
headers [ "Accept-Language" ] = "en-us"
if ( method == "POST" or method == "PUT" ) and "Content-Type" not in headers :
headers [ "Content-Type" ] = "application/x-www-form-urlencoded"
kwargs [ "headers" ] = headers
if "Accept" not in headers :
headers [ "Accept" ] = "application/json"
headers [ "X-TM-Username" ] , headers [ "X-TM-Key" ] = kwargs [ "auth" ]
response = make_request ( method , uri , ** kwargs )
if not response . ok :
try :
resp_body = json . loads ( response . content )
message = resp_body [ "message" ]
errors = resp_body [ "errors" ]
except :
message = response . content
errors = None
raise TextmagicRestException ( status = response . status , method = method , uri = response . url , msg = message , errors = errors )
return response
|
def ListChildren ( self , urn ) :
"""Lists children of a given urn . Resulting list is cached ."""
|
result = self . MultiListChildren ( [ urn ] )
try :
return result [ urn ]
except KeyError :
return [ ]
|
def output ( self , _filename ) :
"""_ filename is not used
Args :
_ filename ( string )"""
|
txt = "Analyze of {}\n" . format ( self . slither . filename )
txt += self . get_detectors_result ( )
for contract in self . slither . contracts_derived :
txt += "\nContract {}\n" . format ( contract . name )
txt += self . is_complex_code ( contract )
is_erc20 = contract . is_erc20 ( )
txt += '\tNumber of functions: {}\n' . format ( self . _number_functions ( contract ) )
txt += "\tIs ERC20 token: {}\n" . format ( is_erc20 )
if is_erc20 :
txt += self . get_summary_erc20 ( contract )
self . info ( txt )
|
def _to_chimera ( M , N , L , q ) :
"Converts a qubit ' s linear index to chimera coordinates ."
|
return ( q // N // L // 2 , ( q // L // 2 ) % N , ( q // L ) % 2 , q % L )
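The mapping can be inverted by reassembling the same divisions; a minimal round-trip sketch with a hypothetical ``_from_chimera`` helper and small, made-up Chimera dimensions (M rows, N columns, shore size L):

def _from_chimera(M, N, L, coords):
    "Inverse of _to_chimera: (row, column, shore, in-shore offset) -> linear index."
    i, j, u, k = coords
    return ((i * N + j) * 2 + u) * L + k

M, N, L = 2, 3, 4   # made-up dimensions for the check
for q in range(M * N * 2 * L):
    assert _from_chimera(M, N, L, _to_chimera(M, N, L, q)) == q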
|
def get_endpoint ( self , session , ** kwargs ) :
"""Get the HubiC storage endpoint uri .
If the current session has not been authenticated , this will trigger a
new authentication to the HubiC OAuth service .
: param keystoneclient . Session session : The session object to use for
queries .
: raises keystoneclient . exceptions . AuthorizationFailure : if something
goes wrong .
: returns : The uri to use for object - storage v1 requests .
: rtype : string"""
|
if self . endpoint is None :
try :
self . _refresh_tokens ( session )
self . _fetch_credentials ( session )
except :
raise AuthorizationFailure ( )
return self . endpoint
|
def basic_auth_tween_factory ( handler , registry ) :
"""Do basic authentication , parse HTTP _ AUTHORIZATION and set remote _ user
variable to request"""
|
def basic_auth_tween ( request ) :
remote_user = get_remote_user ( request )
if remote_user is not None :
request . environ [ 'REMOTE_USER' ] = remote_user [ 0 ]
return handler ( request )
return basic_auth_tween
|
def image ( self ) :
"""Returns a json - schema document that represents a single image entity ."""
|
uri = "/%s/image" % self . uri_base
resp , resp_body = self . api . method_get ( uri )
return resp_body
|
def create_index ( self , index , index_type = GEO2D ) :
"""Create an index on a given attribute
: param str index : Attribute to set index on
: param str index _ type : See PyMongo index types for further information , defaults to GEO2D index ."""
|
self . logger . info ( "Adding %s index to stores on attribute: %s" % ( index_type , index ) )
yield self . collection . create_index ( [ ( index , index_type ) ] )
|
def extractFont ( self , xref = 0 , info_only = 0 ) :
"""extractFont ( self , xref = 0 , info _ only = 0 ) - > PyObject *"""
|
if self . isClosed or self . isEncrypted :
raise ValueError ( "operation illegal for closed / encrypted doc" )
return _fitz . Document_extractFont ( self , xref , info_only )
|
def _resolve_name ( name , package , level ) :
"""Return the absolute name of the module to be imported ."""
|
if not hasattr ( package , 'rindex' ) :
raise ValueError ( "'package' not set to a string" )
dot = len ( package )
for x in xrange ( level , 1 , - 1 ) :
try :
dot = package . rindex ( '.' , 0 , dot )
except ValueError :
raise ValueError ( "attempted relative import beyond top-level " "package" )
return "%s.%s" % ( package [ : dot ] , name )
|
def fast_lindblad_terms ( gamma , unfolding , matrix_form = False , file_name = None , return_code = False ) :
r"""Return a fast function that returns the Lindblad terms .
We test a basic two - level system .
> > > import numpy as np
> > > Ne = 2
> > > gamma21 = 2 * np . pi * 6e6
> > > gamma = np . array ( [ [ 0.0 , - gamma21 ] ,
. . . [ gamma21 , 0.0 ] ] )
> > > rhos = np . array ( [ [ 0.6 , 3 + 2j ] ,
. . . [ 3-2j , 0.4 ] ] )
A map to unfold the density matrix .
> > > unfolding = Unfolding ( Ne , True , True , True )
We obtain a function to calculate Lindblad terms .
> > > lindblad _ terms = fast _ lindblad _ terms ( gamma , unfolding )
Apply this to a density matrix .
> > > rhos = np . array ( [ [ 0.6 , 3 + 2j ] ,
. . . [ 3-2j , 0.4 ] ] )
> > > rhosv = unfolding ( rhos )
> > > rhs _ lindblad = lindblad _ terms ( rhosv )
> > > print ( rhs _ lindblad )
[ - 15079644.7372 - 56548667.7646 37699111.8431]"""
|
Ne = unfolding . Ne
Nrho = unfolding . Nrho
Mu = unfolding . Mu
# We establish the arguments of the output function .
if True :
code = ""
code += "def lindblad_terms("
if not matrix_form :
code += "rho, "
if code [ - 2 : ] == ", " :
code = code [ : - 2 ]
code += "):\n"
# We initialize the output and auxiliaries .
if True : # We introduce the factor that multiplies all terms .
if matrix_form :
code += " A = np.zeros((" + str ( Nrho ) + ", " + str ( Nrho )
if not unfolding . real :
code += "), complex)\n\n"
else :
code += "))\n\n"
if unfolding . normalized :
code += " b = np.zeros((" + str ( Nrho )
if not unfolding . real :
code += "), complex)\n\n"
else :
code += "))\n\n"
else :
code += " rhs = np.zeros((" + str ( Nrho )
if not unfolding . real :
code += "), complex)\n\n"
else :
code += "))\n\n"
for a in range ( Ne ) :
for b in range ( a ) : # The first term is of the form
# gamma _ ab * rho _ aa | b > < b |
if not ( unfolding . normalized and b == 0 ) :
coef = gamma [ a , b ]
if unfolding . real :
mu = Mu ( 1 , b , b )
nu = Mu ( 1 , a , a )
else :
mu = Mu ( 0 , b , b )
nu = Mu ( 0 , a , a )
code += term_code ( mu , nu , coef , matrix_form , False )
# The second term is of the form
# sum _ j - gamma _ ab / 2 rho _ aj | a > < j |
# for a lower triangular unfolding , this j runs from 1 to a .
for j in range ( a ) :
coef = - gamma [ a , b ] * 0.5
if unfolding . real :
mur = Mu ( 1 , a , j )
code += term_code ( mur , mur , coef , matrix_form , False )
mui = Mu ( - 1 , a , j )
code += term_code ( mui , mui , coef , matrix_form , False )
else :
mu = Mu ( 0 , a , j )
code += term_code ( mu , mu , coef , matrix_form , False )
# The third term is of the form
# - sum _ i 1/2 rho _ ia | i > < a |
# for a lower triangular unfolding , this i runs from a to Ne .
for i in range ( a + 1 , Ne ) :
coef = - gamma [ a , b ] * 0.5
if unfolding . real :
mur = Mu ( 1 , i , a )
code += term_code ( mur , mur , coef , matrix_form , False )
mui = Mu ( - 1 , i , a )
code += term_code ( mui , mui , coef , matrix_form , False )
else :
mu = Mu ( 0 , i , a )
code += term_code ( mu , mu , coef , matrix_form , False )
# We missed one term in each of the previous fors , that together
# correspond to
# - gamma _ ab * rho _ aa | a > < a |
coef = - gamma [ a , b ]
if unfolding . real :
mu = Mu ( 1 , a , a )
else :
mu = Mu ( 0 , a , a )
code += term_code ( mu , mu , coef , matrix_form , False )
# We finish the code .
if True :
if matrix_form :
if unfolding . normalized :
code += " return A, b\n"
else :
code += " return A\n"
else :
code += " return rhs\n"
# We write the code to file if provided , and execute it .
if True :
if file_name is not None :
f = file ( file_name + ".py" , "w" )
f . write ( code )
f . close ( )
lindblad_terms = code
if not return_code : exec lindblad_terms
return lindblad_terms
|
def get_user_action_sets ( self , user_action_set_id , version = 'v1.0' ) :
"""获取数据源信息
: param user _ action _ set _ id : 数据源唯一ID
: param version : 版本号 v1.0"""
|
return self . _get ( 'user_action_sets/get' , params = { 'version' : version , 'user_action_set_id' : user_action_set_id } , result_processor = lambda x : x [ 'data' ] [ 'list' ] )
|
def pipe_fetchdata ( context = None , _INPUT = None , conf = None , ** kwargs ) :
"""A source that fetches and parses an XML or JSON file . Loopable .
Parameters
context : pipe2py . Context object
_ INPUT : pipeforever pipe or an iterable of items or fields
conf : {
' URL ' : { ' value ' : < url > } ,
' path ' : { ' value ' : < dot separated path to data list > }
Yields
_ OUTPUT : items
Examples
> > > from os import path as p
> > > from pipe2py . modules . pipeforever import pipe _ forever
> > > parent = p . dirname ( p . dirname ( _ _ file _ _ ) )
> > > abspath = p . abspath ( p . join ( parent , ' data ' , ' gigs . json ' ) )
> > > path = ' value . items '
> > > url = " file : / / % s " % abspath
> > > conf = { ' URL ' : { ' value ' : url } , ' path ' : { ' value ' : path } }
> > > pipe _ fetchdata ( _ INPUT = pipe _ forever ( ) , conf = conf ) . next ( ) . keys ( ) [ : 5]
[ u ' y : repeatcount ' , u ' description ' , u ' pubDate ' , u ' title ' , u ' y : published ' ]
> > > abspath = p . abspath ( p . join ( parent , ' data ' , ' places . xml ' ) )
> > > path = ' appointment '
> > > url = " file : / / % s " % abspath
> > > conf = { ' URL ' : { ' value ' : url } , ' path ' : { ' value ' : path } }
> > > sorted ( pipe _ fetchdata ( _ INPUT = pipe _ forever ( ) , conf = conf ) . next ( ) . keys ( ) )
[ ' alarmTime ' , ' begin ' , ' duration ' , ' places ' , ' subject ' , ' uid ' ]
> > > conf = { ' URL ' : { ' value ' : url } , ' path ' : { ' value ' : ' ' } }
> > > sorted ( pipe _ fetchdata ( _ INPUT = pipe _ forever ( ) , conf = conf ) . next ( ) . keys ( ) )
[ ' appointment ' , ' reminder ' ]"""
|
# todo : iCal and KML
funcs = get_splits ( None , conf , ** cdicts ( opts , kwargs ) )
parsed = get_parsed ( _INPUT , funcs [ 0 ] )
results = starmap ( parse_result , parsed )
items = imap ( utils . gen_items , results )
_OUTPUT = utils . multiplex ( items )
return _OUTPUT
|
def unwrap ( self , message , signature ) :
"""[ MS - NLMP ] v28.0 2016-07-14
3.4.7 GSS _ UnwrapEx ( )
Emulates the GSS _ Unwrap ( ) implementation to unseal messages and verify the signature
sent matches what has been computed locally . Will throw an Exception if the signature
doesn ' t match
@ param message : The message data received from the server
@ param signature : The signature of the message
@ return message : The message that has been unsealed if flags are set"""
|
if self . negotiate_flags & NegotiateFlags . NTLMSSP_NEGOTIATE_SEAL :
message = self . _unseal_message ( message )
self . _verify_signature ( message , signature )
elif self . negotiate_flags & NegotiateFlags . NTLMSSP_NEGOTIATE_SIGN :
self . _verify_signature ( message , signature )
return message
|
def members ( self , is_manager = None ) :
"""Retrieve members of the scope .
: param is _ manager : ( optional ) set to True to return only Scope members that are also managers .
: type is _ manager : bool
: return : List of members ( usernames )
Examples
> > > members = project . members ( )
> > > managers = project . members ( is _ manager = True )"""
|
if not is_manager :
return [ member for member in self . _json_data [ 'members' ] if member [ 'is_active' ] ]
else :
return [ member for member in self . _json_data [ 'members' ] if member . get ( 'is_active' , False ) and member . get ( 'is_manager' , False ) ]
|
def get_proxy_ticket_for ( self , service ) :
"""Verifies CAS 2.0 + XML - based authentication ticket .
: param : service
Returns username on success and None on failure ."""
|
if not settings . CAS_PROXY_CALLBACK :
raise CasConfigException ( "No proxy callback set in settings" )
params = { 'pgt' : self . tgt , 'targetService' : service }
url = ( urljoin ( settings . CAS_SERVER_URL , 'proxy' ) + '?' + urlencode ( params ) )
page = urlopen ( url )
try :
response = page . read ( )
tree = ElementTree . fromstring ( response )
if tree [ 0 ] . tag . endswith ( 'proxySuccess' ) :
return tree [ 0 ] [ 0 ] . text
else :
logger . warning ( 'Failed to get proxy ticket' )
raise CasTicketException ( 'Failed to get proxy ticket: %s' % tree [ 0 ] . text . strip ( ) )
finally :
page . close ( )
|
def _create_tags ( ctx ) :
"create all classes and put them in ctx"
|
for ( tag , info ) in _TAGS . items ( ) :
class_name = tag . title ( )
quote_ , compact , self_closing , docs = info
def __init__ ( self , * childs , ** attrs ) :
TagBase . __init__ ( self , childs , attrs )
cls = type ( class_name , ( TagBase , ) , { "__doc__" : docs , "__init__" : __init__ } )
cls . QUOTE = quote_
cls . COMPACT = compact
cls . SELF_CLOSING = self_closing
ctx [ class_name ] = cls
|
def se_clearing_code_bank_info ( clearing : str ) -> ( str , int ) :
"""Returns Sweden bank info by clearning code .
: param clearing : 4 - digit clearing code
: return : ( Bank name , account digit count ) or ( ' ' , None ) if not found"""
|
from jutil . bank_const_se import SE_BANK_CLEARING_LIST
for name , begin , end , acc_digits in SE_BANK_CLEARING_LIST :
if begin <= clearing <= end :
return name , acc_digits
return '' , None
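A self-contained sketch of the same range-table lookup with a made-up two-entry table (the real entries live in ``jutil.bank_const_se``); the ``begin <= clearing <= end`` comparison is lexicographic, which works because the clearing codes are equal-length digit strings:

HYPOTHETICAL_CLEARING_LIST = [
    # (bank name, first clearing, last clearing, account digit count)
    ('Example Bank A', '1100', '1199', 7),
    ('Example Bank B', '6000', '6999', 9),
]

def lookup(clearing: str):
    for name, begin, end, acc_digits in HYPOTHETICAL_CLEARING_LIST:
        if begin <= clearing <= end:
            return name, acc_digits
    return '', None

print(lookup('6789'))   # ('Example Bank B', 9)
print(lookup('9999'))   # ('', None)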
|
def build_ast_schema ( document_ast : DocumentNode , assume_valid : bool = False , assume_valid_sdl : bool = False , ) -> GraphQLSchema :
"""Build a GraphQL Schema from a given AST .
This takes the ast of a schema document produced by the parse function in
src / language / parser . py .
If no schema definition is provided , then it will look for types named Query
and Mutation .
Given that AST it constructs a GraphQLSchema . The resulting schema has no
resolve methods , so execution will use default resolvers .
When building a schema from a GraphQL service ' s introspection result , it might
be safe to assume the schema is valid . Set ` assume _ valid ` to True to assume the
produced schema is valid . Set ` assume _ valid _ sdl ` to True to assume it is already
a valid SDL document ."""
|
if not isinstance ( document_ast , DocumentNode ) :
raise TypeError ( "Must provide a Document AST." )
if not ( assume_valid or assume_valid_sdl ) :
from . . validation . validate import assert_valid_sdl
assert_valid_sdl ( document_ast )
schema_def : Optional [ SchemaDefinitionNode ] = None
type_defs : List [ TypeDefinitionNode ] = [ ]
directive_defs : List [ DirectiveDefinitionNode ] = [ ]
append_directive_def = directive_defs . append
for def_ in document_ast . definitions :
if isinstance ( def_ , SchemaDefinitionNode ) :
schema_def = def_
elif isinstance ( def_ , TypeDefinitionNode ) :
def_ = cast ( TypeDefinitionNode , def_ )
type_defs . append ( def_ )
elif isinstance ( def_ , DirectiveDefinitionNode ) :
append_directive_def ( def_ )
def resolve_type ( type_name : str ) -> GraphQLNamedType :
type_ = type_map . get ( type_name )
if not type_ :
raise TypeError ( f"Type '{type_name}' not found in document." )
return type_
ast_builder = ASTDefinitionBuilder ( assume_valid = assume_valid , resolve_type = resolve_type )
type_map = { node . name . value : ast_builder . build_type ( node ) for node in type_defs }
if schema_def :
operation_types = get_operation_types ( schema_def )
else :
operation_types = { OperationType . QUERY : "Query" , OperationType . MUTATION : "Mutation" , OperationType . SUBSCRIPTION : "Subscription" , }
directives = [ ast_builder . build_directive ( directive_def ) for directive_def in directive_defs ]
# If specified directives were not explicitly declared , add them .
if not any ( directive . name == "skip" for directive in directives ) :
directives . append ( GraphQLSkipDirective )
if not any ( directive . name == "include" for directive in directives ) :
directives . append ( GraphQLIncludeDirective )
if not any ( directive . name == "deprecated" for directive in directives ) :
directives . append ( GraphQLDeprecatedDirective )
query_type = operation_types . get ( OperationType . QUERY )
mutation_type = operation_types . get ( OperationType . MUTATION )
subscription_type = operation_types . get ( OperationType . SUBSCRIPTION )
return GraphQLSchema ( # Note : While this could make early assertions to get the correctly
# typed values below , that would throw immediately while type system
# validation with ` validate _ schema ( ) ` will produce more actionable results .
query = cast ( GraphQLObjectType , type_map . get ( query_type ) ) if query_type else None , mutation = cast ( GraphQLObjectType , type_map . get ( mutation_type ) ) if mutation_type else None , subscription = cast ( GraphQLObjectType , type_map . get ( subscription_type ) ) if subscription_type else None , types = list ( type_map . values ( ) ) , directives = directives , ast_node = schema_def , assume_valid = assume_valid , )
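A typical call pairs this with ``parse``; a short sketch assuming ``graphql-core`` 3.x is installed (where the same function is also exposed as ``graphql.build_ast_schema``):

from graphql import parse

document_ast = parse("""
type Query {
  hello: String
}
""")
schema = build_ast_schema(document_ast)
print(schema.query_type.name)   # Query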
|
def validate ( self , value , model_instance ) :
"""Check value is a valid JSON string , raise ValidationError on
error ."""
|
if isinstance ( value , six . string_types ) :
super ( JSONField , self ) . validate ( value , model_instance )
try :
json . loads ( value )
except Exception as err :
raise ValidationError ( str ( err ) )
|
def _storestr ( ins ) :
"""Stores a string value into a memory address .
It copies content of 2nd operand ( string ) , into 1st , reallocating
dynamic memory for the 1st str . This instruction DOES ALLOW
immediate strings for the 2nd parameter , starting with '#' .
Must prepend ' # ' ( immediate sigil ) to 1st operand , as we need
the & address of the destination ."""
|
op1 = ins . quad [ 1 ]
indirect = op1 [ 0 ] == '*'
if indirect :
op1 = op1 [ 1 : ]
immediate = op1 [ 0 ] == '#'
if immediate and not indirect :
raise InvalidIC ( 'storestr does not allow immediate destination' , ins . quad )
if not indirect :
op1 = '#' + op1
tmp1 , tmp2 , output = _str_oper ( op1 , ins . quad [ 2 ] , no_exaf = True )
if not tmp2 :
output . append ( 'call __STORE_STR' )
REQUIRES . add ( 'storestr.asm' )
else :
output . append ( 'call __STORE_STR2' )
REQUIRES . add ( 'storestr2.asm' )
return output
|
def _compute_mean ( self , C , f0 , f1 , f2 , SC , mag , rrup , idxs , mean , scale_fac ) :
"""Compute mean value ( for a set of indexes ) without site amplification
terms . This is equation ( 5 ) , p . 2191 , without S term ."""
|
mean [ idxs ] = ( C [ 'c1' ] + C [ 'c2' ] * mag + C [ 'c3' ] * ( mag ** 2 ) + ( C [ 'c4' ] + C [ 'c5' ] * mag ) * f1 [ idxs ] + ( C [ 'c6' ] + C [ 'c7' ] * mag ) * f2 [ idxs ] + ( C [ 'c8' ] + C [ 'c9' ] * mag ) * f0 [ idxs ] + C [ 'c10' ] * rrup [ idxs ] + self . _compute_stress_drop_adjustment ( SC , mag , scale_fac ) )
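Written out, the term assembled above (before site amplification) reads $\mathrm{mean} = c_1 + c_2 M + c_3 M^{2} + (c_4 + c_5 M) f_1 + (c_6 + c_7 M) f_2 + (c_8 + c_9 M) f_0 + c_{10} R_{\mathrm{rup}} + \Delta_{SC}$, where $f_0$, $f_1$, $f_2$ are the precomputed distance terms and $\Delta_{SC}$ is the stress-drop adjustment returned by ``_compute_stress_drop_adjustment``.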
|
def get_qemus ( self ) :
"""Get the maximum ID of the Qemu VMs
: return : Maximum Qemu VM ID
: rtype : int"""
|
qemu_vm_list = [ ]
qemu_vm_max = None
for node in self . nodes :
if node [ 'type' ] == 'QemuVM' :
qemu_vm_list . append ( node [ 'qemu_id' ] )
if len ( qemu_vm_list ) > 0 :
qemu_vm_max = max ( qemu_vm_list )
return qemu_vm_max
|
def delete_custom_view ( auth , url , name ) :
"""function takes input of auth , url , and name and issues a RESTFUL call to delete a specific of custom views from HPE
IMC .
: param name : string containg the name of the desired custom view
: param auth : requests auth object # usually auth . creds from auth pyhpeimc . auth . class
: param url : base url of IMC RS interface # usually auth . url from pyhpeimc . auth . authclass
: return : str of creation results ( " view " + name + " created successfully "
: rtype : str
> > > from pyhpeimc . auth import *
> > > from pyhpeimc . plat . groups import *
> > > auth = IMCAuth ( " http : / / " , " 10.101.0.203 " , " 8080 " , " admin " , " admin " )
> > > delete _ custom _ view ( auth . creds , auth . url , name = " L1 View " )
' View L1 View deleted successfully '
> > > view _ 1 = get _ custom _ views ( auth . creds , auth . url , name = ' L1 View ' )
> > > assert view _ 1 = = None
> > > delete _ custom _ view ( auth . creds , auth . url , name = " L2 View " )
' View L2 View deleted successfully '
> > > view _ 2 = get _ custom _ views ( auth . creds , auth . url , name = ' L2 View ' )
> > > assert view _ 2 = = None"""
|
view_id = get_custom_views ( auth , url , name ) [ 0 ] [ 'symbolId' ]
delete_custom_view_url = '/imcrs/plat/res/view/custom/' + str ( view_id )
f_url = url + delete_custom_view_url
r = requests . delete ( f_url , auth = auth , headers = HEADERS )
# creates the URL using the payload variable as the contents
try :
if r . status_code == 204 :
return 'View ' + name + ' deleted successfully'
except requests . exceptions . RequestException as e :
return "Error:\n" + str ( e ) + ' delete_custom_view: An Error has occured'
|
def iso_abundMulti ( self , cyclist , stable = False , amass_range = None , mass_range = None , ylim = [ 0 , 0 ] , ref = - 1 , decayed = False , include_title = False , title = None , pdf = False , color_plot = True , grid = False , point_set = 1 ) :
'''Method that plots figures and saves those figures to a . png
file . Plots a figure for each cycle in the argument cycle .
Can be called via iso _ abund method by passing a list to cycle .
Parameters
cyclist : list
The cycles of interest . This method will do a plot for
each cycle and save them to a file .
stable : boolean , optional
A boolean of whether to filter out the unstable isotopes . The
default is False .
amass _ range : list , optional
A 1x2 array containing the lower and upper atomic mass
range . If None plot entire available atomic mass range .
The default is None .
mass _ range : list , optional
A 1x2 array containing the lower and upper mass range . If
this is an instance of abu _ vector this will only plot
isotopes that have an atomic mass within this range . This
will throw an error if this range does not make sense ie
[45,2 ] . If None , it will plot over the entire range . The
default is None .
ylim : list , optional
A 1x2 array containing the lower and upper Y limits . If
it is [ 0,0 ] , then ylim will be determined automatically .
The default is [ 0,0 ] .
ref : integer or list , optional
reference cycle . If it is not - 1 , this method will plot
the abundences of cycle devided by the cycle of the same
instance given in the ref variable . If ref is a list it
will be interpreted to have two elements :
ref = [ ' dir / of / ref / run ' , cycle ] which uses a refernece cycle
from another run . If any abundence in the reference cycle
is zero , it will replace it with 1e - 99 . The default is - 1.
decayed : boolean , optional
If True plot decayed distributions , else plot life
distribution . The default is False .
include _ title : boolean , optional
Include a title with the plot . The default is False .
title : string , optional
A title to include with the plot . The default is None .
pdf : boolean , optional
Save image as a [ pdf / png ] . The default is False .
color _ plot : boolean , optional
Color dots and lines [ True / False ] . The default is True .
grid : boolean , optional
print grid . The default is False .
point _ set : integer , optional
Set to 0 , 1 or 2 to select one of three point sets , useful
for multiple abundances or ratios in one plot . The default
is 1.'''
|
max_num = max ( cyclist )
for i in range ( len ( cyclist ) ) :
self . iso_abund ( cyclist [ i ] , stable , amass_range , mass_range , ylim , ref , decayed = decayed , show = False , color_plot = color_plot , grid = False , point_set = 1 , include_title = include_title )
if title is not None :
    name = title
    pl . title ( title )
else :
    name = 'IsoAbund'
number_str = _padding_model_number ( cyclist [ i ] , max_num )
if not pdf :
pl . savefig ( name + number_str + '.png' , dpi = 200 )
else :
pl . savefig ( name + number_str + '.pdf' , dpi = 200 )
pl . clf ( )
return None
|
def _compile_bus_injection ( self ) :
"""Impose injections on buses"""
|
string = '"""\n'
for device , series in zip ( self . devices , self . series ) :
if series :
string += 'system.' + device + '.gcall(system.dae)\n'
string += '\n'
string += 'system.dae.reset_small_g()\n'
string += self . gisland
string += '"""'
self . bus_injection = compile ( eval ( string ) , '' , 'exec' )
|