signature
stringlengths 29
44.1k
| implementation
stringlengths 0
85.2k
|
---|---|
def cc(project, detect_project=False):
    """Return a clang command that hides CFLAGS and LDFLAGS.

    This will generate a wrapper script in the current directory
    and return a complete plumbum command to it.

    Args:
        project: The project the wrapped compiler is associated with.
        detect_project (bool): Whether the wrapper should auto-detect
            the project at compile time. Defaults to False.

    Returns (benchbuild.utils.cmd):
        Path to the new clang command.
    """
    from benchbuild.utils import cmd

    cc_name = str(CFG["compiler"]["c"])
    wrap_cc(cc_name, compiler(cc_name), project, detect_project=detect_project)
    return cmd["./{}".format(cc_name)]
|
def records(self, sc, group_by='greedy', limit=None, sample=1, seed=42,
            decode=None, summaries=None):
    """Retrieve the elements of a Dataset.

    :param sc: a SparkContext object
    :param group_by: specifies a partition strategy for the objects
    :param limit: maximum number of objects to retrieve
    :param decode: an optional transformation to apply to the objects retrieved
    :param sample: percentage of results to return. Useful to return a sample
        of the dataset. This parameter is ignored when `limit` is set.
    :param seed: initialize internal state of the random number generator
        (42 by default). This is used to make the dataset sampling
        reproducible. It can be set to None to obtain different samples.
    :param summaries: an iterable containing a summary for each item in the
        dataset. If None, it will be computed by calling the summaries method.
    :return: a Spark rdd containing the elements retrieved
    """
    decode = decode or message_parser.parse_heka_message
    summaries = summaries or self.summaries(sc, limit)

    # Only sample when summaries is non-empty and no explicit limit is set.
    if summaries and limit is None and sample != 1:
        if sample < 0 or sample > 1:
            raise ValueError('sample must be between 0 and 1')
        print("WARNING: THIS IS NOT A REPRESENTATIVE SAMPLE.\n"
              "This 'sampling' is based on s3 files and is highly\n"
              "susceptible to skew. Use only for quicker performance\n"
              "while prototyping.")
        # We want this sample to be reproducible.
        # See https://bugzilla.mozilla.org/show_bug.cgi?id=1318681
        saved_rng_state = random.getstate()
        try:
            random.seed(seed)
            summaries = random.sample(summaries, int(len(summaries) * sample))
        finally:
            random.setstate(saved_rng_state)

    # Report the total size in MB before fetching.
    total_size = sum(item['size'] for item in summaries)
    total_size_mb = total_size / float(1 << 20)
    print("fetching %.5fMB in %s files..." % (total_size_mb, len(summaries)))

    if group_by == 'equal_size':
        groups = _group_by_equal_size(summaries, 10 * sc.defaultParallelism)
    elif group_by == 'greedy':
        groups = _group_by_size_greedy(summaries, 10 * sc.defaultParallelism)
    else:
        raise Exception("group_by specification is invalid")

    self._compile_selection()
    keys = (sc.parallelize(groups, len(groups))
            .flatMap(lambda group: group)
            .map(lambda item: item['key']))
    file_handles = keys.map(self.store.get_key)
    # decode(fp: file-object) -> list[dict]
    data = file_handles.flatMap(decode)
    return data.map(self._apply_selection)
|
def intervalSum(self, a, b):
    """Return t[a] + ... + t[b].

    :param int a b: with 1 <= a <= b
    :returns: the sum of the stored values over the inclusive range [a, b]
    """
    # Difference of two prefix sums gives the inclusive interval sum.
    return self.prefixSum(b) - self.prefixSum(a - 1)
|
def mapCellsToPoints(self):
    """Transform cell data (i.e., data specified per cell)
    into point data (i.e., data specified at cell points).

    The method of transformation is based on averaging the data values
    of all cells using a particular point.
    """
    converter = vtk.vtkCellDataToPointData()
    converter.SetInputData(self.polydata(False))
    converter.Update()
    return self.updateMesh(converter.GetOutput())
|
def _make_digest(k, **kwargs):
    """Create a digest suitable for use within a :class:`phyles.FSCache`
    object from the key object `k`.

    >>> adict = {'a': {'b': 1}, 'f': []}
    >>> make_digest(adict)
    'a2VKynHgDrUIm17r6BQ5QcA5XVmqpNBmiKbZ9kTu0A'
    """
    digest_map = _make_digest_dict(k, **kwargs)
    if digest_map is None:
        # No digestible content: fall back to a fixed sentinel name.
        return 'default'
    return '.'.join('{}_{}'.format(key, h)
                    for key, h in sorted(digest_map.items()))
|
def get_signatures_with_results(vcs):
    """Return the list of signatures for which test results are saved.

    Args:
        vcs (easyci.vcs.base.Vcs)
    Returns:
        List[str]
    """
    results_dir = os.path.join(vcs.private_dir(), 'results')
    if not os.path.exists(results_dir):
        return []
    # Each saved signature is a directory under the results directory.
    return [entry for entry in os.listdir(results_dir)
            if os.path.isdir(os.path.join(results_dir, entry))]
|
def publish(self):
    """Publishes the object.

    The decorator `assert_draft` makes sure that you cannot publish
    a published object.

    :param self: The object to be published.
    :return: The published object.
    """
    if self.is_draft:
        # If the object has previously been linked then patch the
        # placeholder data and remove the previously linked object.
        # Otherwise set the published date.
        if self.publishing_linked:
            self.patch_placeholders()
            # Unlink draft and published copies then delete published.
            # NOTE: This indirect dance is necessary to avoid
            # triggering unwanted MPTT tree structure updates via `save`.
            type(self.publishing_linked).objects.filter(
                pk=self.publishing_linked.pk).delete()
            # Instead of self.publishing_linked.delete()
        else:
            self.publishing_published_at = timezone.now()
        # Create a new object copying all fields.
        publish_obj = deepcopy(self)
        # If any fields are defined not to copy set them to None.
        for fld in self.publishing_publish_empty_fields + (
                'urlnode_ptr_id', 'publishing_linked_id'):
            setattr(publish_obj, fld, None)
        # Set the state of publication to published on the object.
        publish_obj.publishing_is_draft = False
        # Update Fluent's publishing status field mechanism to correspond
        # to our own notion of publication, to help us work together more
        # easily with Fluent Pages.
        if isinstance(self, UrlNode):
            self.status = UrlNode.DRAFT
            publish_obj.status = UrlNode.PUBLISHED
        # Set the date the object should be published at.
        publish_obj.publishing_published_at = self.publishing_published_at
        # Perform per-model preparation before saving published copy.
        publish_obj.publishing_prepare_published_copy(self)
        # Save the new published object as a separate instance to self.
        publish_obj.save()
        # Sanity-check that we successfully saved the published copy.
        if not publish_obj.pk:  # pragma: no cover
            raise PublishingException("Failed to save published copy")
        # As it is a new object we need to clone each of the
        # translatable fields, placeholders and required relations.
        self.clone_parler_translations(publish_obj)
        self.clone_fluent_placeholders_and_content_items(publish_obj)
        self.clone_fluent_contentitems_m2m_relationships(publish_obj)
        # Extra relationship-cloning smarts.
        publish_obj.publishing_clone_relations(self)
        # Link the published object to the draft object.
        self.publishing_linked = publish_obj
        # Flag draft instance when it is being updated as part of a
        # publish action, for use in `publishing_set_update_time`.
        self._skip_update_publishing_modified_at = True
        # Signal the pre-save hook for publication, save then signal
        # the post publish hook.
        publishing_signals.publishing_publish_pre_save_draft.send(
            sender=type(self), instance=self)
        # Save the draft and its new relationship with the published copy.
        publishing_signals.publishing_publish_save_draft.send(
            sender=type(self), instance=self)
        publishing_signals.publishing_post_publish.send(
            sender=type(self), instance=self)
        return publish_obj
|
def detect_format(self, candidates):
    """Detect the format of the fileset from a list of possible candidates.

    If multiple candidates match the potential files, e.g. NiFTI-X
    (see dcm2niix) and NiFTI, then the first matching candidate is selected.

    If a 'format_name' was specified when the fileset was created then that
    is used to select between the candidates. Otherwise the file extensions
    of the primary path and potential auxiliary files, or extensions of the
    files within the directory for directories, are matched against those
    specified for the file formats.

    Parameters
    ----------
    candidates : FileFormat
        A list of file-formats to select from.
    """
    if self._format is not None:
        raise ArcanaFileFormatError(
            "Format has already been set for {}".format(self))
    matching = [candidate for candidate in candidates
                if candidate.matches(self)]
    if not matching:
        raise ArcanaFileFormatError(
            "None of the candidate file formats ({}) match {}".format(
                ', '.join(str(c) for c in candidates), self))
    return matching[0]
|
def decode_offset_response(cls, response):
    """Decode OffsetResponse into OffsetResponsePayloads.

    Arguments:
        response: OffsetResponse
    Returns: list of OffsetResponsePayloads
    """
    payloads = []
    for topic, partitions in response.topics:
        for partition, error, offsets in partitions:
            payloads.append(kafka.structs.OffsetResponsePayload(
                topic, partition, error, tuple(offsets)))
    return payloads
|
def watch_prefix(self, key_prefix, **kwargs):
    """The same as ``watch``, but watches a range of keys with a prefix."""
    # The range end is the prefix with its last byte incremented, so the
    # watch covers every key that starts with `key_prefix`.
    kwargs['range_end'] = _increment_last_byte(key_prefix)
    return self.watch(key_prefix, **kwargs)
|
def flash(self, flash=True):
    """Turn on or off flashing of the device's LED for physical
    identification purposes."""
    action = (canstat.kvLED_ACTION_ALL_LEDS_ON if flash
              else canstat.kvLED_ACTION_ALL_LEDS_OFF)
    try:
        kvFlashLeds(self._read_handle, action, 30000)
    except (CANLIBError, NotImplementedError) as e:
        # Best-effort: some devices/drivers cannot flash their LEDs.
        log.error('Could not flash LEDs (%s)', e)
|
def init(lang, domain):
    """Initialize translations for a language code."""
    translations_dir = _get_translations_dir()
    domain = _get_translations_domain(domain)
    pot_path = os.path.join(translations_dir, f'{domain}.pot')
    return _run(
        f'init -i {pot_path} -d {translations_dir} -l {lang} --domain={domain}'
    )
|
def onBinaryMessage(self, msg, fromClient):
    """Buffer an incoming binary message onto the internal queue.

    The payload is copied into a ``bytearray``. If the queue is full the
    message is silently dropped.
    """
    data = bytearray()
    data.extend(msg)
    # The original kept this debug code in a no-op string literal after the
    # first statements (so it was not even a docstring); keep it as comments:
    # self.print_debug("message length: {}".format(len(data)))
    # self.print_debug("message data: {}".format(hexlify(data)))
    try:
        self.queue.put_nowait(data)
    except asyncio.QueueFull:
        pass
|
def _assign_numbers(self):
    """Assign numbers in preparation for validating these receipts.

    WARNING: Don't call the method manually unless you know what you're
    doing!
    """
    first = self.select_related('point_of_sales', 'receipt_type').first()
    next_num = Receipt.objects.fetch_last_receipt_number(
        first.point_of_sales,
        first.receipt_type,
    ) + 1
    for receipt in self.filter(receipt_number__isnull=True):
        # Atomically update receipt number: the isnull filter in the update
        # ensures we never clobber a number assigned concurrently.
        Receipt.objects.filter(
            pk=receipt.id,
            receipt_number__isnull=True,
        ).update(
            receipt_number=next_num,
        )
        next_num += 1
|
def non_fluents(self) -> Dict[str, PVariable]:
    '''Returns non-fluent pvariables.'''
    result = {}
    for pvar in self.pvariables:
        if pvar.is_non_fluent():
            result[str(pvar)] = pvar
    return result
|
def clone(url, path=None, remove=True):
    """Clone a local repo from that URL to that path.

    If path is not given, then use the git default: same as repo name.
    If path is given and remove is True then the path is removed before
    cloning.

    Because this is run from a script it is assumed that user should be
    Admin, so set config user values for the GitLab Admin.
    """
    clean = True
    if path and os.path.isdir(path):
        if remove:
            shutil.rmtree(path)
        else:
            clean = False
    if clean:
        stdout = run('clone %s %s' % (url, path or ''))
        # git prints "Cloning into '<dir>'..." on its first output line.
        cloned_into = stdout.splitlines()[0].split("'")[1]
        path_to_clone = os.path.realpath(cloned_into)
    else:
        path_to_clone = path
    # Temporarily point the working directory at the clone so the admin
    # identity is configured inside it, then restore it.
    old_dir = _working_dirs[0]
    _working_dirs[0] = path_to_clone
    config('user.name', 'Release Script')
    config('user.email', 'gitlab@wwts.com')
    _working_dirs[0] = old_dir
    return path_to_clone
|
def write(self):
    """This actually runs the qvality program from PATH."""
    outfn = self.create_outfilepath(self.fn, self.outsuffix)
    command = ['qvality']
    command += self.qvalityoptions
    command += [self.scores['target']['fn'],
                self.scores['decoy']['fn'],
                '-o', outfn]
    subprocess.call(command)
|
def remove_watcher(self, issue, watcher):
    """Remove a user from an issue's watch list.

    :param issue: ID or key of the issue affected
    :param watcher: username of the user to remove from the watchers list
    :rtype: Response
    """
    url = self._get_url('issue/' + str(issue) + '/watchers')
    return self._session.delete(url, params={'username': watcher})
|
def start(self):
    """Start discovering and listening to connections."""
    if self._state == CLOSED:
        raise NSQException('producer already closed')
    if self.is_running:
        # `warn` is a deprecated alias of `warning` in the logging module.
        self.logger.warning('producer already started')
        return
    self.logger.debug('starting producer...')
    self._state = RUNNING
    for address in self.nsqd_tcp_addresses:
        # Addresses are "host:port" strings.
        host, port = address.split(':')
        self.connect_to_nsqd(host, int(port))
|
def static_get_type_attr(t, name):
    """Get a type attribute statically, circumventing the descriptor protocol."""
    # Walk the MRO and read each class's __dict__ directly so descriptors
    # (properties, classmethods, ...) are returned raw, not invoked.
    for klass in t.mro():
        namespace = vars(klass)
        if name in namespace:
            return namespace[name]
    raise AttributeError(name)
|
def _add_dictlist_to_database_via_load_in_file(
        masterListIndex, log, dbTablename, dbSettings, dateModified=False):
    """*load a list of dictionaries into a database table with load data infile*

    **Key Arguments:**
        - ``masterListIndex`` -- the index of the sharedList of dictionary lists to process
        - ``dbTablename`` -- the name of the database table to add the list to
        - ``dbSettings`` -- the dictionary of database settings
        - ``log`` -- logger
        - ``dateModified`` -- add a dateModified stamp with an updated flag to rows?

    **Return:**
        - None
    """
    log.debug('starting the ``_add_dictlist_to_database_via_load_in_file`` function')
    global sharedList
    dictList = sharedList[masterListIndex][0]
    count = sharedList[masterListIndex][1]
    if count > totalCount:
        count = totalCount
    ltotalCount = totalCount
    # SETUP ALL DATABASE CONNECTIONS
    dbConn = database(log=log, dbSettings=dbSettings).connect()
    now = datetime.now()
    tmpTable = now.strftime("tmp_%Y%m%dt%H%M%S%f")
    # CREATE A TEMPORARY TABLE WITH THE SAME SCHEMA (WHERE 1=0 copies no rows)
    # NOTE: table/column names come from trusted internal settings; they are
    # interpolated with % locals(), which would be unsafe for external input.
    sqlQuery = """CREATE TEMPORARY TABLE %(tmpTable)s SELECT * FROM %(dbTablename)s WHERE 1=0;""" % locals()
    writequery(log=log, sqlQuery=sqlQuery, dbConn=dbConn)
    # Collect the union of keys across all dictionaries as CSV columns.
    csvColumns = [k for d in dictList for k in d.keys()]
    csvColumns = list(set(csvColumns))
    csvColumnsString = (', ').join(csvColumns)
    df = pd.DataFrame(dictList)
    # \N is MySQL's NULL marker in LOAD DATA INFILE files.
    df.replace(['nan', 'None', '', 'NaN', np.nan], '\\N', inplace=True)
    df.to_csv('/tmp/%(tmpTable)s' % locals(), sep="|", index=False,
              escapechar="\\", quotechar='"', columns=csvColumns,
              encoding='utf-8')
    sqlQuery = """LOAD DATA LOCAL INFILE '/tmp/%(tmpTable)s'
INTO TABLE %(tmpTable)s
FIELDS TERMINATED BY '|' OPTIONALLY ENCLOSED BY '"'
IGNORE 1 LINES
(%(csvColumnsString)s);""" % locals()
    writequery(log=log, sqlQuery=sqlQuery, dbConn=dbConn)
    # Build the ON DUPLICATE KEY UPDATE clause, one assignment per column.
    updateStatement = ""
    for i in csvColumns:
        updateStatement += "`%(i)s` = VALUES(`%(i)s`), " % locals()
    if dateModified:
        updateStatement += "dateLastModified = NOW(), updated = 1"
    else:
        # Strip the trailing ", " from the last column assignment.
        updateStatement = updateStatement[0:-2]
    sqlQuery = """
INSERT IGNORE INTO %(dbTablename)s
SELECT * FROM %(tmpTable)s
ON DUPLICATE KEY UPDATE %(updateStatement)s;""" % locals()
    writequery(log=log, sqlQuery=sqlQuery, dbConn=dbConn)
    sqlQuery = """DROP TEMPORARY TABLE %(tmpTable)s;""" % locals()
    writequery(log=log, sqlQuery=sqlQuery, dbConn=dbConn)
    # Remove the temporary CSV file; narrowed from a bare `except:` which
    # would also have swallowed KeyboardInterrupt/SystemExit.
    try:
        os.remove('/tmp/%(tmpTable)s' % locals())
    except OSError:
        pass
    log.debug('completed the ``_add_dictlist_to_database_via_load_in_file`` function')
    return None
|
def find_bounding_indices(arr, values, axis, from_below=True):
    """Find the indices surrounding the values within arr along axis.

    Returns a set of above, below, good. Above and below are lists of arrays
    of indices. These lists are formulated such that they can be used directly
    to index into a numpy array and get the expected results (no extra slices
    or ellipsis necessary). `good` is a boolean array indicating the "columns"
    that actually had values to bound the desired value(s).

    Parameters
    ----------
    arr : array-like
        Array to search for values
    values : array-like
        One or more values to search for in `arr`
    axis : int
        The dimension of `arr` along which to search.
    from_below : bool, optional
        Whether to search from "below" (i.e. low indices to high indices).
        If `False`, the search will instead proceed from high indices to low
        indices. Defaults to `True`.

    Returns
    -------
    above : list of arrays
        List of broadcasted indices to the location above the desired value
    below : list of arrays
        List of broadcasted indices to the location below the desired value
    good : array
        Boolean array indicating where the search found proper bounds for the
        desired value
    """
    # The shape of generated indices is the same as the input, but with the
    # axis of interest replaced by the number of values to search for.
    indices_shape = list(arr.shape)
    indices_shape[axis] = len(values)
    # Storage for the found indices and the mask for good locations.
    # Use builtin int/bool as dtypes: the np.int and np.bool aliases were
    # deprecated in NumPy 1.20 and removed in NumPy 1.24.
    indices = np.empty(indices_shape, dtype=int)
    good = np.empty(indices_shape, dtype=bool)
    # Used to put the output in the proper location
    store_slice = [slice(None)] * arr.ndim
    # Loop over all of the values and for each, see where the value would be
    # found from a linear search
    for level_index, value in enumerate(values):
        # Look for changes in the value of the test for <= value in
        # consecutive points. Taking abs() because we only care if there is a
        # flip, not which direction.
        switches = np.abs(np.diff((arr <= value).astype(int), axis=axis))
        # Good points are those where it's not just 0's along the whole axis
        good_search = np.any(switches, axis=axis)
        if from_below:
            # Look for the first switch; need to add 1 to the index since
            # argmax is giving the index within the difference array, which is
            # one smaller.
            index = switches.argmax(axis=axis) + 1
        else:
            # Generate a list of slices to reverse the axis of interest so
            # that searching from 0 to N is starting at the "top" of the axis.
            arr_slice = [slice(None)] * arr.ndim
            arr_slice[axis] = slice(None, None, -1)
            # Same as above, but we use the slice to come from the end; then
            # adjust those indices to measure from the front.
            index = arr.shape[axis] - 1 - switches[tuple(arr_slice)].argmax(axis=axis)
        # Set all indices where the results are not good to 0
        index[~good_search] = 0
        # Put the results in the proper slice
        store_slice[axis] = level_index
        indices[tuple(store_slice)] = index
        good[tuple(store_slice)] = good_search
    # Create index values for broadcasting arrays
    above = broadcast_indices(arr, indices, arr.ndim, axis)
    below = broadcast_indices(arr, indices - 1, arr.ndim, axis)
    return above, below, good
|
def crt_prf_ftr_tc(aryMdlRsp, aryTmpExpInf, varNumVol, varTr, varTmpOvsmpl,
                   switchHrfSet, tplPngSize, varPar, dctPrm=None,
                   lgcPrint=True):
    """Create all spatial x feature prf time courses.

    Parameters
    ----------
    aryMdlRsp : 2d numpy array, shape [n_x_pos * n_y_pos * n_sd, n_cond]
        Responses of 2D Gauss models to spatial conditions.
    aryTmpExpInf : 2d numpy array, shape [unknown, 4]
        Temporal information about conditions.
    varNumVol : float, positive
        Number of volumes of the (fMRI) data.
    varTr : float, positive
        Time to repeat (TR) of the (fMRI) experiment.
    varTmpOvsmpl : int, positive
        Factor by which the data has been temporally upsampled.
    switchHrfSet : int, (1, 2, 3)
        Switch to determine which hrf basis functions are used.
    tplPngSize : tuple
        Pixel dimensions of the visual space (width, height).
    varPar : int, positive
        Description of input 1.
    dctPrm : dictionary, default None
        Dictionary with customized hrf parameters. If this is None, default
        hrf parameters will be used.
    lgcPrint : boolean, default True
        Should print messages be sent to user?

    Returns
    -------
    aryNrlTcConv : 3d numpy array,
        shape [nr of models, nr of unique features, varNumVol]
        Prf time course models.
    """
    # Identify the unique (non-zero) features present in the experiment info.
    vecFeat = np.unique(aryTmpExpInf[:, 3])
    vecFeat = vecFeat[np.nonzero(vecFeat)[0]]
    # Preallocate the output array (empty along the feature axis; each
    # feature's time courses are concatenated on below).
    aryPrfTc = np.zeros((aryMdlRsp.shape[0], 0, varNumVol), dtype=np.float32)
    for indFtr, ftr in enumerate(vecFeat):
        if lgcPrint:
            print('---------Create prf time course model for feature '
                  + str(ftr))
        # Derive spatial conditions, onsets and durations for this feature.
        lgcThisFtr = aryTmpExpInf[:, 3] == ftr
        aryTmpCnd = aryTmpExpInf[lgcThisFtr, 0]
        aryTmpOns = aryTmpExpInf[lgcThisFtr, 1]
        aryTmpDrt = aryTmpExpInf[lgcThisFtr, 2]
        # Create temporally upsampled neural time courses.
        aryNrlTcTmp = crt_nrl_tc(aryMdlRsp, aryTmpCnd, aryTmpOns, aryTmpDrt,
                                 varTr, varNumVol, varTmpOvsmpl,
                                 lgcPrint=lgcPrint)
        # Convolve with hrf to create model pRF time courses.
        aryPrfTcTmp = crt_prf_tc(aryNrlTcTmp, varNumVol, varTr, varTmpOvsmpl,
                                 switchHrfSet, tplPngSize, varPar,
                                 dctPrm=dctPrm, lgcPrint=lgcPrint)
        # Append this feature's time courses to the result.
        aryPrfTc = np.concatenate((aryPrfTc, aryPrfTcTmp), axis=1)
    return aryPrfTc
|
def xlim_as_gps(func):
    """Wrap ``func`` to handle pass limit inputs through `gwpy.time.to_gps`"""
    @wraps(func)
    def wrapped_func(self, left=None, right=None, **kw):
        # Accept a single (left, right) iterable in place of two arguments.
        if right is None and numpy.iterable(left):
            left, right = left
        kw['left'] = left
        kw['right'] = right
        # Only convert limits when the axis is using a GPS scale.
        if self.get_xscale() in GPS_SCALES:
            for key in ('left', 'right'):
                try:
                    kw[key] = numpy.longdouble(str(to_gps(kw[key])))
                except TypeError:
                    # Leave non-convertible values (e.g. None) untouched.
                    pass
        return func(self, **kw)
    return wrapped_func
|
def get_dataset(self, key, info):
    """Load a dataset."""
    logger.debug('Reading %s.', key.name)
    try:
        variable = self.nc[info['file_key']]
    except KeyError:
        # Requested variable not present in the file.
        return None
    # Merge the file's attributes into `info` and attach the result.
    info.update(variable.attrs)
    variable.attrs = info
    return variable
|
def parse_version(str_):
    """Parse the program's version from a python variable declaration.

    :param str_: source text containing something like ``__version__ = "1.2.3"``
    :returns: the first dotted version string found, e.g. ``"1.2.3"``
    :raises KeyError: if no version string can be found
    """
    # Dots are escaped: the previous pattern r"\d+.\d+.\d+" let '.' match any
    # character, so strings like "1a2b3" were wrongly accepted as versions.
    found = re.findall(r"\d+\.\d+\.\d+", str_)
    if found:
        return found[0]
    print("cannot parse string {}".format(str_))
    raise KeyError
|
def grad_numerical(self, x, func, epsilon=None):
    """Return a symmetric finite-difference approximation of the gradient.

    :param x: point (numpy array) at which to evaluate the gradient
    :param func: scalar-valued function of ``x``
    :param epsilon: per-coordinate step sizes; defaults to
        ``1e-8 * (1 + abs(x))`` (elementwise)
    :returns: numpy array of the same length as ``x``
    """
    eps = 1e-8 * (1 + abs(x)) if epsilon is None else epsilon
    grad = np.zeros(len(x))
    ei = np.zeros(len(x))
    # `range(len(x))` replaces the non-stdlib `rglen` helper (identical
    # behavior, one less dependency).
    for i in range(len(x)):
        ei[i] = eps[i]
        # Central difference: (f(x + e_i) - f(x - e_i)) / (2 * eps_i)
        grad[i] = (func(x + ei) - func(x - ei)) / (2 * eps[i])
        ei[i] = 0
    return grad
|
async def getFile(self, file_id):
    """See: https://core.telegram.org/bots/api#getfile"""
    # NOTE: locals() must be captured before binding any other local name,
    # so that only the call's arguments are collected.
    params = _strip(locals())
    return await self._api_request('getFile', _rectify(params))
|
def _rebind_variables ( self , new_inputs ) :
"""Return self . _ expr with all variables rebound to the indices implied by
new _ inputs ."""
|
expr = self . _expr
# If we have 11 + variables , some of our variable names may be
# substrings of other variable names . For example , we might have x _ 1,
# x _ 10 , and x _ 100 . By enumerating in reverse order , we ensure that
# every variable name which is a substring of another variable name is
# processed after the variable of which it is a substring . This
# guarantees that the substitution of any given variable index only
# ever affects exactly its own index . For example , if we have variables
# with indices going up to 100 , we will process all of the x _ 1xx names
# before x _ 1x , which will be before x _ 1 , so the substitution of x _ 1
# will not affect x _ 1x , which will not affect x _ 1xx .
for idx , input_ in reversed ( list ( enumerate ( self . inputs ) ) ) :
old_varname = "x_%d" % idx
# Temporarily rebind to x _ temp _ N so that we don ' t overwrite the
# same value multiple times .
temp_new_varname = "x_temp_%d" % new_inputs . index ( input_ )
expr = expr . replace ( old_varname , temp_new_varname )
# Clear out the temp variables now that we ' ve finished iteration .
return expr . replace ( "_temp_" , "_" )
|
def sorted(self, by, **kwargs):
    """Sort array by a column.

    Parameters
    ----------
    by : str
        Name of the column to sort by (e.g. 'time').
    """
    order = np.argsort(self[by], **kwargs)
    return self.__class__(
        self[order],
        h5loc=self.h5loc,
        split_h5=self.split_h5,
        name=self.name,
    )
|
def copy_fs_if_newer(
    src_fs,  # type: Union[FS, Text]
    dst_fs,  # type: Union[FS, Text]
    walker=None,  # type: Optional[Walker]
    on_copy=None,  # type: Optional[_OnCopy]
    workers=0,  # type: int
):
    # type: (...) -> None
    """Copy the contents of one filesystem to another, checking times.

    If both source and destination files exist, the copy is executed only if
    the source file is newer than the destination file. In case modification
    times of source or destination files are not available, copy file is
    always executed.

    Arguments:
        src_fs (FS or str): Source filesystem (URL or instance).
        dst_fs (FS or str): Destination filesystem (URL or instance).
        walker (~fs.walk.Walker, optional): A walker object that will be used
            to scan for files in ``src_fs``. Set this if you only want to
            consider a sub-set of the resources in ``src_fs``.
        on_copy (callable): A function callback called after a single file
            copy is executed. Expected signature is ``(src_fs, src_path,
            dst_fs, dst_path)``.
        workers (int): Use ``worker`` threads to copy data, or ``0`` (default)
            for a single-threaded copy.
    """
    # A whole-filesystem copy is just a directory copy rooted at "/".
    return copy_dir_if_newer(
        src_fs, "/", dst_fs, "/", walker=walker, on_copy=on_copy,
        workers=workers,
    )
|
def get_subgraph(self, starting_node, block_addresses):
    """Get a sub-graph out of a bunch of basic block addresses.

    :param CFGNode starting_node: The beginning of the subgraph
    :param iterable block_addresses: A collection of block addresses that
        should be included in the subgraph if there is a path between
        `starting_node` and a CFGNode with the specified address, and all
        nodes on the path should also be included in the subgraph.
    :return: A new CFG that only contain the specific subgraph.
    :rtype: CFGEmulated
    """
    if starting_node not in self.graph:
        raise AngrCFGError('get_subgraph(): the specified "starting_node" %s does not exist in the current CFG.' % starting_node)
    wanted_addrs = set(block_addresses)
    subgraph = networkx.DiGraph()
    subgraph.add_node(starting_node)
    # Breadth-limited traversal: follow out-edges only into nodes whose
    # address was requested, pulling the connecting path into the subgraph.
    worklist = [starting_node]
    while worklist:
        node = worklist.pop()
        for _, successor, data in self.graph.out_edges([node], data=True):
            if successor not in subgraph and successor.addr in wanted_addrs:
                subgraph.add_edge(node, successor, **data)
                worklist.append(successor)
    new_cfg = self.copy()
    new_cfg._graph = subgraph
    new_cfg._starts = (starting_node.addr,)
    return new_cfg
|
def FanOut(self, obj, parent=None):
    """Expand values from various attribute types.

    Strings are returned as is.
    Dictionaries are returned with a key string, and an expanded set of
    values. Other iterables are expanded until they flatten out.
    Other items are returned in string format.

    Args:
        obj: The object to expand out.
        parent: The parent object: Used to short-circuit infinite recursion.
    Returns:
        a list of expanded values as strings.
    """
    if parent and obj == parent:
        # Catch cases where RDFs are iterable but return themselves.
        results = [utils.SmartUnicode(obj).strip()]
    elif isinstance(obj, (string_types, rdf_structs.EnumNamedValue)):
        results = [utils.SmartUnicode(obj).strip()]
    elif isinstance(obj, rdf_protodict.DataBlob):
        results = self.FanOut(obj.GetValue())
    elif isinstance(obj, (collections.Mapping, rdf_protodict.Dict)):
        # rdf_protodict.Dict only has items, not iteritems.
        results = []
        for key, value in iteritems(obj):
            expanded = [utils.SmartUnicode(r) for r in self.FanOut(value)]
            results.append(
                "%s:%s" % (utils.SmartUnicode(key), ",".join(expanded)))
    elif isinstance(obj, (collections.Iterable,
                          rdf_structs.RepeatedFieldHelper)):
        # Flatten nested iterables, passing `obj` as the parent so an
        # iterable that yields itself cannot recurse forever.
        results = []
        for item in obj:
            results.extend(self.FanOut(item, obj))
    else:
        results = [utils.SmartUnicode(obj).strip()]
    return results
|
def append(self, value):
    """Add an item to the end of the list."""
    # Validate before delegating to the underlying list implementation.
    validated = self._ensure_value_is_valid(value)
    return super(Collection, self).append(validated)
|
def is_type(type_, *p):
    """Return True if every argument has the given type.

    Each argument is expected to expose a ``type_`` attribute; any failure
    while inspecting an argument (e.g. a missing attribute) yields False.
    """
    try:
        for item in p:
            if item.type_ != type_:
                return False
        return True
    except Exception:
        # Narrowed from a bare `except:` which also swallowed
        # KeyboardInterrupt/SystemExit; the intent (mismatch -> False)
        # is preserved for ordinary errors.
        pass
    return False
|
def edges_between_two_vertices(self, vertex1, vertex2, keys=False):
    """Iterate over edges between two supplied vertices in current
    :class:`BreakpointGraph`.

    Proxies a call to the private
    :meth:`Breakpoint._Breakpoint__edges_between_two_vertices` method.

    :param vertex1: a first vertex out of two, edges of interest are incident to
    :type vertex1: any hashable object, :class:`bg.vertex.BGVertex` is expected
    :param vertex2: a second vertex out of two, edges of interest are incident to
    :type vertex2: any hashable object, :class:`bg.vertex.BGVertex` is expected
    :param keys: a flag to indicate if information about unique edge's ids has
        to be returned alongside with edge
    :type keys: ``Boolean``
    :return: generator over edges (tuples ``edge, edge_id`` if keys specified)
        between two supplied vertices in current :class:`BreakpointGraph`
        wrapped in :class:`bg.vertex.BGVertex`
    :rtype: ``generator``
    """
    for edge_entry in self.__edges_between_two_vertices(
            vertex1=vertex1, vertex2=vertex2, keys=keys):
        yield edge_entry
|
def run():
    """Installs required development dependencies. Uses git to checkout other
    modularcrypto repos for more accurate coverage data."""
    deps_dir = os.path.join(build_root, 'modularcrypto-deps')
    # Start from a clean dependency staging directory.
    if os.path.exists(deps_dir):
        shutil.rmtree(deps_dir, ignore_errors=True)
    os.mkdir(deps_dir)
    try:
        print("Staging ci dependencies")
        _stage_requirements(
            deps_dir, os.path.join(package_root, 'requires', 'ci'))
        print("Checking out modularcrypto packages for coverage")
        for pkg_name in other_packages:
            pkg_url = 'https://github.com/wbond/%s.git' % pkg_name
            pkg_dir = os.path.join(build_root, pkg_name)
            if os.path.exists(pkg_dir):
                print("%s is already present" % pkg_name)
                continue
            print("Cloning %s" % pkg_url)
            _execute(['git', 'clone', pkg_url], build_root)
            print()
    except (Exception):
        # Clean up the staging directory on any failure, then re-raise.
        if os.path.exists(deps_dir):
            shutil.rmtree(deps_dir, ignore_errors=True)
        raise
    return True
|
def importGurobiSolution(self, grbmodel):
    """Import the solution from a gurobipy.Model object.

    Args:
        grbmodel: A :class:`gurobipy.Model` object with the model solved.
    """
    statements = []
    for var in grbmodel.getVars():
        # Skip gurobipy's auxiliary variables (their names contain '$').
        if '$' in var.VarName:
            continue
        statements.append('let {} := {};'.format(var.VarName, var.X))
    self.eval(''.join(statements))
|
def i4_sobol_generate(dim_num, n, skip=1):
    """i4_sobol_generate generates a Sobol dataset.

    Parameters:
      Input, integer DIM_NUM, the spatial dimension.
      Input, integer N, the number of points to generate.
      Input, integer SKIP, the number of initial points to skip.
      Output, real R(N, DIM_NUM), the points.
    """
    points = np.full((n, dim_num), np.nan)
    for row in range(n):
        # Seed each draw explicitly from its row index rather than
        # chaining the returned next seed, keeping rows independent.
        sample, _ = i4_sobol(dim_num, row + skip)
        points[row, 0:dim_num] = sample
    return points
|
def segments(self):
    """Return a list of ordered tuple objects, representing contiguous occupied data addresses.

    Each tuple has a length of two and follows the semantics of the range
    and xrange objects. The second entry of the tuple is always an integer
    greater than the first entry.
    """
    addrs = self.addresses()
    if not addrs:
        return []
    if len(addrs) == 1:
        only = addrs[0]
        return [(only, only + 1)]
    # A gap larger than one between neighbouring addresses ends the
    # current segment and starts the next one.
    starts = [addrs[0]]
    ends = []
    for prev, curr in zip(addrs[:-1], addrs[1:]):
        if curr - prev > 1:
            ends.append(prev)
            starts.append(curr)
    ends.append(addrs[-1])
    # Half-open intervals: the end bound is one past the last address.
    return [(start, end + 1) for start, end in zip(starts, ends)]
|
def server_by_name(self, name):
    '''Find a server by its name'''
    # Missing names fall through as an empty id; the show call decides
    # how to handle that.
    matched = self.server_list().get(name, {})
    return self.server_show_libcloud(matched.get('id', ''))
|
def remove_users(self, user_ids, nid=None):
    """Remove users from a network `nid`

    :type user_ids: list of str
    :param user_ids: a list of user ids. These are the same
        ids that are returned by get_all_users.
    :type nid: str
    :param nid: This is the ID of the network to remove students
        from. This is optional and only to override the existing
        `network_id` entered when created the class
    :returns: Python object containing returned data, a list
        of dicts of user data of all of the users remaining in
        the network after users are removed.
    """
    response = self.request(
        method="network.update",
        data={"remove_users": user_ids},
        nid=nid,
        nid_key="id",
    )
    return self._handle_error(response, "Could not remove users.")
|
def ReadVarInt(self, max=sys.maxsize):
    """Read a variable length integer from the stream.

    The NEO network protocol supports encoded storage for space saving.
    See: http://docs.neo.org/en-us/node/network-protocol.html#convention

    Args:
        max (int): (Optional) maximum allowed value of the decoded integer.

    Returns:
        int: the decoded integer.

    Raises:
        Exception: if the decoded value exceeds ``max``.
    """
    fb = self.ReadByte()
    # NOTE: was 'fb is 0' — identity comparison on an int only works by
    # accident of CPython's small-int caching; compare by value instead.
    if fb == 0:
        return fb
    # The first byte either is the value itself (< 0xfd) or a marker
    # selecting the width of the integer that follows.
    if fb == 0xfd:
        value = self.ReadUInt16()
    elif fb == 0xfe:
        value = self.ReadUInt32()
    elif fb == 0xff:
        value = self.ReadUInt64()
    else:
        value = fb
    if value > max:
        raise Exception("Invalid format")
    return int(value)
|
def _find_matching_collections_externally(collections, record):
    """Find matching collections with percolator engine.

    :param collections: mapping of collection name -> collection data
        (each entry holds an ``'ancestors'`` list) to search within.
    :param record: record to match.
    :yields: the ``'ancestors'`` list of each matching collection.
    """
    index, doc_type = RecordIndexer().record_to_index(record)
    body = {"doc": record.dumps()}
    results = current_search_client.percolate(
        index=index,
        doc_type=doc_type,
        allow_no_indices=True,
        ignore_unavailable=True,
        body=body,
    )
    # Percolator queries are registered under ids of the form
    # 'collection-<name>'; strip the prefix to recover the name.
    prefix = 'collection-'
    for match in results['matches']:
        collection_name = match['_id']
        if collection_name.startswith(prefix):
            name = collection_name[len(prefix):]
            if name in collections:
                yield collections[name]['ancestors']
    # BUG FIX: 'raise StopIteration' inside a generator is converted to a
    # RuntimeError since PEP 479 (Python 3.7); a plain return ends
    # iteration safely.
    return
|
def float(self, item, default=None):
    """Return value of key as a float

    :param item: key of value to transform
    :param default: value to return if item does not exist
    :return: float of value
    """
    try:
        raw = self.__getattr__(item)
    except AttributeError:
        # Fall back to the caller-supplied default when one was given;
        # otherwise surface the original lookup failure.
        if default is not None:
            return default
        raise
    return float(raw)
|
def find_binutils_libs(self, libdir, lib_ext):
    """Find Binutils libraries.

    Walks ``libdir`` recursively collecting libbfd/opcodes libraries,
    grouped by the version string embedded in the file name.

    :param libdir: root directory to search.
    :param lib_ext: library file extension (e.g. ``.so``); its leading
        dot is escaped into the regular expression below.
    :return: list of library paths -- a multiarch group when found,
        otherwise the unversioned ("") group, or an empty list.
    """
    # Matches '(libbfd|opcodes)<version><lib_ext>'; the backslash escapes
    # the leading '.' of lib_ext inside the pattern.
    bfd_expr = re.compile("(lib(?:bfd)|(?:opcodes))(.*?)\%s" % lib_ext)
    libs = {}
    for root, dirs, files in os.walk(libdir):
        for f in files:
            m = bfd_expr.search(f)
            if m:
                lib, version = m.groups()
                fp = os.path.join(root, f)
                # Group full paths by the version captured from the name.
                if version in libs:
                    libs[version].append(fp)
                else:
                    libs[version] = [fp, ]
    # first, search for multiarch files.
    # check if we found more than one version of the multiarch libs.
    multiarch_libs = dict([(v, _l) for v, _l in libs.items() if v.find("multiarch") != -1])
    if len(multiarch_libs) > 1:
        # NOTE: Python 2 code ('print' statement; dict.values() indexing).
        print "[W] Multiple binutils versions detected. Trying to build with default..."
        return multiarch_libs.values()[0]
    if len(multiarch_libs) == 1:
        return multiarch_libs.values()[0]
    # or use the default libs, or.. none
    return libs.get("", [])
|
def insert_loudest_triggers_option_group(parser, coinc_options=True):
    """Add options to the optparser object for selecting templates in bins.

    Parameters
    ----------
    parser : object
        OptionParser instance.
    """
    group = insert_bank_bins_option_group(parser)
    group.title = "Options for finding loudest triggers."
    # Coincident-analysis inputs are optional; single-detector and veto
    # options are always registered.
    if coinc_options:
        group.add_argument(
            "--statmap-file", default=None,
            help="HDF format clustered coincident trigger result file.")
        group.add_argument(
            "--statmap-group", default="foreground",
            help="Name of group in statmap file to get triggers.")
    group.add_argument(
        "--sngl-trigger-files", nargs="+", default=None,
        action=types.MultiDetOptionAction,
        help="HDF format merged single detector trigger files.")
    group.add_argument(
        "--veto-file", default=None,
        help="XML file with segment_definer and segment table.")
    group.add_argument(
        "--veto-segment-name", default=None,
        help="Name of segment to use as veto in XML file's segment_definer table.")
    group.add_argument(
        "--search-n-loudest", type=int, default=None,
        help="Number of triggers to search over.")
    group.add_argument(
        "--n-loudest", type=int, default=None,
        help="Number of triggers to return in results.")
    return group
|
def cpp_checker(code, working_directory):
    """Return checker."""
    # Honour the CXX environment override, defaulting to g++.
    compiler = os.getenv('CXX', 'g++')
    command = [compiler, '-std=c++0x'] + INCLUDE_FLAGS
    return gcc_checker(code, '.cpp', command, working_directory=working_directory)
|
def validate_book(body):
    '''This does not only accept/refuse a book. It also returns an ENHANCED
    version of body, with (mostly fts-related) additional fields.
    This function is idempotent.

    :param body: dict describing the book; must contain a ``_language``
        key of at most 2 characters.
    :raises ValueError: if ``_language`` is missing or too long.
    :return: the same dict, with stale ``_text_*`` keys removed and a
        fresh ``_text_<language>`` full-text field added.
    '''
    if '_language' not in body:
        raise ValueError('language needed')
    if len(body['_language']) > 2:
        raise ValueError('invalid language: %s' % body['_language'])
    # remove old _text_* fields; BUG FIX: iterate over a snapshot of the
    # keys, because deleting from a dict while iterating its live key
    # view raises RuntimeError on Python 3.
    for k in list(body.keys()):
        if k.startswith('_text'):
            del body[k]
    allfields = collectStrings(body)
    body['_text_%s' % body['_language']] = ' '.join(allfields)
    return body
|
def logger(self):
    """The logger for this class."""
    # sys._getframe is internal/CPython-only, but astonishingly faster
    # than the alternatives; depth 1 is the immediate caller's frame.
    caller = sys._getframe(1)
    module_name = _get_module(caller.f_code.co_filename)
    return logging.getLogger(module_name)
|
def modname_source_to_target(self, spec, modname, source):
    """Create a target file name from the input module name and its
    source file name.

    The result is a path relative to the build_dir, derived directly
    from the modname with NO implicit conversion of path separators
    (i.e. '/' or any other) into a system or OS specific form (e.g.
    '\\'), since Node.js/JavaScript tools handle this internally and
    these values may be exposed on the web, where the separators must
    be preserved. Implementations that need OS paths may wrap the
    result with os.path.normpath (done in _generate_transpile_target
    for transpile write targets).

    Default behaviour appends the instance's filename_suffix (set up
    by setup_filename_suffix) iff the source ends with that suffix.
    Loader plugin syntax ('!') is delegated to the loaderplugin
    registry on the spec when one is available, since some tools and
    filesystems cannot handle those characters on disk.

    Called by generator method `_gen_modname_source_target_modpath`.
    """
    registry = spec.get(CALMJS_LOADERPLUGIN_REGISTRY)
    # Loader plugin syntax is delegated to its registered handler.
    if registry and '!' in modname:
        plugin = registry.get(modname)
        if plugin:
            return plugin.modname_source_to_target(self, spec, modname, source)
    suffix = self.filename_suffix
    if source.endswith(suffix) and not modname.endswith(suffix):
        return modname + suffix
    # assume that modname IS the filename
    return modname
|
def compile(self, compass):
    """Calls the compass script specified in the compass extension
    with the paths provided by the config.rb.

    :param compass: extension object exposing ``compass_path`` (path to
        the compass binary), a ``log`` and a ``disabled`` flag.
    """
    # NOTE: Python 2 syntax below ('except OSError, e').
    try:
        output = subprocess.check_output([compass.compass_path, 'compile', '-q'], cwd=self.base_dir)
        # Touch the destination so its mtime reflects this compile run.
        os.utime(self.dest, None)
        compass.log.debug(output)
    except OSError, e:
        if e.errno == errno.ENOENT:
            # Missing compass binary: warn and disable instead of failing
            # on every subsequent build.
            compass.log.error("Compass could not be found in the PATH " + "and/or in the COMPASS_PATH setting! " + "Disabling compilation.")
            compass.disabled = True
        else:
            raise e
|
def _compile_operation_rule(self, rule, left, right, result_class):
    """Compile given operation rule, when possible for given combination
    of operation operands."""
    constant_like = (ConstantRule, ListRule)
    # Variables must face a constant with the matching datatype on the
    # opposite side of the operation.
    if isinstance(left, VariableRule) and isinstance(right, constant_like):
        return self._cor_compile(
            rule, left, right, result_class,
            clean_variable(left.value), self.compilations_variable)
    if isinstance(right, VariableRule) and isinstance(left, constant_like):
        return self._cor_compile(
            rule, right, left, result_class,
            clean_variable(right.value), self.compilations_variable)
    # Functions likewise get the constant coerced to their datatype.
    if isinstance(left, FunctionRule) and isinstance(right, constant_like):
        return self._cor_compile(
            rule, left, right, result_class,
            left.function, self.compilations_function)
    if isinstance(right, FunctionRule) and isinstance(left, constant_like):
        return self._cor_compile(
            rule, right, left, result_class,
            right.function, self.compilations_function)
    # In all other cases just keep things the way they are.
    return result_class(rule.operation, left, right)
|
def get_token_async(self, refresh=False):
    """Get an authentication token.

    The token is cached in memcache, keyed by the scopes argument.
    Uses a random token expiration headroom value generated in the constructor
    to eliminate a burst of GET_ACCESS_TOKEN API requests.

    Args:
      refresh: If True, ignore a cached token; default False.

    Yields:
      An authentication token. This token is guaranteed to be non-expired.
    """
    # Cache key combines the service account with the scope list so
    # different scope sets never share a token.
    key = '%s,%s' % (self.service_account_id, ','.join(self.scopes))
    ts = yield _AE_TokenStorage_.get_by_id_async(key, use_cache=True, use_memcache=self.retry_params.memcache_access_token, use_datastore=self.retry_params.save_access_token)
    # Refresh when forced, missing, or expiring within the headroom window.
    if refresh or ts is None or ts.expires < (time.time() + self.expiration_headroom):
        token, expires_at = yield self.make_token_async(self.scopes, self.service_account_id)
        timeout = int(expires_at - time.time())
        ts = _AE_TokenStorage_(id=key, token=token, expires=expires_at)
        # Only persist tokens that still have lifetime left.
        if timeout > 0:
            yield ts.put_async(memcache_timeout=timeout, use_datastore=self.retry_params.save_access_token, force_writes=True, use_cache=True, use_memcache=self.retry_params.memcache_access_token)
    raise ndb.Return(ts.token)
|
def _sample_points(X, centers, oversampling_factor, random_state):
    r"""Sample points independently with probability

    .. math::
       p_x = \frac{\ell \cdot d^2(x, \mathcal{C})}{\phi_X(\mathcal{C})}
    """
    # re-implement evaluate_cost here, to avoid redundant computation
    sq_distances = pairwise_distances(X, centers).min(1) ** 2
    probabilities = oversampling_factor * sq_distances / sq_distances.sum()
    # Bernoulli draw per point; keep indices whose probability wins.
    draws = random_state.uniform(size=len(probabilities), chunks=probabilities.chunks)
    sampled_idxs, = da.where(probabilities > draws)
    return sampled_idxs
|
def new(cls) -> 'Generator':
    """Creates and returns random generator point that satisfy BLS algorithm requirements.

    :return: BLS generator
    """
    logger = logging.getLogger(__name__)
    logger.debug("Generator::new: >>>")
    # The native layer fills this pointer with the new generator handle.
    instance_ptr = c_void_p()
    do_call(cls.new_handler, byref(instance_ptr))
    result = cls(instance_ptr)
    logger.debug("Generator::new: <<< res: %r", result)
    return result
|
def set_mode_flag(self, flag, enable):
    '''Enables/disables MAV_MODE_FLAG

    @param flag The mode flag,
      see MAV_MODE_FLAG enum
    @param enable Enable the flag, (True/False)
    '''
    # Guard clause: only the MAVLink 1.0+ protocol supports this command.
    if not self.mavlink10():
        print("Set mode flag not supported")
        return
    mode = self.base_mode
    if enable == True:
        mode |= flag
    elif enable == False:
        mode &= ~flag
    self.mav.command_long_send(
        self.target_system, self.target_component,
        mavlink.MAV_CMD_DO_SET_MODE, 0,
        mode, 0, 0, 0, 0, 0, 0)
|
def state_to_modelparams(self, state):
    """Converts a QuTiP-represented state into a model parameter vector.

    :param qutip.Qobj state: State to be converted.
    :rtype: :class:`np.ndarray`
    :return: The representation of the given state in this basis,
        as a vector of real parameters.
    """
    flat_basis = self.flat()
    dense = state.data.todense().view(np.ndarray).flatten()
    # NB: assumes Hermitian state and basis!
    return np.real(flat_basis.conj().dot(dense))
|
def stmt_type(obj, mk=True):
    """Return standardized, backwards compatible object type String.

    This is a temporary solution to make sure type comparisons and
    matches keys of Statements and related classes are backwards
    compatible.
    """
    # Statements keep the class object itself when mk is set; everything
    # else (or mk=False) falls back to the bare class name string.
    if mk and isinstance(obj, Statement):
        return type(obj)
    return type(obj).__name__
|
def apply_patch(self, patch):
    """Applies given patch.

    :param patch: Patch.
    :type patch: Patch
    :return: Method success.
    :rtype: bool
    """
    history_file = File(self.__history_file)
    applied_uids = []
    if history_file.cache():
        applied_uids = [line.strip() for line in history_file.content]
    if patch.uid in applied_uids:
        LOGGER.debug("> '{0}' patch is already applied!".format(patch.name))
        return True
    LOGGER.debug("> Applying '{0}' patch!".format(patch.name))
    if not patch.apply():
        raise umbra.exceptions.PatchApplyError(
            "{0} | '{1}' patch failed to apply!".format(self.__class__.__name__, patch.path))
    # Record the applied patch uid in the history file.
    history_file.content = ["{0}\n".format(patch.uid)]
    history_file.append()
    return True
|
def _collect_settings(self, apps):
    """Iterate over given apps or INSTALLED_APPS and collect the content
    of each's settings file, which is expected to be in JSON format."""
    if apps:
        # Validate explicit app names before reading anything.
        for app in apps:
            if app not in settings.INSTALLED_APPS:
                raise CommandError("Application '{0}' not in settings.INSTALLED_APPS".format(app))
    else:
        apps = settings.INSTALLED_APPS
    contents = {}
    for app in apps:
        module = import_module(app)
        # A package may span several directories; take the first file found.
        for module_dir in module.__path__:
            candidate = os.path.abspath(os.path.join(module_dir, self.json_file))
            if os.path.isfile(candidate):
                with open(candidate, 'r') as fp:
                    contents[app] = json.load(fp)
    return contents
|
def do_delete(endpoint, access_token):
    '''Do an HTTP DELETE request and return the response.

    Args:
        endpoint (str): Azure Resource Manager management endpoint.
        access_token (str): A valid Azure authentication token.

    Returns:
        HTTP response.
    '''
    # Bearer auth plus a descriptive User-Agent on every ARM call.
    headers = {"Authorization": 'Bearer ' + access_token}
    headers['User-Agent'] = get_user_agent()
    return requests.delete(endpoint, headers=headers)
|
def gauss_box_model(x, amplitude=1.0, mean=0.0, stddev=1.0, hpix=0.5):
    """Integrate a Gaussian profile.

    Integrates the normalized Gaussian over a pixel of half-width
    ``hpix`` centred at ``x``, scaled by ``amplitude``.
    """
    # CDF difference over the pixel edges [x - hpix, x + hpix].
    upper = (x - mean + hpix) / stddev
    lower = (x - mean - hpix) / stddev
    return amplitude * (norm.cdf(upper) - norm.cdf(lower))
|
def check_db_for_missing_notifications():
    """Check the database for missing notifications.

    Polls MTurk for every participant still marked "working" past the
    allowed duration, reconciles their status with the assignment state
    reported by AWS, emails the researcher a report and, for
    abandoned/returned assignments, pauses recruitment and expires the
    HIT.
    """
    aws_access_key_id = os.environ['aws_access_key_id']
    aws_secret_access_key = os.environ['aws_secret_access_key']
    if config.getboolean('Shell Parameters', 'launch_in_sandbox_mode'):
        conn = MTurkConnection(aws_access_key_id=aws_access_key_id, aws_secret_access_key=aws_secret_access_key, host='mechanicalturk.sandbox.amazonaws.com')
    else:
        conn = MTurkConnection(aws_access_key_id=aws_access_key_id, aws_secret_access_key=aws_secret_access_key)
    # get all participants with status < 100
    participants = Participant.query.filter_by(status="working").all()
    # get current time
    current_time = datetime.now()
    # get experiment duration in seconds
    duration = float(config.get('HIT Configuration', 'duration')) * 60 * 60
    # NOTE(review): the grace period below is 120 s (2 min), although the
    # original comment said "+ 5 mins" -- confirm which is intended.
    for p in participants:
        p_time = (current_time - p.creation_time).total_seconds()
        if p_time > (duration + 120):
            print("Error: participant {} with status {} has been playing for too "
                  "long and no notification has arrived - "
                  "running emergency code".format(p.id, p.status))
            # get their assignment
            assignment_id = p.assignment_id
            # ask amazon for the status of the assignment
            try:
                assignment = conn.get_assignment(assignment_id)[0]
                status = assignment.AssignmentStatus
            except Exception:
                # Narrowed from a bare 'except:'; any AWS failure is
                # treated as "status unknown".
                status = None
            print("assignment status from AWS is {}".format(status))
            hit_id = p.hit_id
            # general email settings:
            username = os.getenv('wallace_email_username')
            fromaddr = username + "@gmail.com"
            email_password = os.getenv("wallace_email_key")
            toaddr = config.get('HIT Configuration', 'contact_email_on_error')
            whimsical = os.getenv("whimsical")
            if status == "Approved":
                # if its been approved, set the status accordingly
                print("status set to approved")
                p.status = "approved"
                session.commit()
            elif status == "Rejected":
                print("status set to rejected")
                # if its been rejected, set the status accordingly
                p.status = "rejected"
                session.commit()
            elif status == "Submitted":
                # if it has been submitted then resend a submitted notification
                args = {
                    'Event.1.EventType': 'AssignmentSubmitted',
                    'Event.1.AssignmentId': assignment_id
                }
                requests.post("http://" + os.environ['HOST'] + '/notifications', data=args)
                # send the researcher an email to let them know
                if whimsical:
                    msg = MIMEText("""Dearest Friend,\n\nI am writing to let you know that at
{}, during my regular (and thoroughly enjoyable) perousal of the most charming
participant data table, I happened to notice that assignment {} has been
taking longer than we were expecting. I recall you had suggested {} minutes as
an upper limit for what was an acceptable length of time for each assignement
, however this assignment had been underway for a shocking {} minutes, a full
{} minutes over your allowance. I immediately dispatched a telegram to our
mutual friends at AWS and they were able to assure me that although the
notification had failed to be correctly processed, the assignment had in fact
been completed. Rather than trouble you, I dealt with this myself and I can
assure you there is no immediate cause for concern. Nonetheless, for my own
peace of mind, I would appreciate you taking the time to look into this matter
at your earliest convenience.\n\nI remain your faithful and obedient servant,
\nAlfred R. Wallace\n\n P.S. Please do not respond to this message, I am busy
with other matters.""".format(datetime.now(), assignment_id, round(duration / 60), round(p_time / 60), round((p_time - duration) / 60)))
                    msg['Subject'] = "A matter of minor concern."
                else:
                    # BUG FIX: .format() must be applied to the template
                    # string, not to the MIMEText object (which has no
                    # .format method).
                    msg = MIMEText("""Dear experimenter,\n\nThis is an automated email from
Wallace. You are receiving this email because the Wallace platform has
discovered evidence that a notification from Amazon Web Services failed to
arrive at the server. Wallace has automatically contacted AWS and has
determined the dropped notification was a submitted notification (i.e. the
participant has finished the experiment). This is a non-fatal error and so
Wallace has auto-corrected the problem. Nonetheless you may wish to check the
database.\n\nBest,\nThe Wallace dev. team.\n\n Error details:\nAssignment: {}
\nAllowed time: {}\nTime since participant started: {}""".format(assignment_id, round(duration / 60), round(p_time / 60)))
                    msg['Subject'] = "Wallace automated email - minor error."
                # This method commented out as gmail now blocks emails from
                # new locations
                # server = smtplib.SMTP('smtp.gmail.com:587')
                # server.starttls()
                # server.login(username, email_password)
                # server.sendmail(fromaddr, toaddr, msg.as_string())
                # server.quit()
                print("Error - submitted notification for participant {} missed. "
                      "Database automatically corrected, but proceed with caution.".format(p.id))
            else:
                # if it has not been submitted shut everything down
                # first turn off autorecruit
                host = os.environ['HOST']
                host = host[:-len(".herokuapp.com")]
                args = json.dumps({"auto_recruit": "false"})
                headers = {
                    "Accept": "application/vnd.heroku+json; version=3",
                    "Content-Type": "application/json"
                }
                heroku_email_address = os.getenv('heroku_email_address')
                heroku_password = os.getenv('heroku_password')
                requests.patch("https://api.heroku.com/apps/{}/config-vars".format(host), data=args, auth=(heroku_email_address, heroku_password), headers=headers)
                # then force expire the hit via boto
                conn.expire_hit(hit_id)
                # send the researcher an email to let them know
                if whimsical:
                    msg = MIMEText("""Dearest Friend,\n\nI am afraid I write to you with most
grave tidings. At {}, during a routine check of the usually most delightful
participant data table, I happened to notice that assignment {} has been
taking longer than we were expecting. I recall you had suggested {} minutes as
an upper limit for what was an acceptable length of time for each assignment,
however this assignment had been underway for a shocking {} minutes, a full {}
minutes over your allowance. I immediately dispatched a telegram to our mutual
friends at AWS and they infact informed me that they had already sent us a
notification which we must have failed to process, implying that the
assignment had not been successfully completed. Of course when the seriousness
of this scenario dawned on me I had to depend on my trusting walking stick for
support: without the notification I didn't know to remove the old assignment's
data from the tables and AWS will have already sent their replacement, meaning
that the tables may already be in a most unsound state!\n\nI am sorry to
trouble you with this, however, I do not know how to proceed so rather than
trying to remedy the scenario myself, I have instead temporarily ceased
operations by expiring the HIT with the fellows at AWS and have refrained from
posting any further invitations myself. Once you see fit I would be most
appreciative if you could attend to this issue with the caution, sensitivity
and intelligence for which I know you so well.\n\nI remain your faithful and
obedient servant,\nAlfred R. Wallace\n\nP.S. Please do not respond to this
message, I am busy with other matters.""".format(datetime.now(), assignment_id, round(duration / 60), round(p_time / 60), round((p_time - duration) / 60)))
                    msg['Subject'] = "Most troubling news."
                else:
                    # BUG FIX: as above, apply .format() to the template
                    # string before wrapping it in MIMEText.
                    msg = MIMEText("""Dear experimenter,\n\nThis is an automated email from
Wallace. You are receiving this email because the Wallace platform has
discovered evidence that a notification from Amazon Web Services failed to
arrive at the server. Wallace has automatically contacted AWS and has
determined the dropped notification was an abandoned/returned notification
(i.e. the participant had returned the experiment or had run out of time).
This is a serious error and so Wallace has paused the experiment - expiring
the HIT on MTurk and setting auto_recruit to false. Participants currently
playing will be able to finish, however no further participants will be
recruited until you do so manually. We strongly suggest you use the details
below to check the database to make sure the missing notification has not caused
additional problems before resuming.\nIf you are receiving a lot of these
emails this suggests something is wrong with your experiment code.\n\nBest,
\nThe Wallace dev. team.\n\n Error details:\nAssignment: {}
\nAllowed time: {}\nTime since participant started: {}""".format(assignment_id, round(duration / 60), round(p_time / 60)))
                    msg['Subject'] = "Wallace automated email - major error."
                # This method commented out as gmail now blocks emails from
                # new locations
                # server = smtplib.SMTP('smtp.gmail.com:587')
                # server.starttls()
                # server.login(username, email_password)
                # server.sendmail(fromaddr, toaddr, msg.as_string())
                # server.quit()
                # send a notificationmissing notification
                args = {
                    'Event.1.EventType': 'NotificationMissing',
                    'Event.1.AssignmentId': assignment_id
                }
                requests.post("http://" + os.environ['HOST'] + '/notifications', data=args)
                print("Error - abandoned/returned notification for participant {} missed. "
                      "Experiment shut down. Please check database and then manually "
                      "resume experiment.".format(p.id))
|
def configure_default_logger(self, log_freq='midnight', log_total=30, log_level='INFO', log_format=ReportingFormats.DEFAULT.value, custom_args=''):
    """default logger that every Prosper script should use!!

    Args:
        log_freq (str): TimedRotatingFileHandle_str -- https://docs.python.org/3/library/logging.handlers.html#timedrotatingfilehandler
        log_total (int): how many log_freq periods between log rotations
        log_level (str): minimum desired log level https://docs.python.org/3/library/logging.html#logging-levels
        log_format (str): format for logging messages https://docs.python.org/3/library/logging.html#logrecord-attributes
        custom_args (str): special ID to include in (ALL) messages
    """
    ## Override defaults if required ##
    log_freq = self.config.get_option('LOGGING', 'log_freq', None, log_freq)
    log_total = self.config.get_option('LOGGING', 'log_total', None, log_total)
    ## Set up log file handles/name ##
    log_abspath = path.join(self.log_path, self.log_name + '.log')
    rotating_handler = TimedRotatingFileHandler(
        log_abspath, when=log_freq, interval=1, backupCount=int(log_total))
    self._configure_common('', log_level, log_format, 'default', rotating_handler, custom_args=custom_args)
|
def get_resources(self, ids, cache=True):
    """Retrieve ecs resources for serverless policies or related resources

    Requires arns in new format.
    https://docs.aws.amazon.com/AmazonECS/latest/userguide/ecs-resource-ids.html
    """
    # New-format arn idents look like '<type>/<cluster>/<resource>'.
    by_cluster = {}
    for arn in ids:
        _, ident = arn.rsplit(':', 1)
        parts = ident.split('/', 2)
        if len(parts) != 3:
            raise PolicyExecutionError("New format ecs arn required")
        by_cluster.setdefault(parts[1], []).append(parts[2])
    client = local_session(self.manager.session_factory).client('ecs')
    results = []
    for cluster_id, resource_ids in by_cluster.items():
        results.extend(self.process_cluster_resources(client, cluster_id, resource_ids))
    return results
|
def reshape(attrs, inputs, proto_obj):
    """Reshape the given array by the shape attribute."""
    # With a single input the shape already lives in the attributes.
    if len(inputs) == 1:
        return 'reshape', attrs, inputs[0]
    # Otherwise the target shape arrives as an initializer tensor; pull
    # its values out and pass them as a static 'shape' attribute instead.
    target_shape = [int(dim) for dim in proto_obj._params[inputs[1].name].asnumpy()]
    return 'reshape', {'shape': target_shape}, inputs[:1]
|
def libvlc_media_list_event_manager(p_ml):
    '''Get libvlc_event_manager from this media list instance.

    The p_event_manager is immutable, so you don't have to hold the lock.
    @param p_ml: a media list instance.
    @return: libvlc_event_manager.
    '''
    # Reuse the cached ctypes binding when one exists; otherwise build it
    # (result wrapped as EventManager, single MediaList argument).
    f = _Cfunctions.get('libvlc_media_list_event_manager', None) or _Cfunction('libvlc_media_list_event_manager', ((1,),), class_result(EventManager), ctypes.c_void_p, MediaList)
    return f(p_ml)
|
def decode(self):
    """Decompress compressed UTF16 value.

    Reads 2-bit mode flags (four per flag byte) that select how each
    UTF-16 code unit is reconstructed, then decodes the accumulated
    buffer as little-endian UTF-16.
    """
    # First byte of the encoded data is the shared high byte used by
    # mode 1 and the run mode below.
    hi = self.enc_byte()
    flagbits = 0
    while self.encpos < len(self.encdata):
        # Refill the flag byte when all four 2-bit entries are consumed.
        if flagbits == 0:
            flags = self.enc_byte()
            flagbits = 8
        flagbits -= 2
        t = (flags >> flagbits) & 3
        if t == 0:
            # Low byte only, high byte zero.
            self.put(self.enc_byte(), 0)
        elif t == 1:
            # Low byte combined with the shared high byte.
            self.put(self.enc_byte(), hi)
        elif t == 2:
            # Full code unit: low byte followed by its own high byte.
            self.put(self.enc_byte(), self.enc_byte())
        else:
            n = self.enc_byte()
            if n & 0x80:
                # Run of (n & 0x7f) + 2 code units: low bytes come from
                # the secondary stream offset by c, high byte is 'hi'.
                c = self.enc_byte()
                for _ in range((n & 0x7f) + 2):
                    lo = (self.std_byte() + c) & 0xFF
                    self.put(lo, hi)
            else:
                # Run of n + 2 plain bytes from the secondary stream.
                for _ in range(n + 2):
                    self.put(self.std_byte(), 0)
    return self.buf.decode("utf-16le", "replace")
|
def print_input_output(opts):
    """Prints the input and output directories to the console.

    :param opts: namespace that contains printable 'input' and 'output' fields.
    """
    if opts.is_dir:
        lines = ("Root input directory:\t" + opts.input,
                 "Outputting to:\t\t" + opts.output + "\n")
    else:
        # Single-file mode: the output path is the output dir plus the
        # input file name.
        lines = ("Input file:\t\t" + opts.input,
                 "Outputting to:\t\t" + opts.output + opts.input + "\n")
    for line in lines:
        print(line)
|
def check_wide_data_for_blank_choices(choice_col, wide_data):
    """Checks `wide_data` for null values in the choice column, and raises a
    helpful ValueError if null values are found.

    Parameters
    ----------
    choice_col : str.
        Denotes the column in `wide_data` that is used to record each
        observation's choice.
    wide_data : pandas dataframe.
        Contains one row for each observation. Should contain `choice_col`.

    Returns
    -------
    None.
    """
    has_nulls = wide_data[choice_col].isnull().any()
    if has_nulls:
        raise ValueError("One or more of the values in wide_data[choice_col] is null."
                         " Remove null values in the choice column or fill them in.")
    return None
|
def _set_version ( self , v , load = False ) :
    """Setter method for version, mapped from YANG variable
    /interface/port_channel/hide_vrrp_holer/vrrp/version (uint8).

    If this variable is read-only (config: false) in the
    source YANG file, then _set_version is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_version() directly.

    :param v: candidate value; coerced to the restricted uint8 type
        (range 2..3 per the YANG restriction below).
    :param load: True when the value is being populated from stored data
        rather than set directly by a caller.
    :raises AttributeError: when called directly on a key leaf of an
        already-instantiated list entry (keys are immutable).
    :raises ValueError: when ``v`` cannot be coerced to the YANG uint8 type.
    """
    # Key leaves cannot be modified once the list entry exists, unless we
    # are loading persisted data.
    parent = getattr ( self , "_parent" , None )
    if parent is not None and load is False :
        raise AttributeError ( "Cannot set keys directly when" + " within an instantiated list" )
    # Unwrap values that carry their own YANG type coercion hook.
    if hasattr ( v , "_utype" ) :
        v = v . _utype ( v )
    try :
        t = YANGDynClass ( v , base = RestrictedClassType ( base_type = RestrictedClassType ( base_type = int , restriction_dict = { 'range' : [ '0..255' ] } , int_size = 8 ) , restriction_dict = { 'range' : [ u'2..3' ] } ) , is_leaf = True , yang_name = "version" , rest_name = "version" , parent = self , path_helper = self . _path_helper , extmethods = self . _extmethods , register_paths = True , extensions = { u'tailf-common' : { u'info' : u'Set VRRP version 2/3' , u'key-default' : u'2' , u'cli-expose-key-name' : None , u'cli-hide-in-submode' : None } } , is_keyval = True , namespace = 'urn:brocade.com:mgmt:brocade-vrrp' , defining_module = 'brocade-vrrp' , yang_type = 'uint8' , is_config = True )
    except ( TypeError , ValueError ) :
        # Re-raise with the generated type description so callers see the
        # expected YANG type in the error payload.
        raise ValueError ( { 'error-string' : """version must be of a type compatible with uint8""" , 'defined-type' : "uint8" , 'generated-type' : """YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), restriction_dict={'range': [u'2..3']}), is_leaf=True, yang_name="version", rest_name="version", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Set VRRP version 2/3', u'key-default': u'2', u'cli-expose-key-name': None, u'cli-hide-in-submode': None}}, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-vrrp', defining_module='brocade-vrrp', yang_type='uint8', is_config=True)""" , } )
    self . __version = t
    # Notify the object of the change if it supports change hooks.
    if hasattr ( self , '_set' ) :
        self . _set ( )
|
def patch ( self , item , byte_order = BYTEORDER ) :
    """Return a memory :class:`Patch` for the given *item* that shall be
    patched in the `data source`.

    :param item: item to patch (a container or a field).
    :param byte_order: encoding :class:`Byteorder` for the item.
    :type byte_order: :class:`Byteorder`, :class:`str`
    :return: a :class:`Patch` describing the serialized content, or
        ``None`` when *item* is an empty container.
    :raises ContainerLengthError: container size is not byte aligned.
    :raises FieldIndexError: container does not start on a byte boundary.
    :raises FieldGroupOffsetError: field is misaligned within its group.
    :raises BufferError: serialized content has an unexpected length.
    :raises MemberTypeError: *item* is neither a container nor a field.
    """
    # Re-index the data object so all field indexes are current.
    self . index_data ( )
    if is_container ( item ) :
        length = item . container_size ( )
        # Use ``!=`` (not ``is not``) for int comparisons: identity of
        # ints is implementation-defined and raises SyntaxWarning on
        # modern CPython.
        if length [ 1 ] != 0 :  # Incomplete container
            raise ContainerLengthError ( item , length )
        field = item . first_field ( )
        if field is None :  # Empty container?
            return None
        index = field . index
        if index . bit != 0 :  # Bad placed container
            raise FieldIndexError ( field , index )
        # Create a dummy byte array filled with zero bytes.
        # The dummy byte array is necessary because the length of
        # the buffer must correlate to the field indexes of the
        # appending fields.
        buffer = bytearray ( b'\x00' * index . byte )
        # Append to the buffer the content mapped by the container fields
        item . serialize ( buffer , index , byte_order = byte_order )
        # Content of the buffer mapped by the container fields
        content = buffer [ index . byte : ]
        if len ( content ) != length [ 0 ] :  # Not correct filled buffer!
            raise BufferError ( len ( content ) , length [ 0 ] )
        return Patch ( content , index . address , byte_order , length [ 0 ] * 8 , 0 , False )
    elif is_field ( item ) :  # Field index
        index = item . index
        # Field alignment
        alignment = item . alignment
        if index . bit != alignment . bit_offset :  # Bad aligned field?
            raise FieldGroupOffsetError ( item , index , Alignment ( alignment . byte_size , index . bit ) )
        # Create a dummy byte array filled with zero bytes.
        # The dummy byte array is necessary because the length of
        # the buffer must correlate to the field index of the
        # appending field group.
        buffer = bytearray ( b'\x00' * index . byte )
        # Append to the buffer the content mapped by the field
        item . serialize ( buffer , index , byte_order = byte_order )
        # Content of the buffer mapped by the field group
        content = buffer [ index . byte : ]
        if len ( content ) != alignment . byte_size :  # Not correct filled buffer!
            raise BufferError ( len ( content ) , alignment . byte_size )
        # Patch size in bytes for the field in the content buffer
        patch_size , bit_offset = divmod ( item . bit_size , 8 )
        if bit_offset != 0 :
            # Field does not end on a byte boundary: the patch must be
            # injected (read-modify-write) rather than overwritten.
            inject = True
            patch_size += 1
        else :
            inject = False
        # Patch offset in bytes for the field in the content buffer
        patch_offset , bit_offset = divmod ( alignment . bit_offset , 8 )
        if bit_offset != 0 :
            inject = True
        if byte_order is Byteorder . big :
            start = alignment . byte_size - ( patch_offset + patch_size )
            stop = alignment . byte_size - patch_offset
        else :
            start = patch_offset
            stop = patch_offset + patch_size
        return Patch ( content [ start : stop ] , index . address + start , byte_order , item . bit_size , bit_offset , inject )
    else :
        raise MemberTypeError ( self , item )
|
def issue_date ( self ) :
    """Date when the DOI was issued (:class:`datetime.datetime.Datetime`).

    Returns ``None`` when no date entry of type ``'Issued'`` is present.
    """
    for entry in _pluralize ( self . _r [ 'dates' ] , 'date' ) :
        if entry [ '@dateType' ] == 'Issued' :
            return datetime . datetime . strptime ( entry [ '#text' ] , '%Y-%m-%d' )
|
def action ( self , action_id , ** kwargs ) :
    """Query an action, passing its parameters as keyword arguments.

    Two optional keyword parameters are consumed here rather than
    forwarded: ``method`` ('GET' by default, or 'POST') and ``encoding``
    (character set of the response, 'utf-8' by default). All remaining
    keyword arguments are sent as parameters of the action itself.
    """
    method = kwargs . pop ( 'method' , 'GET' )
    encoding = kwargs . pop ( 'encoding' , 'utf-8' )
    return self . request ( 'actions/' + action_id , method , kwargs , False , encoding )
|
def check_and_set_unreachability ( self , hosts , services ) :
    """Mark this object unreachable when every dependency is down.

    todo: this function do not care about execution_failure_criteria!

    :param hosts: hosts objects, used to get object in act_depend_of
    :type hosts: alignak.objects.host.Hosts
    :param services: services objects, used to get object in act_depend_of
    :type services: alignak.objects.service.Services
    :return: None
    """
    down_states = ( 'd' , 'DOWN' , 'c' , 'CRITICAL' , 'u' , 'UNKNOWN' , 'x' , 'UNREACHABLE' )
    for ( dep_id , _ , _ , _ ) in self . act_depend_of :
        dep = hosts [ dep_id ] if dep_id in hosts else services [ dep_id ]
        if dep . state not in down_states :
            # At least one parent is alive: stay reachable.
            return
    # Every parent is down (also reached when there are no parents,
    # matching the original behaviour).
    self . set_unreachable ( )
|
def split_none ( self ) :
    """Don't split the data and create an empty validation set."""
    empty_valid = self [ [ ] ]
    empty_valid . ignore_empty = True
    return self . _split ( self . path , self , empty_valid )
|
def get_timestamp ( str_len = 13 ) :
    """get timestamp string, length can only between 0 and 16"""
    is_valid_length = isinstance ( str_len , integer_types ) and 0 < str_len < 17
    if not is_valid_length :
        raise ParamsError ( "timestamp length can only between 0 and 16." )
    return builtin_str ( time . time ( ) ) . replace ( "." , "" ) [ : str_len ]
|
def pretty_spaces ( level ) :
    """Return spaces and new line.

    :type level: int or None
    :param level: deep level
    :rtype: unicode
    :return: string with new line and spaces
    """
    if level is None :
        return u''
    newline = os . linesep if level >= 0 else u''
    return newline + u' ' * ( INDENT * level )
|
def save_semantic_data_for_state ( state , state_path_full ) :
    """Saves the semantic data in a separate json file.

    :param state: The state of which the script file should be saved
    :param str state_path_full: The path to the file system storage location of the state
    :raises IOError: re-raised after logging when the file cannot be written.
    """
    target_file = os . path . join ( state_path_full , SEMANTIC_DATA_FILE )
    try :
        storage_utils . write_dict_to_json ( state . semantic_data , target_file )
    except IOError :
        # Log with full traceback, then propagate so callers can react.
        logger . exception ( "Storing of semantic data for state {0} failed! Destination path: {1}" . format ( state . get_path ( ) , target_file ) )
        raise
|
def _sysfs_attr ( name , value = None , log_lvl = None , log_msg = None ) :
    '''Simple wrapper with logging around sysfs.attr'''
    # Accept either a single path string or a list of path components.
    path_parts = [ name ] if isinstance ( name , six . string_types ) else name
    res = __salt__ [ 'sysfs.attr' ] ( os . path . join ( * path_parts ) , value )
    if not res and log_lvl is not None and log_msg is not None :
        log . log ( LOG [ log_lvl ] , log_msg )
    return res
|
def rotation_df ( ATT ) :
    '''return the current DCM rotation matrix'''
    dcm = Matrix3 ( )
    dcm . from_euler ( radians ( ATT . Roll ) , radians ( ATT . Pitch ) , radians ( ATT . Yaw ) )
    return dcm
|
def ssl_required ( allow_non_ssl = False ) :
    """Decorator that redirects plain-HTTP requests to HTTPS.

    Views decorated with this will always get redirected to https
    except when ``allow_non_ssl`` is set to true or the project has
    ``settings.SSL_ENABLED`` switched off.

    :param allow_non_ssl: when True, non-https requests pass through
        to the view without redirection.
    """
    import functools

    def wrapper ( view_func ) :
        # functools.wraps preserves the view's __name__/__doc__ so URL
        # introspection and debugging show the real view, not "_checkssl".
        @functools . wraps ( view_func )
        def _checkssl ( request , * args , ** kwargs ) :
            # Redirect only when SSL is enabled site-wide, the request is
            # insecure, and the view does not opt out via allow_non_ssl.
            if hasattr ( settings , 'SSL_ENABLED' ) and settings . SSL_ENABLED and not request . is_secure ( ) and not allow_non_ssl :
                return HttpResponseRedirect ( request . build_absolute_uri ( ) . replace ( 'http://' , 'https://' ) )
            return view_func ( request , * args , ** kwargs )
        return _checkssl
    return wrapper
|
def create_temporary_table ( self , table_name , custom_sql ) :
    """Create Temporary table based on sql query. This will be used as a basis for executing expectations.

    WARNING: this feature is new in v0.4.
    It hasn't been tested in all SQL dialects, and may change based on community feedback.

    :param table_name: name of the temporary table to create.
    :param custom_sql: SELECT statement whose result populates the table.
    """
    # NOTE(review): table_name and custom_sql are interpolated directly
    # into the DDL -- callers must not pass untrusted input here.
    ddl = "CREATE TEMPORARY TABLE {table_name} AS {custom_sql}" . format ( table_name = table_name , custom_sql = custom_sql )
    self . engine . execute ( ddl )
|
def last_first_initial ( self ) :
    """Return a name in the format of:

    Lastname, F. [(Nickname)]

    (note: each part carries a trailing space, matching the original output)
    """
    first_part = ", " + self . first_name [ : 1 ] + "." if self . first_name else ""
    result = "{}{} " . format ( self . last_name , first_part )
    if self . nickname :
        result += "({}) " . format ( self . nickname )
    return result
|
def stack_trace ( depth = None ) :
    """returns a print friendly stack trace at the current frame,
    without aborting the application.

    :param depth: The depth of the stack trace. if omitted, the entire
        stack will be printed.

    usage::

        print stack_trace(10)
    """
    separator = "----------------------------------------------------\n"
    # Skip this function's own frame and its immediate caller.
    frames = inspect . stack ( ) [ 2 : ]
    if depth :
        frames = frames [ : depth ]
    out = StringIO ( )
    out . write ( separator )
    for ( frame , file , line , context , code , status ) in frames :
        out . write ( "In %s from %s\n%s %s" % ( context , file , line , "\n" . join ( code ) ) )
    out . write ( separator )
    return out . getvalue ( )
|
def addDataToQueue ( self , displacement , reset = False ) :
    """Add the given displacement to the region's internal queue. Calls to compute
    will cause items in the queue to be dequeued in FIFO order.

    :param displacement: Two floats representing translation vector [dx, dy] to
        be passed to the linked regions via 'dataOut'
    :type displacement: list
    :param reset: Reset flag to be passed to the linked regions via 'resetOut'
    :type reset: bool
    """
    entry = { "dataOut" : list ( displacement ) , "reset" : bool ( reset ) }
    # appendleft + right-side pops elsewhere gives FIFO behaviour.
    self . queue . appendleft ( entry )
|
def plot_losses ( self , skip_start : int = 0 , skip_end : int = 0 , return_fig : bool = None ) -> Optional [ plt . Figure ] :
    """Plot training and validation losses on a single axes.

    Returns the figure when requested (explicitly or via defaults),
    otherwise renders it inline (sixel) outside notebooks.
    """
    fig , ax = plt . subplots ( 1 , 1 )
    train_losses = self . _split_list ( self . losses , skip_start , skip_end )
    train_iters = self . _split_list ( range_of ( self . losses ) , skip_start , skip_end )
    ax . plot ( train_iters , train_losses , label = 'Train' )
    valid_iters = self . _split_list_val ( np . cumsum ( self . nb_batches ) , skip_start , skip_end )
    valid_losses = self . _split_list_val ( self . val_losses , skip_start , skip_end )
    ax . plot ( valid_iters , valid_losses , label = 'Validation' )
    ax . set_ylabel ( 'Loss' )
    ax . set_xlabel ( 'Batches processed' )
    ax . legend ( )
    if ifnone ( return_fig , defaults . return_fig ) :
        return fig
    if not IN_NOTEBOOK :
        plot_sixel ( fig )
|
def __grant_generate ( grant , database , user , host = 'localhost' , grant_option = False , escape = True , ssl_option = False ) :
    '''Validate grants and build the query that could set the given grants

    Note that this query contains arguments for user and host but not for
    grants or database.

    :param grant: comma-separated privilege list, e.g. ``'select, insert'``.
    :param database: ``'<db>.<table>'`` target; either part may be ``'*'``.
    :param user: account name (bound as a query argument, not interpolated).
    :param host: account host (bound as a query argument, not interpolated).
    :param grant_option: when truthy, append ``WITH GRANT OPTION``.
    :param escape: quote the db/table identifiers unless they are ``'*'``.
    :param ssl_option: optional list of SSL requirements appended to the query.
    :return: dict with the generated ``qry`` string and its ``args`` mapping.
    '''
    # TODO: Re-order the grant so it is according to the
    # SHOW GRANTS for xxx@yyy query (SELECT comes first, etc)
    # Normalize separators to ", " and upper-case the privilege list.
    grant = re . sub ( r'\s*,\s*' , ', ' , grant ) . upper ( )
    grant = __grant_normalize ( grant )
    db_part = database . rpartition ( '.' )
    dbc = db_part [ 0 ]
    table = db_part [ 2 ]
    if escape :
        if dbc != '*' : # _ and % are authorized on GRANT queries and should get escaped
            # on the db name, but only if not requesting a table level grant
            dbc = quote_identifier ( dbc , for_grants = ( table == '*' ) )
        if table != '*' :
            table = quote_identifier ( table )
    # identifiers cannot be used as values, and same thing for grants
    qry = 'GRANT {0} ON {1}.{2} TO %(user)s@%(host)s' . format ( grant , dbc , table )
    args = { }
    args [ 'user' ] = user
    args [ 'host' ] = host
    if ssl_option and isinstance ( ssl_option , list ) :
        qry += __ssl_option_sanitize ( ssl_option )
    if salt . utils . data . is_true ( grant_option ) :
        qry += ' WITH GRANT OPTION'
    log . debug ( 'Grant Query generated: %s args %s' , qry , repr ( args ) )
    return { 'qry' : qry , 'args' : args }
|
def supported_languages ( self , task = None ) :
    """Languages that are covered by a specific task.

    Args:
        task (string): Task name. When omitted, lists the languages of
            all downloadable collections instead.
    """
    if not task :
        return [ coll . name . split ( ) [ 0 ] for coll in self . collections ( ) if Downloader . LANG_PREFIX in coll . id ]
    collection = self . get_collection ( task = task )
    return [ isoLangs [ pkg . id . split ( '.' ) [ 1 ] ] [ "name" ] for pkg in collection . packages ]
|
def connect ( self , username = None , passcode = None , wait = False , headers = None , ** keyword_headers ) :
    """Start a connection.

    :param str username: the username to connect with
    :param str passcode: the password used to authenticate with
    :param bool wait: if True, wait for the connection to be established/acknowledged
    :param dict headers: a map of any additional headers the broker requires
    :param keyword_headers: any additional headers the broker requires
    """
    merged = utils . merge_headers ( [ headers , keyword_headers ] )
    merged [ HDR_ACCEPT_VERSION ] = self . version
    if self . transport . vhost :
        merged [ HDR_HOST ] = self . transport . vhost
    if username is not None :
        merged [ HDR_LOGIN ] = username
    if passcode is not None :
        merged [ HDR_PASSCODE ] = passcode
    self . send_frame ( CMD_STOMP , merged )
    if not wait :
        return
    self . transport . wait_for_connection ( )
    if self . transport . connection_error :
        raise ConnectFailedException ( )
|
def smart_search ( cls , query_string , search_options = None , extra_query = None ) :
    """Perform a smart VRF search.

    Maps to the function
    :py:func:`nipap.backend.Nipap.smart_search_vrf` in the backend.
    Please see the documentation for the backend function for
    information regarding input arguments and return values.
    """
    if search_options is None :
        search_options = { }
    xmlrpc = XMLRPCConnection ( )
    try :
        smart_result = xmlrpc . connection . smart_search_vrf ( { 'query_string' : query_string , 'search_options' : search_options , 'auth' : AuthOptions ( ) . options , 'extra_query' : extra_query } )
    except xmlrpclib . Fault as xml_fault :
        raise _fault_to_exception ( xml_fault )
    result = { 'interpretation' : smart_result [ 'interpretation' ] , 'search_options' : smart_result [ 'search_options' ] , 'error' : smart_result [ 'error' ] }
    if 'error_message' in smart_result :
        result [ 'error_message' ] = smart_result [ 'error_message' ]
    result [ 'result' ] = [ VRF . from_dict ( v ) for v in smart_result [ 'result' ] ]
    return result
|
def plural ( self , text , count = None ) :
    """Return the plural of text.

    If count supplied, then return text if count is one of:
        1, a, an, one, each, every, this, that
    otherwise return the plural.

    Whitespace at the start and end is preserved.
    """
    pre , word , post = self . partition_word ( text )
    if not word :
        return text
    # Try special adjective/verb handling first, then fall back to nouns.
    pluralized = ( self . _pl_special_adjective ( word , count )
                  or self . _pl_special_verb ( word , count )
                  or self . _plnoun ( word , count ) )
    return "{}{}{}" . format ( pre , self . postprocess ( word , pluralized ) , post )
|
def move ( self , target ) :
    """Moves this DriveItem to another Folder.
    Can't move between different Drives.

    :param target: a Folder, Drive item or Item Id string.
        If it's a drive the item will be moved to the root folder.
    :type target: drive.Folder or DriveItem or str
    :return: Success / Failure
    :rtype: bool
    """
    # Resolve the destination folder id from the accepted target kinds.
    if isinstance ( target , Folder ) :
        dest_id = target . object_id
    elif isinstance ( target , Drive ) :
        # A drive means "its root folder".
        root_folder = target . get_root_folder ( )
        if not root_folder :
            return False
        dest_id = root_folder . object_id
    elif isinstance ( target , str ) :
        dest_id = target
    else :
        raise ValueError ( 'Target must be a Folder or Drive' )
    if not self . object_id or not dest_id :
        raise ValueError ( 'Both self, and target must have a valid object_id.' )
    if dest_id == 'root' :
        raise ValueError ( "When moving, target id can't be 'root'" )
    item_url = self . build_url ( self . _endpoints . get ( 'item' ) . format ( id = self . object_id ) )
    resp = self . con . patch ( item_url , data = { 'parentReference' : { 'id' : dest_id } } )
    if not resp :
        return False
    self . parent_id = dest_id
    return True
|
def _main ( ) :
    """Some demo.

    With no arguments: runs a series of deliberately failing snippets to
    showcase the exception formatter's output, then installs it as the
    global excepthook and triggers one final unhandled error.
    With ``test``: runs every module-level ``test_*`` function.
    ``debug_shell`` / ``debug_shell_exception`` open debugging shells.
    """
    if sys . argv [ 1 : ] == [ "test" ] :
        # Run every module-level test_* function in sorted order.
        for k , v in sorted ( globals ( ) . items ( ) ) :
            if not k . startswith ( "test_" ) :
                continue
            print ( "running: %s()" % k )
            v ( )
        print ( "ok." )
        sys . exit ( )
    elif sys . argv [ 1 : ] == [ "debug_shell" ] :
        debug_shell ( locals ( ) , globals ( ) )
        sys . exit ( )
    elif sys . argv [ 1 : ] == [ "debug_shell_exception" ] :
        try :
            raise Exception ( "demo exception" )
        except Exception :
            better_exchook ( * sys . exc_info ( ) , debugshell = True )
        sys . exit ( )
    elif sys . argv [ 1 : ] :
        print ( "Usage: %s (test|...)" % sys . argv [ 0 ] )
        sys . exit ( 1 )
    # some examples
    # this code produces this output: https://gist.github.com/922622
    try :
        x = { 1 : 2 , "a" : "b" }
        # noinspection PyMissingOrEmptyDocstring
        def f ( ) :
            y = "foo"
            # "z" below is intentionally undefined: demo NameError output.
            # noinspection PyUnresolvedReferences,PyStatementEffect
            x , 42 , sys . stdin . __class__ , sys . exc_info , y , z
        f ( )
    except Exception :
        better_exchook ( * sys . exc_info ( ) )
    # Intentional TypeError: lambda called with too many arguments.
    try : # noinspection PyArgumentList
        ( lambda _x : None ) ( __name__ , 42 )
        # multiline
    except Exception :
        better_exchook ( * sys . exc_info ( ) )
    # Intentional AssertionError, featuring a multi-line repr.
    try :
        class Obj :
            def __repr__ ( self ) :
                return ( "<Obj multi-\n" + " line repr>" )
        obj = Obj ( )
        assert not obj
    except Exception :
        better_exchook ( * sys . exc_info ( ) )
    # noinspection PyMissingOrEmptyDocstring
    def f1 ( a ) :
        f2 ( a + 1 , 2 )
    # noinspection PyMissingOrEmptyDocstring
    def f2 ( a , b ) :
        f3 ( a + b )
    # noinspection PyMissingOrEmptyDocstring
    def f3 ( a ) :
        b = ( "abc" * 100 ) + "-interesting"
        # some long demo str
        a ( b )
        # error, not callable
    try :
        f1 ( 13 )
    except Exception :
        better_exchook ( * sys . exc_info ( ) )
    # use this to overwrite the global exception handler
    install ( )
    # and fail
    # "finalfail" is intentionally undefined so the installed hook fires.
    # noinspection PyUnresolvedReferences
    finalfail ( sys )
|
def EncodeForCSV ( x ) :
    """Encodes one value for CSV.

    Values containing a comma or a double quote are wrapped in quotes
    with inner quotes doubled (RFC 4180 style), then UTF-8 encoded.

    :param x: text value to encode.
    :return: the encoded value as UTF-8 bytes.
    """
    # Quote BEFORE encoding: on Python 3, ``x.encode()`` yields bytes,
    # so the original ``',' in k`` membership test raised TypeError.
    if ',' in x or '"' in x :
        x = '"%s"' % x . replace ( '"' , '""' )
    return x . encode ( 'utf-8' )
|
def from_string ( cls , s , space ) :
    """Produce a TopNumber by hashing a string.

    :param s: text or bytes to hash; text is UTF-8 encoded first, since
        Python 3's hashlib only accepts bytes.
    :param space: forwarded unchanged to :meth:`from_hex`.
    :return: whatever :meth:`from_hex` builds from the SHA-1 hex digest.
    """
    import hashlib
    if not isinstance ( s , bytes ) :
        # hashlib requires bytes; keep accepting str for compatibility.
        s = s . encode ( 'utf-8' )
    hs = hashlib . sha1 ( s ) . hexdigest ( )
    return cls . from_hex ( hs , space )
|
def children ( self , p_todo , p_only_direct = False ) :
    """Returns a list of child todos that the given todo (in)directly depends
    on.
    """
    # The dependency graph is keyed by todo hashes; follow outgoing edges
    # (transitively unless only direct children were requested).
    child_hashes = self . _depgraph . outgoing_neighbors ( hash ( p_todo ) , not p_only_direct )
    return [ self . _tododict [ child_hash ] for child_hash in child_hashes ]
|
def histogram_phase ( phase_slices , phase , histbins = 200 , show_plot = False ) :
    """histograms the phase slices such as to build a histogram of the position
    distribution at each phase value.

    Parameters
    ----------
    phase_slices : ndarray
        2d array containing slices from many oscillations at each phase
    phase : ndarray
        1d array of phases corresponding to slices
    histbins : int, optional (default=200)
        number of bins to use in histogramming data
    show_plot : bool, optional (default=False)
        if true plots and shows the heatmap of the
        phase against the position distribution

    Returns
    -------
    counts_array : ndarray
        2d array containing the number of counts varying with
        phase and position.
    bin_edges : ndarray
        positions of bin edges
    """
    # One common histogram range so bins line up across phases.
    hist_range = [ phase_slices . min ( ) , phase_slices . max ( ) ]
    counts_array = _np . zeros ( [ len ( phase ) , histbins ] )
    # Histogram the position distribution at each value of phase.
    for row , phase_slice in enumerate ( phase_slices ) :
        counts , bin_edges = _np . histogram ( phase_slice , bins = histbins , range = hist_range )
        counts_array [ row ] = counts
    counts_array_transposed = _np . transpose ( _np . array ( counts_array ) ) . astype ( float )
    if show_plot == True :
        fig = _plt . figure ( figsize = ( 12 , 6 ) )
        ax = fig . add_subplot ( 111 )
        ax . set_title ( 'Phase Distribution' )
        ax . set_xlabel ( "phase (°)" )
        ax . set_ylabel ( "x" )
        _plt . imshow ( counts_array_transposed , cmap = 'hot' , interpolation = 'nearest' , extent = [ phase [ 0 ] , phase [ - 1 ] , hist_range [ 0 ] , hist_range [ 1 ] ] )
        ax . set_aspect ( 'auto' )
        _plt . show ( )
    return counts_array_transposed , bin_edges
|
def resolve_format ( format , path ) :
    """Looks at a file's extension and format (if any) and returns format.

    An explicit format wins (lower-cased); otherwise the extension is
    matched, and ``None`` is returned for unrecognized extensions.
    """
    if format is not None :
        return format . lower ( )
    for pattern , resolved in ( ( r'.+\.(yml|yaml)$' , 'yaml' ) , ( r'.+\.tsv$' , 'tsv' ) ) :
        if re . match ( pattern , path ) :
            return resolved
    return None
|
# Trailing non-code text (web-page residue accidentally appended to this file);
# kept as comments so the module remains importable:
# Subsets and Splits
# No community queries yet
# The top public SQL queries from the community will appear here once available.