def p_arg1(p):
    """arg1 : STRING
            | NUMBER
            | IDENT
            | GLOBAL"""
    # a hack to support "clear global"
    p[0] = node.string(value=str(p[1]), lineno=p.lineno(1), lexpos=p.lexpos(1))
|
def cross_val_score(estimator, X, y=None, scoring=None, cv=None, n_jobs=1, verbose=0, fit_params=None, pre_dispatch='2*n_jobs'):
    """Evaluate a score by cross-validation

    Parameters
    ----------
    estimator : estimator object implementing 'fit'
        The object to use to fit the data.
    X : array-like
        The data to fit. Can be, for example a list, or an array at least 2d.
    y : array-like, optional, default: None
        The target variable to try to predict in the case of
        supervised learning.
    scoring : string, callable or None, optional, default: None
        A string (see model evaluation documentation) or
        a scorer callable object / function with signature
        ``scorer(estimator, X, y)``.
    cv : cross-validation generator or int, optional, default: None
        A cross-validation generator to use. If int, determines
        the number of folds in StratifiedKFold if y is binary
        or multiclass and estimator is a classifier, or the number
        of folds in KFold otherwise. If None, it is equivalent to cv=3.
    n_jobs : integer, optional
        The number of CPUs to use to do the computation. -1 means
        'all CPUs'.
    verbose : integer, optional
        The verbosity level.
    fit_params : dict, optional
        Parameters to pass to the fit method of the estimator.
    pre_dispatch : int, or string, optional
        Controls the number of jobs that get dispatched during parallel
        execution. Reducing this number can be useful to avoid an
        explosion of memory consumption when more jobs get dispatched
        than CPUs can process. This parameter can be:

            - None, in which case all the jobs are immediately
              created and spawned. Use this for lightweight and
              fast-running jobs, to avoid delays due to on-demand
              spawning of the jobs
            - An int, giving the exact number of total jobs that are
              spawned
            - A string, giving an expression as a function of n_jobs,
              as in '2*n_jobs'

    Returns
    -------
    scores : array of float, shape=(len(list(cv)),)
        Array of scores of the estimator for each run of the cross validation."""
    X, y = indexable(X, y)
    cv = _check_cv(cv, X, y, classifier=is_classifier(estimator))
    scorer = check_scoring(estimator, scoring=scoring)
    # We clone the estimator to make sure that all the folds are
    # independent, and that it is pickle-able.
    parallel = Parallel(n_jobs=n_jobs, verbose=verbose, pre_dispatch=pre_dispatch)
    scores = parallel(delayed(_fit_and_score)(clone(estimator), X, y, scorer, train, test, verbose, None, fit_params)
                      for train, test in cv)
    return np.array(scores)[:, 0]
|
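# Usage sketch (not part of the original record): assumes scikit-learn is
# installed and that the cross_val_score defined above behaves like the
# public scikit-learn helper of the same name.
from sklearn.datasets import load_iris
from sklearn.tree import DecisionTreeClassifier

X, y = load_iris(return_X_y=True)
clf = DecisionTreeClassifier(random_state=0)

# Five stratified folds; one score is returned per fold.
scores = cross_val_score(clf, X, y, cv=5)
print(scores.mean(), scores.std())
|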
def remove(self, item):
    """See :meth:`list.remove()` method

    Calls observer ``self.observer(UpdateType.DELETED, item, index)`` where
    **index** is *item position*"""
    index = self.index(item)
    self.real_list.remove(item)
    self.observer(UpdateType.DELETED, item, index)
|
def _get_authorization_headers(self, context):
    """Gets the authorization headers for a request.

    Returns:
        Sequence[Tuple[str, str]]: A list of request headers (key, value)
        to add to the request."""
    headers = {}
    self._credentials.before_request(self._request, context.method_name, context.service_url, headers)
    return list(six.iteritems(headers))
|
def ix_(selection, shape):
    """Convert an orthogonal selection to a numpy advanced (fancy) selection, like numpy.ix_
    but with support for slices and single ints."""
    # normalisation
    selection = replace_ellipsis(selection, shape)
    # replace slice and int as these are not supported by numpy.ix_
    selection = [slice_to_range(dim_sel, dim_len) if isinstance(dim_sel, slice)
                 else [dim_sel] if is_integer(dim_sel)
                 else dim_sel
                 for dim_sel, dim_len in zip(selection, shape)]
    # now get numpy to convert to a coordinate selection
    selection = np.ix_(*selection)
    return selection
|
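# Context note (added): a short numpy-only example of what the np.ix_ conversion
# above produces once slices and ints have been normalised to lists; the helpers
# replace_ellipsis, slice_to_range and is_integer are assumed to come from the
# surrounding library.
import numpy as np

a = np.arange(16).reshape(4, 4)

# Orthogonal selection of rows [0, 2] and columns [1, 3], expressed as lists.
rows, cols = [0, 2], [1, 3]

# np.ix_ builds broadcastable index arrays, turning the orthogonal selection
# into a fancy (coordinate) selection of the 2x2 sub-block.
sub = a[np.ix_(rows, cols)]
print(sub)  # [[ 1  3]
            #  [ 9 11]]
|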
def _mirror_idx_cov(self, f_values, idx1):  # will most likely be removed
    """obsolete and subject to removal (TODO),
    return indices for negative ("active") update of the covariance matrix
    assuming that ``f_values[idx1[i]]`` and ``f_values[-1-i]`` are
    the corresponding mirrored values

    computes the index of the worse solution sorted by the f-value of the
    better solution.

    TODO: when the actual mirror was rejected, it is better
    to return idx1 instead of idx2.

    Remark: this function might not be necessary at all: if the worst solution
    is the best mirrored, the covariance matrix updates cancel (cave: weights
    and learning rates), which seems what is desirable. If the mirror is bad,
    a strong negative update is made, again what is desirable.
    And the fitness--step-length correlation is in part addressed by
    using flat weights."""
    idx2 = np.arange(len(f_values) - 1, len(f_values) - 1 - len(idx1), -1)
    f = []
    for i in rglen((idx1)):
        f.append(min((f_values[idx1[i]], f_values[idx2[i]])))
        # idx.append(idx1[i] if f_values[idx1[i]] > f_values[idx2[i]] else idx2[i])
    return idx2[np.argsort(f)][-1::-1]
|
def PlaceCall(self, *Targets):
    """Places a call to a single user or creates a conference call.

    :Parameters:
      Targets : str
        One or more call targets. If multiple targets are specified, a conference call is
        created. The call target can be a Skypename, phone number, or speed dial code.

    :return: A call object.
    :rtype: `call.Call`"""
    calls = self.ActiveCalls
    reply = self._DoCommand('CALL %s' % ', '.join(Targets))
    # Skype for Windows returns the call status which gives us the call Id;
    if reply.startswith('CALL '):
        return Call(self, chop(reply, 2)[1])
    # On Linux we get 'OK' as reply so we search for the new call on
    # the list of active calls.
    for c in self.ActiveCalls:
        if c not in calls:
            return c
    raise SkypeError(0, 'Placing call failed')
|
def _set_collector_vrf(self, v, load=False):
    """Setter method for collector_vrf, mapped from YANG variable /sflow/collector_vrf (common-def:vrf-name)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_collector_vrf is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_collector_vrf() directly."""
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        t = YANGDynClass(v, base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'((([a-zA-Z0-9_]([a-zA-Z0-9\\-_]){0,61})?[a-zA-Z0-9]\\.)*([a-zA-Z0-9_]([a-zA-Z0-9\\-_]){0,61})?[a-zA-Z0-9]\\.?)|\\.', 'length': [u'1..32']}), is_leaf=True, yang_name="collector-vrf", rest_name="collector-vrf", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Sflow Collector VRF Configuration', u'hidden': u'full'}}, namespace='urn:brocade.com:mgmt:brocade-sflow', defining_module='brocade-sflow', yang_type='common-def:vrf-name', is_config=True)
    except (TypeError, ValueError):
        raise ValueError({'error-string': """collector_vrf must be of a type compatible with common-def:vrf-name""", 'defined-type': "common-def:vrf-name", 'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'((([a-zA-Z0-9_]([a-zA-Z0-9\\-_]){0,61})?[a-zA-Z0-9]\\.)*([a-zA-Z0-9_]([a-zA-Z0-9\\-_]){0,61})?[a-zA-Z0-9]\\.?)|\\.', 'length': [u'1..32']}), is_leaf=True, yang_name="collector-vrf", rest_name="collector-vrf", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Sflow Collector VRF Configuration', u'hidden': u'full'}}, namespace='urn:brocade.com:mgmt:brocade-sflow', defining_module='brocade-sflow', yang_type='common-def:vrf-name', is_config=True)"""})
    self.__collector_vrf = t
    if hasattr(self, '_set'):
        self._set()
|
def operator_same_class(method):
    """Intended to wrap operator methods, this decorator ensures the `other`
    parameter is of the same type as the `self` parameter.

    :param method: The method being decorated.
    :return: The wrapper to replace the method with."""
    def wrapper(self, other):
        if not isinstance(other, self.__class__):
            raise TypeError('unsupported operand types: \'{0}\' and \'{1}\''.format(self.__class__.__name__, other.__class__.__name__))
        return method(self, other)
    return wrapper
|
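# Illustration (added): a minimal, self-contained sketch of applying the
# decorator above to an arithmetic operator; the Money class is hypothetical.
class Money:
    """Toy value object used only to illustrate operator_same_class."""

    def __init__(self, cents):
        self.cents = cents

    @operator_same_class
    def __add__(self, other):
        # `other` is guaranteed to be a Money instance by the decorator.
        return Money(self.cents + other.cents)

total = Money(150) + Money(250)   # works: both operands are Money
Money(100) + 100                  # raises TypeError from the wrapper
|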
def write_report(self, template):
    """Write the compiled jinja template to the results file

    :param template str: the compiled jinja template"""
    with open(self.fn, 'w') as f:
        f.write(template)
|
def _renew_by(name, window=None):
    '''Date before a certificate should be renewed

    :param name: Common Name of the certificate (DNS name of certificate)
    :param window: days before expiry date to renew
    :return datetime object of first renewal date'''
    expiry = _expires(name)
    if window is not None:
        expiry = expiry - datetime.timedelta(days=window)
    return expiry
|
def get_networks(project_id, include_data='N', **kwargs):
    """Get all networks in a project
    Returns an array of network objects."""
    log.info("Getting networks for project %s", project_id)
    user_id = kwargs.get('user_id')
    project = _get_project(project_id)
    project.check_read_permission(user_id)
    rs = db.DBSession.query(Network.id, Network.status).filter(Network.project_id == project_id).all()
    networks = []
    for r in rs:
        if r.status != 'A':
            continue
        try:
            net = network.get_network(r.id, summary=True, include_data=include_data, **kwargs)
            log.info("Network %s retrieved", net.name)
            networks.append(net)
        except PermissionError:
            log.info("Not returning network %s as user %s does not have "
                     "permission to read it." % (r.id, user_id))
    return networks
|
def is_empty(self):
    """Return whether this rule is considered "empty" -- i.e., has no
    contents that should end up in the final CSS."""
    if self.properties:
        # Rules containing CSS properties are never empty
        return False
    if not self.descendants:
        for header in self.ancestry.headers:
            if header.is_atrule and header.directive != '@media':
                # At-rules should always be preserved, UNLESS they are @media
                # blocks, which are known to be noise if they don't have any
                # contents of their own
                return False
    return True
|
def progress_updater(size, total):
    """Progress reporter for checksum verification."""
    current_task.update_state(state=state('PROGRESS'), meta=dict(size=size, total=total))
|
def word_spans(self):
    """The list of spans representing ``words`` layer elements."""
    if not self.is_tagged(WORDS):
        self.tokenize_words()
    return self.spans(WORDS)
|
def check_input(self, token):
    """Performs checks on the input token. Raises an exception if unsupported.

    :param token: the token to check
    :type token: Token"""
    if isinstance(token.payload, Evaluation):
        return None
    if isinstance(token.payload, ClusterEvaluation):
        return None
    raise Exception(self.full_name + ": Input token is not a supported Evaluation object - " + classes.get_classname(token.payload))
|
def update_user_display_name(user, **kwargs):
    """Update a user's display name"""
    # check_perm(kwargs.get('user_id'), 'edit_user')
    try:
        user_i = db.DBSession.query(User).filter(User.id == user.id).one()
        user_i.display_name = user.display_name
        return user_i
    except NoResultFound:
        raise ResourceNotFoundError("User (id=%s) not found" % (user.id))
|
def decode(pieces, sequence_length, model_file=None, model_proto=None, reverse=False, name=None):
    """Decode pieces into postprocessed text.

    Args:
      pieces: A 2D int32 or string tensor [batch_size x max_length] of
        encoded sequences.
      sequence_length: A 1D int32 tensor [batch_size] representing the
        length of pieces.
      model_file: The sentencepiece model file path.
      model_proto: The sentencepiece model serialized proto.
        Either `model_file` or `model_proto` must be set.
      reverse: Reverses the tokenized sequence (Default = false)
      name: The name argument that is passed to the op function.

    Returns:
      text: A 1D string tensor of decoded string."""
    return _gen_sentencepiece_processor_op.sentencepiece_decode(pieces, sequence_length, model_file=model_file, model_proto=model_proto, reverse=reverse, name=name)
|
def clean():
    "take out the trash"
    src_dir = easy.options.setdefault("docs", {}).get('src_dir', None)
    if src_dir is None:
        src_dir = 'src' if easy.path('src').exists() else '.'
    with easy.pushd(src_dir):
        for pkg in set(easy.options.setup.packages) | set(("tests",)):
            for filename in glob.glob(pkg.replace('.', os.sep) + "/*.py[oc~]"):
                easy.path(filename).remove()
|
def _assign_uid(self, sid):
    """Purpose: Assign a uid to the current object based on the sid passed. Pass the current uid to children of
    the current object"""
    self._uid = ru.generate_id('pipeline.%(item_counter)04d', ru.ID_CUSTOM, namespace=sid)
    for stage in self._stages:
        stage._assign_uid(sid)
    self._pass_uid()
|
def _RunMethod(dev, args, extra):
    """Runs a method registered via MakeSubparser."""
    logging.info('%s(%s)', args.method.__name__, ', '.join(args.positional))
    result = args.method(dev, *args.positional, **extra)
    if result is not None:
        if isinstance(result, io.StringIO):
            sys.stdout.write(result.getvalue())
        elif isinstance(result, (list, types.GeneratorType)):
            r = ''
            for r in result:
                r = str(r)
                sys.stdout.write(r)
            if not r.endswith('\n'):
                sys.stdout.write('\n')
        else:
            result = str(result)
            sys.stdout.write(result)
            if not result.endswith('\n'):
                sys.stdout.write('\n')
    return 0
|
def create_review(self, review, pub_name, ext_name):
    """CreateReview.
    [Preview API] Creates a new review for an extension

    :param :class:`<Review> <azure.devops.v5_0.gallery.models.Review>` review: Review to be created for the extension
    :param str pub_name: Name of the publisher who published the extension
    :param str ext_name: Name of the extension
    :rtype: :class:`<Review> <azure.devops.v5_0.gallery.models.Review>`"""
    route_values = {}
    if pub_name is not None:
        route_values['pubName'] = self._serialize.url('pub_name', pub_name, 'str')
    if ext_name is not None:
        route_values['extName'] = self._serialize.url('ext_name', ext_name, 'str')
    content = self._serialize.body(review, 'Review')
    response = self._send(http_method='POST', location_id='e6e85b9d-aa70-40e6-aa28-d0fbf40b91a3', version='5.0-preview.1', route_values=route_values, content=content)
    return self._deserialize('Review', response)
|
def p_InClassDefList(p):
    '''InClassDefList : InClassDef
                      | InClassDefList InClassDef'''
    if len(p) < 3:
        p[0] = InClassDefList(None, p[1])
    else:
        p[0] = InClassDefList(p[1], p[2])
|
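# Background sketch (added): both p_arg1 and p_InClassDefList above follow the
# PLY (ply.yacc) convention, where the grammar production lives in the docstring
# and the body builds the node for that production. The tiny grammar below is
# illustrative only and assumes PLY is installed.
import ply.lex as lex
import ply.yacc as yacc

tokens = ('NUMBER',)
t_ignore = ' \t'

def t_NUMBER(t):
    r'\d+'
    t.value = int(t.value)
    return t

def t_error(t):
    t.lexer.skip(1)

def p_exprlist(p):
    '''exprlist : NUMBER
                | exprlist NUMBER'''
    # PLY binds p[1], p[2], ... to the matched symbols of the chosen production.
    p[0] = [p[1]] if len(p) < 3 else p[1] + [p[2]]

def p_error(p):
    pass

lexer = lex.lex()
parser = yacc.yacc()
print(parser.parse('1 2 3'))  # [1, 2, 3]
|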
def agent_check_warn(consul_url=None, token=None, checkid=None, **kwargs):
    '''This endpoint is used with a check that is of the TTL type. When this
    is called, the status of the check is set to warning and the TTL
    clock is reset.

    :param consul_url: The Consul server URL.
    :param checkid: The ID of the check to deregister from Consul.
    :param note: A human-readable message with the status of the check.
    :return: Boolean and message indicating success or failure.

    CLI Example:

    .. code-block:: bash

        salt '*' consul.agent_check_warn checkid='redis_check1' note='Forcing check into warning state.'
    '''
    ret = {}
    query_params = {}
    if not consul_url:
        consul_url = _get_config()
        if not consul_url:
            log.error('No Consul URL found.')
            ret['message'] = 'No Consul URL found.'
            ret['res'] = False
            return ret
    if not checkid:
        raise SaltInvocationError('Required argument "checkid" is missing.')
    if 'note' in kwargs:
        query_params['note'] = kwargs['note']
    function = 'agent/check/warn/{0}'.format(checkid)
    res = _query(consul_url=consul_url, function=function, token=token, query_params=query_params, method='GET')
    if res['res']:
        ret['res'] = True
        ret['message'] = 'Check {0} marked as warning.'.format(checkid)
    else:
        ret['res'] = False
        ret['message'] = 'Unable to update check {0}.'.format(checkid)
    return ret
|
def soa_rec(rdata):
    '''Validate and parse DNS record data for SOA record(s)

    :param rdata: DNS record data
    :return: dict w/ fields'''
    rschema = OrderedDict((
        ('mname', str),
        ('rname', str),
        ('serial', int),
        ('refresh', int),
        ('retry', int),
        ('expire', int),
        ('minimum', int),
    ))
    return _data2rec(rschema, rdata)
|
def update(self):
    """Request an updated set of data from casper.jxml."""
    response = self.jss.session.post(self.url, data=self.auth)
    response_xml = ElementTree.fromstring(response.text.encode("utf_8"))
    # Remove previous data, if any, and then add in response's XML.
    self.clear()
    for child in response_xml.getchildren():
        self.append(child)
|
def add_firmware_image(self, name, datafile, **kwargs):
    """Add a new firmware reference.

    :param str name: Firmware file short name (Required)
    :param str datafile: The file object or *path* to the firmware image file (Required)
    :param str description: Firmware file description
    :return: the newly created firmware file object
    :rtype: FirmwareImage"""
    kwargs.update({'name': name})
    firmware_image = FirmwareImage._create_request_map(kwargs)
    firmware_image.update({'datafile': datafile})
    api = self._get_api(update_service.DefaultApi)
    return FirmwareImage(api.firmware_image_create(**firmware_image))
|
def list_cubes(self):
    """List all available JSON files."""
    for file_name in os.listdir(self.directory):
        if '.' in file_name:
            name, ext = file_name.rsplit('.', 1)
            if ext.lower() == 'json':
                yield name
|
def get_objective_lookup_session(self):
    """Gets the OsidSession associated with the objective lookup service.

    return: (osid.learning.ObjectiveLookupSession) - an
            ObjectiveLookupSession
    raise:  OperationFailed - unable to complete request
    raise:  Unimplemented - supports_objective_lookup() is false
    compliance: optional - This method must be implemented if
            supports_objective_lookup() is true."""
    if not self.supports_objective_lookup():
        raise Unimplemented()
    try:
        from . import sessions
    except ImportError:
        raise  # OperationFailed()
    try:
        session = sessions.ObjectiveLookupSession(runtime=self._runtime)
    except AttributeError:
        raise  # OperationFailed()
    return session
|
def set_operator_password(operator, password, auth, url):
    """Function to set the password of an existing operator

    :param operator: str Name of the operator account
    :param password: str New password
    :param auth: requests auth object  # usually auth.creds from auth pyhpeimc.auth.class
    :param url: base url of IMC RS interface  # usually auth.url from pyhpeimc.auth.authclass
    :return: int of 204 if successful
    :rtype: int

    >>> from pyhpeimc.auth import *
    >>> from pyhpeimc.plat.operator import *
    >>> auth = IMCAuth("http://", "10.101.0.203", "8080", "admin", "admin")
    >>> operator = {"fullName": "test administrator", "sessionTimeout": "30",
                    "password": "password", "operatorGroupId": "1",
                    "name": "testadmin", "desc": "test admin account",
                    "defaultAcl": "", "authType": "0"}
    >>> new_operator = create_operator(operator, auth.creds, auth.url)
    >>> set_new_password = set_operator_password('testadmin', 'newpassword', auth.creds, auth.url)
    >>> assert type(set_new_password) is int
    >>> assert set_new_password == 204"""
    if operator is None:
        operator = input('''\n What is the username you wish to change the password?''')
    oper_id = ''
    authtype = None
    plat_oper_list = get_plat_operator(auth, url)
    for i in plat_oper_list:
        if i['name'] == operator:
            oper_id = i['id']
            authtype = i['authType']
    if oper_id == '':
        return "User does not exist"
    change_pw_url = "/imcrs/plat/operator/"
    f_url = url + change_pw_url + oper_id
    if password is None:
        password = input('''\n ============ Please input the operators new password:\n ============ ''')
    payload = json.dumps({'password': password, 'authType': authtype})
    response = requests.put(f_url, data=payload, auth=auth, headers=HEADERS)
    try:
        if response.status_code == 204:
            # print("Operator: " + operator + " password was successfully changed")
            return response.status_code
    except requests.exceptions.RequestException as error:
        return "Error:\n" + str(error) + ' set_operator_password: An Error has occured'
|
def wr_txt_section_hdrgos(self, fout_txt, sortby=None, prt_section=True):
    """Write high GO IDs that are actually used to group current set of GO IDs."""
    sec2d_go = self.grprobj.get_sections_2d()  # lists of GO IDs
    sec2d_nt = self.get_sections_2dnt(sec2d_go)  # lists of GO Grouper namedtuples
    if sortby is None:
        sortby = self.fncsortnt
    with open(fout_txt, 'w') as prt:
        self.prt_ver(prt)
        prt.write("# GROUP NAME: {NAME}\n".format(NAME=self.grprobj.grpname))
        for section_name, nthdrgos_actual in sec2d_nt:
            if prt_section:
                prt.write("# SECTION: {SECTION}\n".format(SECTION=section_name))
            self.prt_ntgos(prt, nthdrgos_actual)
            if prt_section:
                prt.write("\n")
        dat = SummarySec2dHdrGos().summarize_sec2hdrgos(sec2d_go)
        sys.stdout.write(self.grprobj.fmtsum.format(
            GO_DESC='hdr', SECs=len(dat['S']), GOs=len(dat['G']),
            UNGRP=len(dat['U']), undesc="unused", ACTION="WROTE:", FILE=fout_txt))
    return sec2d_nt
|
def get_smt_userid():
    """Get the userid of smt server"""
    cmd = ["sudo", "/sbin/vmcp", "query userid"]
    try:
        userid = subprocess.check_output(cmd, close_fds=True, stderr=subprocess.STDOUT)
        userid = bytes.decode(userid)
        userid = userid.split()[0]
        return userid
    except Exception as err:
        msg = ("Could not find the userid of the smt server: %s") % err
        raise exception.SDKInternalError(msg=msg)
|
def bar_chart(data, bar_char='=', width=80):
    """Return an horizontal bar chart

    >>> print bar_chart({
    ...     'one': '1',
    ...     'two': '2',
    ...     'three': '3',
    ...     'four': '4',
    ...     'five': '5',
    five =====
    four ====
    one =
    three ===
    two ==
    >>> print bar_chart({
    ...     '1/1': 1/1.0,
    ...     '1/2': 1/2.0,
    ...     '1/3': 1/3.0,
    ...     '1/4': 1/4.0,
    ...     '1/5': 1/5.0,
    ...     '2': 2,
    ...     '3': 3,
    ...     '4': 4,
    ...     '5': 5,
    1/1 =====
    1/2 =====
    1/3 =====
    1/4 ===
    1/5 ===
    >>> print bar_chart({
    ...     '1': 2 ** 1,
    ...     '2': 2 ** 2,
    ...     '3': 2 ** 3,
    ...     '4': 2 ** 4,
    ...     '5': 2 ** 5,
    ...     '6': 2 ** 6,
    ...     '7': 2 ** 7,"""
    if type(data) is dict:
        output = []
        max_len = len(max(data, key=len))
        float_values = map(float, data.values())
        max_value = max(float_values)
        min_value = min(float_values)
        all_integer = all(f.is_integer() for f in float_values)
        for key in sorted(data):
            output.append('%s %s' % (key.rjust(max_len, ' '),
                                     draw_bar(bar_char, float(data[key]), all_integer,
                                              min_value, max_value, width - max_len - 2)))
        return '\n'.join(output)
|
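# Sketch (added): bar_chart delegates rendering to a draw_bar helper that is not
# part of this record. The version below is a guess at its behaviour based only
# on the call site; the exact scaling rule is an assumption.
def draw_bar(bar_char, value, all_integer, min_value, max_value, available_width):
    """Illustrative helper: map `value` onto a run of at most `available_width` chars."""
    if max_value <= 0:
        return ''
    if all_integer and max_value <= available_width:
        # Small integer values can be drawn one character per unit.
        length = int(value)
    else:
        # Otherwise scale proportionally to the largest value.
        length = int(round(value / max_value * available_width))
    return bar_char * max(length, 1 if value else 0)
|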
def _publish(tgt, fun, arg=None, tgt_type='glob', returner='', timeout=5, form='clean', wait=False, via_master=None):
    '''Publish a command from the minion out to other minions, publications need
    to be enabled on the Salt master and the minion needs to have permission
    to publish the command. The Salt master will also prevent a recursive
    publication loop, this means that a minion cannot command another minion
    to command another minion as that would create an infinite command loop.

    The arguments sent to the minion publish function are separated with
    commas. This means that for a minion executing a command with multiple
    args it will look like this::

        salt system.example.com publish.publish '*' user.add 'foo,1020,1020'

    CLI Example:

    .. code-block:: bash

        salt system.example.com publish.publish '*' cmd.run 'ls -la /tmp'
    '''
    if 'master_uri' not in __opts__:
        log.error('Cannot run publish commands without a connection to a salt master. No command sent.')
        return {}
    if fun.startswith('publish.'):
        log.info('Cannot publish publish calls. Returning {}')
        return {}

    arg = _parse_args(arg)

    if via_master:
        if 'master_uri_list' not in __opts__:
            raise SaltInvocationError(message='Could not find list of masters \
                    in minion configuration but `via_master` was specified.')
        else:
            # Find the master in the list of master_uris generated by the minion base class
            matching_master_uris = [master for master in __opts__['master_uri_list']
                                    if '//{0}:'.format(via_master) in master]
            if not matching_master_uris:
                raise SaltInvocationError('Could not find match for {0} in \
                        list of configured masters {1} when using `via_master` option'.format(
                            via_master, __opts__['master_uri_list']))
            if len(matching_master_uris) > 1:
                # If we have multiple matches, consider this a non-fatal error
                # and continue with whatever we found first.
                log.warning('The `via_master` flag found '
                            'more than one possible match found for %s when '
                            'evaluating list %s', via_master, __opts__['master_uri_list'])
            master_uri = matching_master_uris.pop()
    else:
        # If no preference is expressed by the user, just publish to the first master
        # in the list.
        master_uri = __opts__['master_uri']

    log.info('Publishing \'%s\' to %s', fun, master_uri)
    auth = salt.crypt.SAuth(__opts__)
    tok = auth.gen_token(b'salt')
    load = {'cmd': 'minion_pub', 'fun': fun, 'arg': arg, 'tgt': tgt, 'tgt_type': tgt_type,
            'ret': returner, 'tok': tok, 'tmo': timeout, 'form': form, 'id': __opts__['id'],
            'no_parse': __opts__.get('no_parse', [])}
    channel = salt.transport.client.ReqChannel.factory(__opts__, master_uri=master_uri)
    try:
        try:
            peer_data = channel.send(load)
        except SaltReqTimeoutError:
            return '\'{0}\' publish timed out'.format(fun)
        if not peer_data:
            return {}
        # CLI args are passed as strings, re-cast to keep time.sleep happy
        if wait:
            loop_interval = 0.3
            matched_minions = set(peer_data['minions'])
            returned_minions = set()
            loop_counter = 0
            while returned_minions ^ matched_minions:
                load = {'cmd': 'pub_ret', 'id': __opts__['id'], 'tok': tok, 'jid': peer_data['jid']}
                ret = channel.send(load)
                returned_minions = set(ret.keys())
                end_loop = False
                if returned_minions >= matched_minions:
                    end_loop = True
                elif (loop_interval * loop_counter) > timeout:
                    # This may be unnecessary, but I am paranoid
                    if not returned_minions:
                        return {}
                    end_loop = True
                if end_loop:
                    if form == 'clean':
                        cret = {}
                        for host in ret:
                            cret[host] = ret[host]['ret']
                        return cret
                    else:
                        return ret
                loop_counter = loop_counter + 1
                time.sleep(loop_interval)
        else:
            time.sleep(float(timeout))
            load = {'cmd': 'pub_ret', 'id': __opts__['id'], 'tok': tok, 'jid': peer_data['jid']}
            ret = channel.send(load)
            if form == 'clean':
                cret = {}
                for host in ret:
                    cret[host] = ret[host]['ret']
                return cret
            else:
                return ret
    finally:
        channel.close()
    return {}
|
def _aln_filename(self, prefix):
    """Return name of file containing the alignment

    prefix -- str, prefix of alignment file."""
    if self.Parameters['-outfile'].isOn():
        aln_filename = self._absolute(self.Parameters['-outfile'].Value)
    else:
        aln_filename = prefix + self._suffix()
    return aln_filename
|
def enable_digital_reporting(self, pin):
    """Enables digital reporting by turning reporting on for all
    8 bits in the "port".
    This is part of Firmata's protocol specification.

    :param pin: Pin and all pins for this port
    :returns: No return value"""
    task = asyncio.ensure_future(self.core.enable_digital_reporting(pin))
    self.loop.run_until_complete(task)
|
def _opt_soft(eigvectors, rot_matrix, n_clusters):
    """Optimizes the PCCA+ rotation matrix such that the memberships are exclusively nonnegative.

    Parameters
    ----------
    eigenvectors : ndarray
        A matrix with the sorted eigenvectors in the columns. The stationary eigenvector should
        be first, then the one to the slowest relaxation process, etc.
    rot_mat : ndarray (m x m)
        nonoptimized rotation matrix
    n_clusters : int
        Number of clusters to group to.

    Returns
    -------
    rot_mat : ndarray (m x m)
        Optimized rotation matrix that rotates the dominant eigenvectors to yield the PCCA memberships, i.e.:
        chi = np.dot(evec, rot_matrix)

    References
    ----------
    [1] S. Roeblitz and M. Weber, Fuzzy spectral clustering by PCCA+:
        application to Markov state models and data classification.
        Adv Data Anal Classif 7, 147-179 (2013)."""
    # only consider first n_clusters eigenvectors
    eigvectors = eigvectors[:, :n_clusters]
    # crop first row and first column from rot_matrix
    # rot_crop_matrix = rot_matrix[1:, 1:]
    rot_crop_matrix = rot_matrix[1:][:, 1:]
    (x, y) = rot_crop_matrix.shape
    # reshape rot_crop_matrix into linear vector
    rot_crop_vec = np.reshape(rot_crop_matrix, x * y)

    # Susanna Roeblitz' target function for optimization
    def susanna_func(rot_crop_vec, eigvectors):
        # reshape into matrix
        rot_crop_matrix = np.reshape(rot_crop_vec, (x, y))
        # fill matrix
        rot_matrix = _fill_matrix(rot_crop_matrix, eigvectors)
        result = 0
        for i in range(0, n_clusters):
            for j in range(0, n_clusters):
                result += np.power(rot_matrix[j, i], 2) / rot_matrix[0, i]
        return -result

    from scipy.optimize import fmin
    rot_crop_vec_opt = fmin(susanna_func, rot_crop_vec, args=(eigvectors,), disp=False)
    rot_crop_matrix = np.reshape(rot_crop_vec_opt, (x, y))
    rot_matrix = _fill_matrix(rot_crop_matrix, eigvectors)
    return rot_matrix
|
def get_data_path(data, module, check_exists=True):
    """return a directory path to data within a module

    Parameters
    ----------
    data : str or list[str]
        file name or list of sub-directories
        and file name (e.g. ['lammps', 'data.txt'])"""
    basepath = os.path.dirname(os.path.abspath(inspect.getfile(module)))
    if isinstance(data, basestring):
        data = [data]
    dirpath = os.path.join(basepath, *data)
    if check_exists:
        assert os.path.exists(dirpath), '{0} does not exist'.format(dirpath)
    return pathlib.Path(dirpath)
|
def affects(self, reglist):
    """Returns if this instruction affects any of the registers
    in reglist."""
    if isinstance(reglist, str):
        reglist = [reglist]
    reglist = single_registers(reglist)
    return len([x for x in self.destroys if x in reglist]) > 0
|
def add_full_state_methods(class_with_globalize_methods):
    """class decorator to create "_full_state" methods/properties on the class (so they
    are valid for all instances created from this class).

    Parameters
    ----------
    class_with_globalize_methods"""
    assert hasattr(class_with_globalize_methods, 'active_set')
    assert hasattr(class_with_globalize_methods, 'nstates_full')

    for name, method in class_with_globalize_methods.__dict__.copy().items():
        if isinstance(method, property) and hasattr(method.fget, '_map_to_full_state_def_arg'):
            default_value = method.fget._map_to_full_state_def_arg
            axis = method.fget._map_to_full_state_along_axis
            new_getter = _wrap_to_full_state(name, default_value, axis)
            alias_to_full_state_inst = property(new_getter)
        elif hasattr(method, '_map_to_full_state_def_arg'):
            default_value = method._map_to_full_state_def_arg
            axis = method._map_to_full_state_along_axis
            alias_to_full_state_inst = _wrap_to_full_state(name, default_value, axis)
        else:
            continue

        name += "_full_state"
        setattr(class_with_globalize_methods, name, alias_to_full_state_inst)

    return class_with_globalize_methods
|
def logger_delete(self, project, logger_name):
    """API call: delete all entries in a logger via a DELETE request

    See
    https://cloud.google.com/logging/docs/reference/v2/rest/v2/projects.logs/delete

    :type project: str
    :param project: ID of project containing the log entries to delete

    :type logger_name: str
    :param logger_name: name of logger containing the log entries to delete"""
    path = "/projects/%s/logs/%s" % (project, logger_name)
    self.api_request(method="DELETE", path=path)
|
def download(cls, url=None, force_download=False):
    """Downloads uniprot_sprot.xml.gz and reldate.txt (release date information) from URL or file path

    .. note::
        only the URL/path of the xml.gz is needed and a valid value for parameter url. The URL/path for reldate.txt
        has to be in the same folder

    :param str url: UniProt gzipped URL or file path
    :param force_download: force method to download
    :type force_download: bool"""
    if url:
        version_url = os.path.join(os.path.dirname(url), defaults.VERSION_FILE_NAME)
    else:
        url = os.path.join(defaults.XML_DIR_NAME, defaults.SWISSPROT_FILE_NAME)
        version_url = os.path.join(defaults.XML_DIR_NAME, defaults.VERSION_FILE_NAME)
    xml_file_path = cls.get_path_to_file_from_url(url)
    version_file_path = cls.get_path_to_file_from_url(version_url)
    if force_download or not os.path.exists(xml_file_path):
        log.info('download {} and {}'.format(xml_file_path, version_file_path))
        scheme = urlsplit(url).scheme
        if scheme in ('ftp', 'http'):
            urlretrieve(version_url, version_file_path)
            urlretrieve(url, xml_file_path)
        elif not scheme and os.path.isfile(url):
            shutil.copyfile(url, xml_file_path)
            shutil.copyfile(version_url, version_file_path)
    return xml_file_path, version_file_path
|
def get_by_params(self, process_name, timeperiod, start_id, end_id):
    """method finds unit_of_work record and returns it to the caller"""
    query = {unit_of_work.PROCESS_NAME: process_name,
             unit_of_work.TIMEPERIOD: timeperiod,
             unit_of_work.START_ID: start_id,
             unit_of_work.END_ID: end_id}
    collection = self.ds.connection(COLLECTION_UNIT_OF_WORK)
    document = collection.find_one(query)
    if document is None:
        raise LookupError('UOW satisfying query {0} was not found'.format(query))
    return UnitOfWork.from_json(document)
|
def addprojecthook(self, project_id, url, push=False, issues=False, merge_requests=False, tag_push=False):
    """add a hook to a project

    :param project_id: project id
    :param url: url of the hook
    :return: True if success"""
    data = {'id': project_id,
            'url': url,
            'push_events': int(bool(push)),
            'issues_events': int(bool(issues)),
            'merge_requests_events': int(bool(merge_requests)),
            'tag_push_events': int(bool(tag_push))}
    request = requests.post('{0}/{1}/hooks'.format(self.projects_url, project_id),
                            headers=self.headers, data=data,
                            verify=self.verify_ssl, auth=self.auth, timeout=self.timeout)
    if request.status_code == 201:
        return request.json()
    else:
        return False
|
def set_datapoint(self, ind, datapoint):
    """Sets the value of the datapoint at the given index."""
    if ind >= self.num_datapoints:
        raise ValueError('Index %d out of bounds! Tensor has %d datapoints' % (ind, self.num_datapoints))
    self.data[ind, ...] = np.array(datapoint).astype(self.dtype)
|
def shutdown(name, **kwargs):
    '''Shuts down the device.

    .. code-block:: yaml

        shut the device:
          junos:
            - shutdown
            - in_min: 10

    Parameters:
      Optional
        * kwargs:
            * reboot:
              Whether to reboot instead of shutdown. (default=False)
            * at:
              Specify time for reboot. (To be used only if reboot=yes)
            * in_min:
              Specify delay in minutes for shutdown'''
    ret = {'name': name, 'changes': {}, 'result': True, 'comment': ''}
    ret['changes'] = __salt__['junos.shutdown'](**kwargs)
    return ret
|
def load_from_json(file_path):
    """Load the stored data from json, and return as a dict."""
    if os.path.exists(file_path):
        raw_data = open(file_path, 'rb').read()
        return json.loads(base64.decodestring(raw_data).decode('utf-8'))
|
async def on_raw_731(self, message):
    """Someone we are monitoring got offline."""
    for nick in message.params[1].split(','):
        self._destroy_user(nick, monitor_override=True)
        await self.on_user_offline(nick)
|
def virt_conf(self, conf, template_repo=None, template_store=None, do_bootstrap=True, do_build=True):
    """Initializes all the virt infrastructure of the prefix, creating the
    domains disks, doing any network leases and creating all the virt
    related files and dirs inside this prefix.

    Args:
        conf (dict): Configuration spec
        template_repo (TemplateRepository): template repository instance
        template_store (TemplateStore): template store instance
        do_bootstrap (bool): If true run virt-sysprep on the images
        do_build (bool): If true run build commands on the images,
            see lago.build.py for more info.

    Returns:
        None"""
    os.environ['LAGO_PREFIX_PATH'] = self.paths.prefix_path()
    with utils.RollbackContext() as rollback:
        rollback.prependDefer(shutil.rmtree, self.paths.prefix_path(), ignore_errors=True)
        self._metadata = {'lago_version': pkg_resources.get_distribution("lago").version}
        conf = self._prepare_domains_images(conf=conf, template_repo=template_repo, template_store=template_store)
        conf = self._config_net_topology(conf)
        conf['domains'] = self._copy_deploy_scripts_for_hosts(domains=conf['domains'])
        self._virt_env = self.VIRT_ENV_CLASS(prefix=self, vm_specs=conf['domains'], net_specs=conf['nets'])
        if do_bootstrap:
            self.virt_env.bootstrap()
        if do_build:
            self.build(conf['domains'])
        self.save()
        rollback.clear()
|
def get_order_info(self, order_id, _async=False):
    """Query a single order

    :param order_id:
    :return:"""
    params = {}
    path = f'/v1/order/orders/{order_id}'
    return api_key_get(params, path, _async=_async)
|
def config(config, fork_name="", origin_name=""):
    """Setting various configuration options"""
    state = read(config.configfile)
    any_set = False
    if fork_name:
        update(config.configfile, {"FORK_NAME": fork_name})
        success_out("fork-name set to: {}".format(fork_name))
        any_set = True
    if origin_name:
        update(config.configfile, {"ORIGIN_NAME": origin_name})
        success_out("origin-name set to: {}".format(origin_name))
        any_set = True
    if not any_set:
        info_out("Fork-name: {}".format(state["FORK_NAME"]))
|
def existing_choice_control(self):
    """Controls selection errors. Generates an error message
    if zero or more than one channel is selected."""
    self.current.task_data['existing'] = False
    self.current.task_data['msg'] = _(u"You should choose just one channel to do operation.")
    keys, names = self.return_selected_form_items(self.input['form']['ChannelList'])
    if len(keys) == 1:
        self.current.task_data['existing'] = True
        self.current.task_data['target_channel_key'] = keys[0]
|
def from_dict(cls, coll, d):
    """Construct from dict

    :param coll: Collection for the mark
    :param d: Input
    :type d: dict
    :return: new instance
    :rtype: Mark"""
    return Mark(collection=coll, operation=Operation[d[cls.FLD_OP]], pos=d[cls.FLD_MARK], field=d[cls.FLD_FLD])
|
def exportdb(outdir):
    """Export all anchore images to JSON files"""
    ecode = 0
    try:
        imgdir = os.path.join(outdir, "images")
        feeddir = os.path.join(outdir, "feeds")
        storedir = os.path.join(outdir, "storedfiles")
        for d in [outdir, imgdir, feeddir, storedir]:
            if not os.path.exists(d):
                os.makedirs(d)

        anchore_print("exporting images...")
        imagelist = anchore_utils.get_image_list().keys()
        for imageId in imagelist:
            thefile = os.path.join(imgdir, imageId + ".json")
            if not os.path.exists(thefile):
                with open(thefile, 'w') as OFH:
                    OFH.write(json.dumps(contexts['anchore_db'].load_image_new(imageId)))
            stored_namespaces = contexts['anchore_db'].load_files_namespaces(imageId)
            for namespace in stored_namespaces:
                stored_files = contexts['anchore_db'].load_files_tarfile(imageId, namespace)
                if os.path.exists(stored_files):
                    thedir = os.path.join(storedir, imageId, namespace)
                    if not os.path.exists(thedir):
                        os.makedirs(thedir)
                    thefile = os.path.join(thedir, "stored_files.tar.gz")
                    shutil.copy(stored_files, thefile)

        anchore_print("exporting feeds...")
        feedmeta = contexts['anchore_db'].load_feedmeta()
        thefile = os.path.join(feeddir, "feedmeta.json")
        with open(thefile, 'w') as OFH:
            OFH.write(json.dumps(feedmeta))
        for feed in feedmeta:
            feedobj = feedmeta[feed]
            for group in feedobj['groups']:
                groupobj = feedobj['groups'][group]
                datafiles = groupobj.pop('datafiles', [])
                for datafile in datafiles:
                    thedir = os.path.join(feeddir, feed, group)
                    if not os.path.exists(thedir):
                        os.makedirs(thedir)
                    thefile = os.path.join(thedir, datafile)
                    if not os.path.exists(thefile):
                        with open(thefile, 'w') as OFH:
                            OFH.write(json.dumps(contexts['anchore_db'].load_feed_group_data(feed, group, datafile)))
    except Exception as err:
        anchore_print_err("operation failed: " + str(err))
        ecode = 1
    sys.exit(ecode)
|
def LighterColor(self, level):
    '''Create a new instance based on this one but lighter.

    Parameters:
      :level:
        The amount by which the color should be lightened to produce
        the new one [0...1].

    Returns:
      A grapefruit.Color instance.

    >>> Color.NewFromHsl(30, 1, 0.5).LighterColor(0.25)
    (1.0, 0.75, 0.5, 1.0)
    >>> Color.NewFromHsl(30, 1, 0.5).LighterColor(0.25).hsl
    (30, 1, 0.75)'''
    h, s, l = self.__hsl
    return Color((h, s, min(l + level, 1)), 'hsl', self.__a, self.__wref)
|
def resolution(self, index):
    """Resolution with a given index.

    Parameters
    ----------
    index : int
        Resolution index.
        Global if this is the ``aionationstates.wa`` object, local
        if this is ``aionationstates.ga`` or ``aionationstates.sc``.

    Returns
    -------
    :class:`ApiQuery` of :class:`Resolution`

    Raises
    ------
    :class:`NotFound`
        If a resolution with the requested index doesn't exist."""
    @api_query('resolution', id=str(index))
    async def result(_, root):
        elem = root.find('RESOLUTION')
        if not elem:
            raise NotFound(f'No resolution found with index {index}')
        return Resolution(elem)
    return result(self)
|
def getLockStatsDB(self):
    """Returns the number of active locks discriminated by database.

    @return: Dictionary of stats."""
    info_dict = {'all': {}, 'wait': {}}
    cur = self._conn.cursor()
    cur.execute("SELECT d.datname, l.granted, COUNT(*) FROM pg_database d "
                "JOIN pg_locks l ON d.oid=l.database "
                "GROUP BY d.datname, l.granted;")
    rows = cur.fetchall()
    for (db, granted, cnt) in rows:
        info_dict['all'][db] = info_dict['all'].get(db, 0) + cnt
        if not granted:
            info_dict['wait'][db] = info_dict['wait'].get(db, 0) + cnt
    return info_dict
|
def nl_recv(sk, nla, buf, creds=None):
    """Receive data from Netlink socket.

    https://github.com/thom311/libnl/blob/libnl3_2_25/lib/nl.c#L625

    Receives data from a connected netlink socket using recvmsg() and returns the number of bytes read. The read data
    is stored in a newly allocated buffer that is assigned to `buf`. The peer's netlink address will be stored in
    `nla`.

    This function blocks until data is available to be read unless the socket has been put into non-blocking mode
    using nl_socket_set_nonblocking() in which case this function will return immediately with a return value of 0.

    The buffer size used when reading from the netlink socket and thus limiting the maximum size of a netlink message
    that can be read defaults to the size of a memory page (getpagesize()). The buffer size can be modified on a per
    socket level using the function `nl_socket_set_msg_buf_size()`.

    If message peeking is enabled using nl_socket_enable_msg_peek() the size of the message to be read will be
    determined using the MSG_PEEK flag prior to performing the actual read. This leads to an additional recvmsg()
    call for every read operation which has performance implications and is not recommended for high throughput
    protocols.

    An eventual interruption of the recvmsg() system call is automatically handled by retrying the operation.

    If receiving of credentials has been enabled using the function `nl_socket_set_passcred()`, this function will
    allocate a new struct `ucred` filled with the received credentials and assign it to `creds`.

    Positional arguments:
    sk -- Netlink socket (nl_sock class instance) (input).
    nla -- Netlink socket structure to hold address of peer (sockaddr_nl class instance) (output).
    buf -- destination bytearray() for message content (output).
    creds -- destination class instance for credentials (ucred class instance) (output).

    Returns:
    Two-item tuple. First item is number of bytes read, 0 on EOF, 0 on no data event (non-blocking mode), or a
    negative error code. Second item is the message content from the socket or None."""
    flags = 0
    page_size = resource.getpagesize() * 4
    if sk.s_flags & NL_MSG_PEEK:
        flags |= socket.MSG_PEEK | socket.MSG_TRUNC
    iov_len = sk.s_bufsize or page_size

    if creds and sk.s_flags & NL_SOCK_PASSCRED:
        raise NotImplementedError  # TODO https://github.com/Robpol86/libnl/issues/2

    while True:  # This is the `goto retry` implementation.
        try:
            if hasattr(sk.socket_instance, 'recvmsg'):
                iov, _, msg_flags, address = sk.socket_instance.recvmsg(iov_len, 0, flags)
            else:
                iov, address = sk.socket_instance.recvfrom(iov_len, flags)
                msg_flags = 0
        except OSError as exc:
            if exc.errno == errno.EINTR:
                continue  # recvmsg() returned EINTR, retrying.
            return -nl_syserr2nlerr(exc.errno)
        nla.nl_family = sk.socket_instance.family  # recvmsg() in C does this, but not Python's.
        if not iov:
            return 0
        if msg_flags & socket.MSG_CTRUNC:
            raise NotImplementedError  # TODO https://github.com/Robpol86/libnl/issues/2
        if iov_len < len(iov) or msg_flags & socket.MSG_TRUNC:
            # Provided buffer is not long enough.
            # Enlarge it to size of n (which should be total length of the message) and try again.
            iov_len = len(iov)
            continue
        if flags:
            # Buffer is big enough, do the actual reading.
            flags = 0
            continue
        nla.nl_pid = address[0]
        nla.nl_groups = address[1]
        if creds and sk.s_flags & NL_SOCK_PASSCRED:
            raise NotImplementedError  # TODO https://github.com/Robpol86/libnl/issues/2
        if iov:
            buf += iov
        return len(buf)
|
def wysiwyg_setup(protocol="http", editor_override=None):
    """Create the <style> and <script> tags needed to initialize the rich text editor.

    Create a local django_wysiwyg/includes.html template if you don't want to use Yahoo's CDN"""
    ctx = {"protocol": protocol}
    ctx.update(get_settings(editor_override=editor_override))
    return render_to_string("django_wysiwyg/%s/includes.html" % ctx['DJANGO_WYSIWYG_FLAVOR'], ctx)
|
def copy(cell):
    """Copy the contents of a SpiceCell of any data type to another
    cell of the same type.

    http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/copy_c.html

    :param cell: Cell to be copied.
    :type cell: spiceypy.utils.support_types.SpiceCell
    :return: New cell
    :rtype: spiceypy.utils.support_types.SpiceCell"""
    assert isinstance(cell, stypes.SpiceCell)
    # Next line was redundant with [raise NotImplementedError] below
    # assert cell.dtype == 0 or cell.dtype == 1 or cell.dtype == 2
    if cell.dtype == 0:
        newcopy = stypes.SPICECHAR_CELL(cell.size, cell.length)
    elif cell.dtype == 1:
        newcopy = stypes.SPICEDOUBLE_CELL(cell.size)
    elif cell.dtype == 2:
        newcopy = stypes.SPICEINT_CELL(cell.size)
    else:
        raise NotImplementedError
    libspice.copy_c(ctypes.byref(cell), ctypes.byref(newcopy))
    return newcopy
|
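# Usage sketch (added): built on the public spiceypy API, which exposes the
# function above as spiceypy.copy; treating spiceypy as installed is an
# assumption about the caller's environment.
import spiceypy
import spiceypy.utils.support_types as stypes

# Build a small integer cell and populate it.
original = stypes.SPICEINT_CELL(10)
for value in (3, 1, 2):
    spiceypy.appndi(value, original)

# Copy it; the new cell has the same type and contents but independent storage.
duplicate = spiceypy.copy(original)
print([duplicate[i] for i in range(spiceypy.card(duplicate))])  # [3, 1, 2]
|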
def makeDataFiles(prefix, dir):
    """Create distutils data_files structure from dir

    distutil will copy all file rooted under dir into prefix, excluding
    dir itself, just like 'ditto src dst' works, and unlike 'cp -r src
    dst, which copy src into dst'.

    Typical usage:
        # install the contents of 'wiki' under sys.prefix+'share/moin'
        data_files = makeDataFiles('share/moin', 'wiki')

    For this directory structure:
        root
            file1
            file2
            dir
                file
                subdir
                    file

    makeDataFiles('prefix', 'root') will create this distutil data_files structure:
        [('prefix', ['file1', 'file2']),
         ('prefix/dir', ['file']),
         ('prefix/dir/subdir', ['file'])]"""
    # Strip 'dir/' from of path before joining with prefix
    dir = dir.rstrip('/')
    strip = len(dir) + 1
    found = []
    os.path.walk(dir, visit, (prefix, strip, found))
    # print found[0]
    return found[0]
|
def update_vertices(self, vertices):
    """Update the triangle vertices."""
    vertices = np.array(vertices, dtype=np.float32)
    self._vbo_v.set_data(vertices)
|
def discard(self, key):
    """Remove an element. Do not raise an exception if absent.

    The MutableSet mixin uses this to implement the .remove() method, which
    *does* raise an error when asked to remove a non-existent item."""
    if key in self:
        i = self.map[key]
        del self.items[i]
        del self.map[key]
        for k, v in self.map.items():
            if v >= i:
                self.map[k] = v - 1
|
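# Illustration (added): a minimal, self-contained sketch of the kind of
# index-backed ordered set the discard method above belongs to; the class name
# and the other methods are hypothetical, only discard mirrors the record above.
from collections.abc import MutableSet

class IndexedSet(MutableSet):
    """Toy ordered set keeping items in a list and their positions in a dict."""

    def __init__(self, iterable=()):
        self.items = []
        self.map = {}
        for key in iterable:
            self.add(key)

    def __contains__(self, key):
        return key in self.map

    def __iter__(self):
        return iter(self.items)

    def __len__(self):
        return len(self.items)

    def add(self, key):
        if key not in self.map:
            self.map[key] = len(self.items)
            self.items.append(key)

    def discard(self, key):
        if key in self:
            i = self.map[key]
            del self.items[i]
            del self.map[key]
            # Shift positions of everything that came after the removed item.
            for k, v in self.map.items():
                if v >= i:
                    self.map[k] = v - 1

s = IndexedSet('abcd')
s.discard('b')
print(list(s))  # ['a', 'c', 'd']
|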
async def initialize(self, timeout=2.0):
    """Launch any background tasks associated with this subsystem.

    This method will synchronously await self.initialized() which makes
    sure that the background tasks start up correctly."""
    if self.initialized.is_set():
        raise InternalError("initialize called when already initialized")
    self._emulator.add_task(8, self._reset_vector())
    await asyncio.wait_for(self.initialized.wait(), timeout=timeout)
|
def reset_query_marks(self):
    """set or reset hyb and neighbors marks to atoms."""
    for i, atom in self.atoms():
        neighbors = 0
        hybridization = 1
        p_neighbors = 0
        p_hybridization = 1
        # hyb: 1 - sp3; 2 - sp2; 3 - sp1; 4 - aromatic
        for j, bond in self._adj[i].items():
            isnth = self._node[j].element != 'H'
            order = bond.order
            if order:
                if isnth:
                    neighbors += 1
                if hybridization not in (3, 4):
                    if order == 4:
                        hybridization = 4
                    elif order == 3:
                        hybridization = 3
                    elif order == 2:
                        if hybridization == 2:
                            hybridization = 3
                        else:
                            hybridization = 2
            order = bond.p_order
            if order:
                if isnth:
                    p_neighbors += 1
                if p_hybridization not in (3, 4):
                    if order == 4:
                        p_hybridization = 4
                    elif order == 3:
                        p_hybridization = 3
                    elif order == 2:
                        if p_hybridization == 2:
                            p_hybridization = 3
                        else:
                            p_hybridization = 2
        atom._reactant._neighbors = neighbors
        atom._reactant._hybridization = hybridization
        atom._product._neighbors = p_neighbors
        atom._product._hybridization = p_hybridization
        atom.__dict__.clear()
    # flush cache
    self.flush_cache()
|
def set_faultset_name(self, name, fsObj):
    """Set name for Faultset

    :param name: Name of Faultset
    :param fsObj: ScaleIO FS object
    :return: POST request response
    :rtype: Requests POST response object"""
    # Set name of FaultSet
    self.conn.connection._check_login()
    faultSetNameDict = {'Name': name}
    # This one is the most logical name comparing to other methods.
    response = self.conn.connection._do_post("{}/{}{}/{}".format(self.conn.connection._api_url, "types/FaultSet::", fsObj.id, 'instances/action/setFaultSetName'), json=faultSetNameDict)
    # This is how it's documented in the REST API chapter:
    # response = self._do_post("{}/{}{}/{}".format(self._api_url, "types/FaultSet::", fsObj.id, 'instances/action/setFaultSetName'), json=faultsetNameSdcDict)
    return response
|
def _get_name(self):
    """Find name of scoring function."""
    if self.name is not None:
        return self.name
    if self.scoring_ is None:
        return 'score'
    if isinstance(self.scoring_, str):
        return self.scoring_
    if isinstance(self.scoring_, partial):
        return self.scoring_.func.__name__
    if isinstance(self.scoring_, _BaseScorer):
        return self.scoring_._score_func.__name__
    return self.scoring_.__name__
|
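# Demonstration (added): the name-resolution order of _get_name for the
# plain-string, functools.partial, and bare-callable cases; resolve_name below
# is a stand-alone mirror of those branches, and the scikit-learn _BaseScorer
# branch is omitted.
from functools import partial

def accuracy(y_true, y_pred):
    return sum(a == b for a, b in zip(y_true, y_pred)) / len(y_true)

def resolve_name(scoring):
    if scoring is None:
        return 'score'
    if isinstance(scoring, str):
        return scoring
    if isinstance(scoring, partial):
        return scoring.func.__name__
    return scoring.__name__

print(resolve_name(None))               # 'score'
print(resolve_name('neg_log_loss'))     # 'neg_log_loss'
print(resolve_name(partial(accuracy)))  # 'accuracy'
print(resolve_name(accuracy))           # 'accuracy'
|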
def get_effective_agent_id_with_proxy(proxy):
    """Given a Proxy, returns the Id of the effective Agent"""
    if is_authenticated_with_proxy(proxy):
        if proxy.has_effective_agent():
            return proxy.get_effective_agent_id()
        else:
            return proxy.get_authentication().get_agent_id()
    else:
        return Id(identifier='MC3GUE$T@MIT.EDU', namespace='authentication.Agent', authority='MIT-ODL')
|
def _mark_grid_bounds(self, plane, region_bbox):
    """Assume all lines define a complete grid over the region_bbox.
    Detect which lines are missing so that we can recover merged
    cells."""
    # Grid boundaries
    vbars = np.zeros([self.num_rows, self.num_cols + 1], dtype=np.bool)
    hbars = np.zeros([self.num_rows + 1, self.num_cols], dtype=np.bool)

    def closest_idx(arr, elem):
        left = bisect.bisect_left(arr, elem) - 1
        right = bisect.bisect_right(arr, elem) - 1
        return left if abs(arr[left] - elem) < abs(arr[right] - elem) else right

    # Figure out which separating segments are missing, i.e. merge cells
    for row, (y0, y1) in enumerate(self.yranges):
        yc = (y0 + y1) // 2
        for l in plane.find((region_bbox.x0, yc, region_bbox.x1, yc)):
            vbars[row, closest_idx(self.xs, l.xc)] = True
    for col, (x0, x1) in enumerate(self.xranges):
        xc = (x0 + x1) // 2
        for l in plane.find((xc, region_bbox.y0, xc, region_bbox.y1)):
            hbars[closest_idx(self.ys, l.yc), col] = True
    return vbars, hbars
|
def set_weather ( self , weather_type ) :
"""Queue up a set weather command . It will be applied when ` tick ` or ` step ` is called next .
By the next tick , the lighting , skysphere , fog , and relevant particle systems will be updated and / or spawned
to the given weather . If there is no skysphere or directional light in the world , the command may not function
properly but will not cause a crash .
NOTE : Because this command can effect the fog density , any changes made by a change _ fog _ density command before
a set _ weather command called will be undone . It is recommended to call change _ fog _ density after calling set
weather .
Args :
weather _ type ( str ) : The type of weather , which can be ' Rain ' or ' Cloudy ' . In all downloadable worlds ,
the weather is clear by default . If the given type string is not available , the command will not be sent ."""
|
if not SetWeatherCommand . has_type ( weather_type . lower ( ) ) :
raise HolodeckException ( "Invalid weather type " + weather_type )
self . _should_write_to_command_buffer = True
command_to_send = SetWeatherCommand ( weather_type . lower ( ) )
self . _commands . add_command ( command_to_send )
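
A hypothetical usage sketch; the `env` object is illustrative and not from the source:

# env = holodeck.make('UrbanCity')   # hypothetical environment handle
# env.set_weather('Rain')            # queued now, applied on the next tick() / step()
# env.set_weather('Cloudy')          # also valid per the docstring above
# env.set_weather('Snow')            # would raise HolodeckException("Invalid weather type Snow")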
|
def mode_clipboard_watch ( options ) :
"""Clipboard Watch Mode : watches for a new string on the clipboard , and tries to fetch that URL"""
|
articles = set ( )
failures = set ( )
print ( 'Hello, this is news-scraper. Copy a URL to start!' )
print ( 'To quit, press CTRL+C in this window.\n' )
url = pyperclip . paste ( )
while True :
try :
tmp_value = pyperclip . paste ( )
if tmp_value != url :
url = tmp_value
print ( 'Fetching article...' )
if options . debug :
print ( "Value changed: %s" % str ( url ) [ : 100 ] )
article = _get_article ( url = url , bodyLines = options . bodyLines , debug = options . debug )
if ( article ) :
articles . add ( article )
else :
failures . add ( url )
time . sleep ( 0.2 )
except KeyboardInterrupt :
break
_output ( articles , options . outputFile , failures , options . failureFile )
|
def ip_rtm_config_route_static_route_nh_static_route_dest ( self , ** kwargs ) :
"""Auto Generated Code"""
|
config = ET . Element ( "config" )
ip = ET . SubElement ( config , "ip" , xmlns = "urn:brocade.com:mgmt:brocade-common-def" )
rtm_config = ET . SubElement ( ip , "rtm-config" , xmlns = "urn:brocade.com:mgmt:brocade-rtm" )
route = ET . SubElement ( rtm_config , "route" )
static_route_nh = ET . SubElement ( route , "static-route-nh" )
static_route_next_hop_key = ET . SubElement ( static_route_nh , "static-route-next-hop" )
static_route_next_hop_key . text = kwargs . pop ( 'static_route_next_hop' )
static_route_dest = ET . SubElement ( static_route_nh , "static-route-dest" )
static_route_dest . text = kwargs . pop ( 'static_route_dest' )
callback = kwargs . pop ( 'callback' , self . _callback )
return callback ( config )
|
def get_resources_by_query ( self , resource_query ) :
"""Gets a list of ` ` Resources ` ` matching the given resource query .
arg : resource _ query ( osid . resource . ResourceQuery ) : the
resource query
return : ( osid . resource . ResourceList ) - the returned
` ` ResourceList ` `
raise : NullArgument - ` ` resource _ query ` ` is ` ` null ` `
raise : OperationFailed - unable to complete request
raise : PermissionDenied - authorization failure
raise : Unsupported - ` ` resource _ query ` ` is not of this service
* compliance : mandatory - - This method must be implemented . *"""
|
# Implemented from template for
# osid . resource . ResourceQuerySession . get _ resources _ by _ query
and_list = list ( )
or_list = list ( )
for term in resource_query . _query_terms :
if '$in' in resource_query . _query_terms [ term ] and '$nin' in resource_query . _query_terms [ term ] :
and_list . append ( { '$or' : [ { term : { '$in' : resource_query . _query_terms [ term ] [ '$in' ] } } , { term : { '$nin' : resource_query . _query_terms [ term ] [ '$nin' ] } } ] } )
else :
and_list . append ( { term : resource_query . _query_terms [ term ] } )
for term in resource_query . _keyword_terms :
or_list . append ( { term : resource_query . _keyword_terms [ term ] } )
if or_list :
and_list . append ( { '$or' : or_list } )
view_filter = self . _view_filter ( )
if view_filter :
and_list . append ( view_filter )
if and_list :
query_terms = { '$and' : and_list }
collection = JSONClientValidated ( 'resource' , collection = 'Resource' , runtime = self . _runtime )
result = collection . find ( query_terms ) . sort ( '_id' , DESCENDING )
else :
result = [ ]
return objects . ResourceList ( result , runtime = self . _runtime , proxy = self . _proxy )
|
def get_toplosses_idxs ( cls , learn , n_imgs , ** kwargs ) :
"Sorts ` ds _ type ` dataset by top losses and returns dataset and sorted indices ."
|
dl = learn . data . fix_dl
if not n_imgs :
n_imgs = len ( dl . dataset )
_ , _ , top_losses = learn . get_preds ( ds_type = DatasetType . Fix , with_loss = True )
idxs = torch . topk ( top_losses , n_imgs ) [ 1 ]
return cls . padded_ds ( dl . dataset , ** kwargs ) , idxs
|
def length ( self , chain = - 1 ) :
"""Return the length of the trace .
: Parameters :
chain : int or None
The chain index . If None , returns the combined length of all chains ."""
|
if chain is not None :
return len ( self . _vlarrays [ chain ] )
else :
return sum ( map ( len , self . _vlarrays ) )
|
def fit_model ( df , filters , model_expression ) :
"""Use statsmodels OLS to construct a model relation .
Parameters
df : pandas . DataFrame
Data to use for fit . Should contain all the columns
referenced in the ` model _ expression ` .
filters : list of str
Any filters to apply before doing the model fit .
model _ expression : str
A patsy model expression that can be used with statsmodels .
Should contain both the left - and right - hand sides .
Returns
fit : statsmodels . regression . linear _ model . OLSResults"""
|
df = util . apply_filter_query ( df , filters )
model = smf . ols ( formula = model_expression , data = df )
if len ( model . exog ) != len ( df ) :
raise ModelEvaluationError ( 'Estimated data does not have the same length as input. ' 'This suggests there are null values in one or more of ' 'the input columns.' )
with log_start_finish ( 'statsmodels OLS fit' , logger ) :
return model . fit ( )
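
A usage sketch with made-up column names and data (assumes fit_model and its module imports are available); it illustrates the patsy-style model_expression and a filter string:

import numpy as np
import pandas as pd

rng = np.random.default_rng(0)
sqft = rng.uniform(400, 2000, 200)
demo = pd.DataFrame({
    'sqft': sqft,
    'rent': 500 + 0.8 * sqft + rng.normal(0, 50, 200),  # synthetic, illustrative data
})
# Keep only plausible rows, then fit rent ~ sqft by OLS.
fit = fit_model(demo, filters=['sqft > 0'], model_expression='rent ~ sqft')
print(fit.params)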
|
def get_view ( self , table ) :
"""Returns the SQL query for a view , or None if it doesn ' t exist or is not a view .
: param table : The table containing the view .
: type table : BQTable"""
|
request = self . client . tables ( ) . get ( projectId = table . project_id , datasetId = table . dataset_id , tableId = table . table_id )
try :
response = request . execute ( )
except http . HttpError as ex :
if ex . resp . status == 404 :
return None
raise
return response [ 'view' ] [ 'query' ] if 'view' in response else None
|
def _ScheduleGenericHunt ( hunt_obj ) :
"""Adds foreman rules for a generic hunt ."""
|
# TODO : Migrate foreman conditions to use relation expiration
# durations instead of absolute timestamps .
foreman_condition = foreman_rules . ForemanCondition ( creation_time = rdfvalue . RDFDatetime . Now ( ) , expiration_time = hunt_obj . init_start_time + hunt_obj . duration , description = "Hunt %s %s" % ( hunt_obj . hunt_id , hunt_obj . args . hunt_type ) , client_rule_set = hunt_obj . client_rule_set , hunt_id = hunt_obj . hunt_id )
# Make sure the rule makes sense .
foreman_condition . Validate ( )
data_store . REL_DB . WriteForemanRule ( foreman_condition )
|
def unparse_qsl ( qsl , sort = False , reverse = False ) :
"""Reverse conversion for parse _ qsl"""
|
result = [ ]
items = qsl
if sort :
items = sorted ( items , key = lambda x : x [ 0 ] , reverse = reverse )
for keys , values in items :
query_name = quote ( keys )
result . append ( query_name + "=" + quote ( values ) )
return "&" . join ( result )
|
def process_json_file ( file_name ) :
"""Return an EidosProcessor by processing the given Eidos JSON - LD file .
This function is useful if the output from Eidos is saved as a file and
needs to be processed .
Parameters
file _ name : str
The name of the JSON - LD file to be processed .
Returns
ep : EidosProcessor
A EidosProcessor containing the extracted INDRA Statements
in its statements attribute ."""
|
try :
with open ( file_name , 'rb' ) as fh :
json_str = fh . read ( ) . decode ( 'utf-8' )
return process_json_str ( json_str )
except IOError :
logger . exception ( 'Could not read file %s.' % file_name )
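
A hypothetical call; the file name below is illustrative only:

# ep = process_json_file('eidos_output.jsonld')   # hypothetical file name
# if ep is not None:
#     print(len(ep.statements), 'INDRA Statements extracted')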
|
def _get_9q_square_qvm ( name : str , noisy : bool , connection : ForestConnection = None , qvm_type : str = 'qvm' ) -> QuantumComputer :
"""A nine - qubit 3x3 square lattice .
This uses a " generic " lattice not tied to any specific device . 9 qubits is large enough
to do vaguely interesting algorithms and small enough to simulate quickly .
: param name : The name of this QVM
: param connection : The connection to use to talk to external services
: param noisy : Whether to construct a noisy quantum computer
: param qvm _ type : The type of QVM . Either ' qvm ' or ' pyqvm ' .
: return : A pre - configured QuantumComputer"""
|
topology = nx . convert_node_labels_to_integers ( nx . grid_2d_graph ( 3 , 3 ) )
return _get_qvm_with_topology ( name = name , connection = connection , topology = topology , noisy = noisy , requires_executable = True , qvm_type = qvm_type )
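
The 3x3 lattice built above, sketched standalone with networkx to show its size (no Forest services needed; this only reproduces the topology construction):

import networkx as nx

topology = nx.convert_node_labels_to_integers(nx.grid_2d_graph(3, 3))
print(sorted(topology.nodes()))    # [0, 1, 2, 3, 4, 5, 6, 7, 8]
print(topology.number_of_edges())  # 12 nearest-neighbour couplings in a 3x3 grid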
|
def prettyMatcherList ( things ) :
"""Try to construct a nicely - formatted string for a list of matcher
objects . Those may be compiled regular expressions or strings . . ."""
|
norm = [ ]
for x in makeSequence ( things ) :
if hasattr ( x , 'pattern' ) :
norm . append ( x . pattern )
else :
norm . append ( x )
return "('%s')" % "', '" . join ( norm )
|
def bind ( self , prefix , namespace , * args , ** kwargs ) :
"""Extends the function to add an attribute to the class for each
added namespace to allow for use of dot notation . All prefixes are
converted to lowercase
Args :
prefix : string of namespace name
namespace : rdflib . namespace instance
kwargs :
calc : whether or not to create the lookup reference dictionaries
Example usage :
RdfNsManager . rdf . type = >
http : / / www . w3 . org / 1999/02/22 - rdf - syntax - ns # type"""
|
# RdfNamespace ( prefix , namespace , * * kwargs )
setattr ( self , prefix , RdfNamespace ( prefix , namespace , ** kwargs ) )
if kwargs . pop ( 'calc' , True ) :
self . __make_dicts__
|
def _load_private_key_file ( fname ) :
"""Loads an onion - service private - key from the given file . This can
be either a ' key blob ' as returned from a previous ADD _ ONION call ,
or a v3 or v2 file as created by Tor when using the
HiddenServiceDir directive .
In any case , a key - blob suitable for ADD _ ONION use is returned ."""
|
with open ( fname , "rb" ) as f :
data = f . read ( )
if b"\x00\x00\x00" in data : # v3 private key file
blob = data [ data . find ( b"\x00\x00\x00" ) + 3 : ]
return u"ED25519-V3:{}" . format ( b2a_base64 ( blob . strip ( ) ) . decode ( 'ascii' ) . strip ( ) )
if b"-----BEGIN RSA PRIVATE KEY-----" in data : # v2 RSA key
blob = "" . join ( data . decode ( 'ascii' ) . split ( '\n' ) [ 1 : - 2 ] )
return u"RSA1024:{}" . format ( blob )
blob = data . decode ( 'ascii' ) . strip ( )
if ':' in blob :
kind , key = blob . split ( ':' , 1 )
if kind in [ 'ED25519-V3' , 'RSA1024' ] :
return blob
raise ValueError ( "'{}' does not appear to contain v2 or v3 private key data" . format ( fname , ) )
|
def set_coordinate_selection ( self , selection , value , fields = None ) :
"""Modify a selection of individual items , by providing the indices ( coordinates )
for each item to be modified .
Parameters
selection : tuple
An integer ( coordinate ) array for each dimension of the array .
value : scalar or array - like
Value to be stored into the array .
fields : str or sequence of str , optional
For arrays with a structured dtype , one or more fields can be specified to set
data for .
Examples
Setup a 2 - dimensional array : :
> > > import zarr
> > > import numpy as np
> > > z = zarr . zeros ( ( 5 , 5 ) , dtype = int )
Set data for a selection of items : :
> > > z . set _ coordinate _ selection ( ( [ 1 , 4 ] , [ 1 , 4 ] ) , 1)
> > > z [ . . . ]
array ( [ [ 0 , 0 , 0 , 0 , 0 ] ,
[0 , 1 , 0 , 0 , 0 ] ,
[0 , 0 , 0 , 0 , 0 ] ,
[0 , 0 , 0 , 0 , 0 ] ,
[0 , 0 , 0 , 0 , 1 ] ] )
For convenience , this functionality is also available via the ` vindex ` property .
E . g . : :
> > > z . vindex [ [ 1 , 4 ] , [ 1 , 4 ] ] = 2
> > > z [ . . . ]
array ( [ [ 0 , 0 , 0 , 0 , 0 ] ,
[0 , 2 , 0 , 0 , 0 ] ,
[0 , 0 , 0 , 0 , 0 ] ,
[0 , 0 , 0 , 0 , 0 ] ,
[0 , 0 , 0 , 0 , 2 ] ] )
Notes
Coordinate indexing is also known as point selection , and is a form of vectorized
or inner indexing .
Slices are not supported . Coordinate arrays must be provided for all dimensions
of the array .
See Also
get _ basic _ selection , set _ basic _ selection , get _ mask _ selection , set _ mask _ selection ,
get _ orthogonal _ selection , set _ orthogonal _ selection , get _ coordinate _ selection ,
vindex , oindex , _ _ getitem _ _ , _ _ setitem _ _"""
|
# guard conditions
if self . _read_only :
err_read_only ( )
# refresh metadata
if not self . _cache_metadata :
self . _load_metadata_nosync ( )
# setup indexer
indexer = CoordinateIndexer ( selection , self )
# handle value - need to flatten
if not is_scalar ( value , self . _dtype ) :
value = np . asanyarray ( value )
if hasattr ( value , 'shape' ) and len ( value . shape ) > 1 :
value = value . reshape ( - 1 )
self . _set_selection ( indexer , value , fields = fields )
|
def maximize ( self , element : Expression [ z3 . ExprRef ] ) -> None :
"""In solving this solver will try to maximize the passed expression .
: param element :"""
|
self . raw . maximize ( element . raw )
|
def main ( argString = None ) :
"""The main function of the module .
: param argString : the options .
: type argString : list
These are the steps :
1 . Prints the options .
2 . Computes the frequency using Plink .
3 . Runs bafRegress ."""
|
# Getting and checking the options
args = parseArgs ( argString )
checkArgs ( args )
logger . info ( "Options used:" )
for key , value in vars ( args ) . items ( ) :
logger . info ( " --{} {}" . format ( key . replace ( "_" , "-" ) , value ) )
# Checks the sample raw data
logger . info ( "Checking the raw data files" )
sample_files = check_sample_files ( args . bfile + ".fam" , args . raw_dir )
# Finds the markers to extract
logger . info ( "Creating extraction list (autosome only)" )
create_extraction_file ( args . bfile + ".bim" , args . out )
# Run plink
logger . info ( "Computing frequency using Plink" )
run_plink ( args . bfile , args . out , args . out + ".to_extract" )
# Run bafRegress
logger . info ( "Running bafRegress" )
if args . sge :
run_bafRegress_sge ( sample_files , args . out , args . out + ".to_extract" , args . out + ".frq" , args )
else :
run_bafRegress ( sample_files , args . out , args . out + ".to_extract" , args . out + ".frq" , args )
|
def uint8 ( self , val ) :
"""append a frame containing a uint8"""
|
try :
self . msg += [ pack ( "B" , val ) ]
except struct . error :
raise ValueError ( "Expected uint32" )
return self
|
def _read_current_marker ( self ) :
"""Reads the current marker and returns its genotypes ."""
|
return self . _geno_values [ np . frombuffer ( self . _bed . read ( self . _nb_bytes ) , dtype = np . uint8 ) ] . flatten ( order = "C" ) [ : self . _nb_samples ]
|
def validate_raw_manifest_format ( raw_manifest : str ) -> None :
"""Raise a ValidationError if a manifest . . .
- is not tightly packed ( i . e . no linebreaks or extra whitespace )
- does not have alphabetically sorted keys
- has duplicate keys
- is not UTF - 8 encoded
- has a trailing newline"""
|
try :
manifest_dict = json . loads ( raw_manifest )
except json . JSONDecodeError as err :
raise json . JSONDecodeError ( "Failed to load package data. File is not a valid JSON document." , err . doc , err . pos , )
compact_manifest = json . dumps ( manifest_dict , sort_keys = True , separators = ( "," , ":" ) )
if raw_manifest != compact_manifest :
raise ValidationError ( "The manifest appears to be malformed. Please ensure that it conforms to the " "EthPM-Spec for document format. " "http://ethpm.github.io/ethpm-spec/package-spec.html#document-format " )
|
def filter_picks ( catalog , stations = None , channels = None , networks = None , locations = None , top_n_picks = None , evaluation_mode = 'all' ) :
"""Filter events in the catalog based on a number of parameters .
: param catalog : Catalog to filter .
: type catalog : obspy . core . event . Catalog
: param stations : List for stations to keep picks from .
: type stations : list
: param channels : List of channels to keep picks from .
: type channels : list
: param networks : List of networks to keep picks from .
: type networks : list
: param locations : List of location codes to use
: type locations : list
: param top _ n _ picks : Filter only the top N most used station - channel pairs .
: type top _ n _ picks : int
: param evaluation _ mode :
To select only manual or automatic picks , or use all ( default ) .
: type evaluation _ mode : str
: return :
Filtered Catalog - if events are left with no picks , they are removed
from the catalog .
: rtype : obspy . core . event . Catalog
. . note : :
Will filter first by station , then by channel , then by network , if
using top _ n _ picks , this will be done last , after the other filters
have been applied .
. . note : :
Doesn ' t work in place on the catalog , your input catalog will be safe
unless you overwrite it .
. . note : : Doesn ' t expand wildcard characters .
. . rubric : : Example
> > > from obspy . clients . fdsn import Client
> > > from eqcorrscan . utils . catalog _ utils import filter _ picks
> > > from obspy import UTCDateTime
> > > client = Client ( ' NCEDC ' )
> > > t1 = UTCDateTime ( 2004 , 9 , 28)
> > > t2 = t1 + 86400
> > > catalog = client . get _ events ( starttime = t1 , endtime = t2 , minmagnitude = 3,
. . . minlatitude = 35.7 , maxlatitude = 36.1,
. . . minlongitude = - 120.6 , maxlongitude = - 120.2,
. . . includearrivals = True )
> > > print ( len ( catalog ) )
12
> > > filtered _ catalog = filter _ picks ( catalog , stations = [ ' BMS ' , ' BAP ' ,
. . . ' PAG ' , ' PAN ' ,
. . . ' PBI ' , ' PKY ' ,
. . . ' YEG ' , ' WOF ' ] )
> > > print ( len ( filtered _ catalog ) )
12
> > > stations = [ ]
> > > for event in filtered _ catalog :
. . . for pick in event . picks :
. . . stations . append ( pick . waveform _ id . station _ code )
> > > print ( sorted ( list ( set ( stations ) ) ) )
[ ' BAP ' , ' BMS ' , ' PAG ' , ' PAN ' , ' PBI ' , ' PKY ' , ' WOF ' , ' YEG ' ]"""
|
# Don ' t work in place on the catalog
filtered_catalog = catalog . copy ( )
if stations :
for event in filtered_catalog :
if len ( event . picks ) == 0 :
continue
event . picks = [ pick for pick in event . picks if pick . waveform_id . station_code in stations ]
if channels :
for event in filtered_catalog :
if len ( event . picks ) == 0 :
continue
event . picks = [ pick for pick in event . picks if pick . waveform_id . channel_code in channels ]
if networks :
for event in filtered_catalog :
if len ( event . picks ) == 0 :
continue
event . picks = [ pick for pick in event . picks if pick . waveform_id . network_code in networks ]
if locations :
for event in filtered_catalog :
if len ( event . picks ) == 0 :
continue
event . picks = [ pick for pick in event . picks if pick . waveform_id . location_code in locations ]
if evaluation_mode == 'manual' :
for event in filtered_catalog :
event . picks = [ pick for pick in event . picks if pick . evaluation_mode == 'manual' ]
elif evaluation_mode == 'automatic' :
for event in filtered_catalog :
event . picks = [ pick for pick in event . picks if pick . evaluation_mode == 'automatic' ]
elif evaluation_mode != 'all' :
warnings . warn ( 'Unrecognised evaluation_mode: %s, using all picks' % evaluation_mode )
if top_n_picks :
all_picks = [ ]
for event in filtered_catalog :
all_picks += [ ( pick . waveform_id . station_code , pick . waveform_id . channel_code ) for pick in event . picks ]
counted = Counter ( all_picks ) . most_common ( )
all_picks = [ ]
# Hack around sorting the counter object : Py 2 does it differently to 3
for i in range ( counted [ 0 ] [ 1 ] ) :
highest = [ item [ 0 ] for item in counted if item [ 1 ] >= counted [ 0 ] [ 1 ] - i ]
# Sort them by alphabetical order in station
highest = sorted ( highest , key = lambda tup : tup [ 0 ] )
for stachan in highest :
if stachan not in all_picks :
all_picks . append ( stachan )
if len ( all_picks ) > top_n_picks :
all_picks = all_picks [ 0 : top_n_picks ]
break
for event in filtered_catalog :
if len ( event . picks ) == 0 :
continue
event . picks = [ pick for pick in event . picks if ( pick . waveform_id . station_code , pick . waveform_id . channel_code ) in all_picks ]
# Remove events without picks
tmp_catalog = Catalog ( )
for event in filtered_catalog :
if len ( event . picks ) > 0 :
tmp_catalog . append ( event )
return tmp_catalog
|
def get_suffix ( name ) :
"""Check if file name have valid suffix for formatting .
if have suffix return it else return False ."""
|
a = name . count ( "." )
if a :
ext = name . split ( "." ) [ - 1 ]
if ext in LANGS . keys ( ) :
return ext
return False
else :
return False
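
Illustrative calls (assuming LANGS maps extension strings such as 'py' to formatters; the exact contents of LANGS are not shown in the source):

print(get_suffix("script.py"))  # 'py', provided 'py' is a key in LANGS
print(get_suffix("notes.xyz"))  # False -> extension not in LANGS
print(get_suffix("README"))     # False -> no dot, hence no suffix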
|
def remove ( self , func ) :
"""Remove any provisioned log sink if auto created"""
|
if not self . data [ 'name' ] . startswith ( self . prefix ) :
return
parent = self . get_parent ( self . get_log ( ) )
_ , sink_path , _ = self . get_sink ( )
client = self . session . client ( 'logging' , 'v2' , '%s.sinks' % ( parent . split ( '/' , 1 ) [ 0 ] ) )
try :
client . execute_command ( 'delete' , { 'sinkName' : sink_path } )
except HttpError as e :
if e . resp . status != 404 :
raise
|
def _parse_kexgss_init ( self , m ) :
"""Parse the SSH2 _ MSG _ KEXGSS _ INIT message ( server mode ) .
: param ` . Message ` m : The content of the SSH2 _ MSG _ KEXGSS _ INIT message"""
|
# server mode
client_token = m . get_string ( )
self . e = m . get_mpint ( )
if ( self . e < 1 ) or ( self . e > self . P - 1 ) :
raise SSHException ( 'Client kex "e" is out of range' )
K = pow ( self . e , self . x , self . P )
self . transport . host_key = NullHostKey ( )
key = self . transport . host_key . __str__ ( )
# okay , build up the hash H of
# ( V _ C | | V _ S | | I _ C | | I _ S | | K _ S | | e | | f | | K )
hm = Message ( )
hm . add ( self . transport . remote_version , self . transport . local_version , self . transport . remote_kex_init , self . transport . local_kex_init , )
hm . add_string ( key )
hm . add_mpint ( self . e )
hm . add_mpint ( self . f )
hm . add_mpint ( K )
H = sha1 ( hm . asbytes ( ) ) . digest ( )
self . transport . _set_K_H ( K , H )
srv_token = self . kexgss . ssh_accept_sec_context ( self . gss_host , client_token )
m = Message ( )
if self . kexgss . _gss_srv_ctxt_status :
mic_token = self . kexgss . ssh_get_mic ( self . transport . session_id , gss_kex = True )
m . add_byte ( c_MSG_KEXGSS_COMPLETE )
m . add_mpint ( self . f )
m . add_string ( mic_token )
if srv_token is not None :
m . add_boolean ( True )
m . add_string ( srv_token )
else :
m . add_boolean ( False )
self . transport . _send_message ( m )
self . transport . gss_kex_used = True
self . transport . _activate_outbound ( )
else :
m . add_byte ( c_MSG_KEXGSS_CONTINUE )
m . add_string ( srv_token )
self . transport . _send_message ( m )
self . transport . _expect_packet ( MSG_KEXGSS_CONTINUE , MSG_KEXGSS_COMPLETE , MSG_KEXGSS_ERROR )
|
def load_resources ( bucket , prefix , region , account_config , accounts , assume , start , end , resources , store , db , verbose , debug ) :
"""load resources into resource database ."""
|
logging . basicConfig ( level = ( verbose and logging . DEBUG or logging . INFO ) )
logging . getLogger ( 'botocore' ) . setLevel ( logging . WARNING )
logging . getLogger ( 's3transfer' ) . setLevel ( logging . WARNING )
start = date_parse ( start )
end = date_parse ( end )
if not resources :
resources = [ 'NetworkInterface' , 'Instance' , 'LoadBalancer' ]
account_map = { }
data = yaml . safe_load ( account_config . read ( ) )
for a in data . get ( 'accounts' , ( ) ) :
if accounts and ( a [ 'name' ] in accounts or a [ 'account_id' ] in accounts ) :
account_map [ a [ 'account_id' ] ] = a
elif not accounts :
account_map [ a [ 'account_id' ] ] = a
account_ids = list ( account_map )
executor = ProcessPoolExecutor
if debug :
from c7n . executor import MainThreadExecutor
MainThreadExecutor . c7n_async = False
executor = MainThreadExecutor
stats = Counter ( )
t = time . time ( )
with executor ( max_workers = multiprocessing . cpu_count ( ) ) as w :
futures = { }
for a in account_ids :
for r in resources :
futures [ w . submit ( process_account_resources , a , bucket , prefix , region , store , start , end , r ) ] = ( a , r )
indexer = RESOURCE_FILE_INDEXERS [ r ]
for f in as_completed ( futures ) :
a , r = futures [ f ]
if f . exception ( ) :
log . error ( "account:%s error:%s" , a , f . exception ( ) )
continue
files , dl_stats = f . result ( )
idx_stats = indexer ( db , resource_config_iter ( files ) )
log . info ( "loaded account:%s files:%d bytes:%s events:%d resources:%d idx-time:%d dl-time:%d" , account_map [ a ] [ 'name' ] , len ( files ) , human_size ( dl_stats [ 'DownloadSize' ] + dl_stats [ 'CacheSize' ] ) , idx_stats [ 'Records' ] , idx_stats [ 'RowCount' ] , idx_stats [ 'IndexTime' ] , dl_stats [ 'FetchTime' ] )
stats . update ( dl_stats )
stats . update ( idx_stats )
log . info ( "Loaded %d resources across %d accounts in %0.2f" , stats [ 'RowCount' ] , len ( account_ids ) , time . time ( ) - t )
|
def discard ( self , val ) :
"""Remove the first occurrence of * val * .
If * val * is not a member , does nothing ."""
|
_maxes = self . _maxes
if not _maxes :
return
key = self . _key ( val )
pos = bisect_left ( _maxes , key )
if pos == len ( _maxes ) :
return
_keys = self . _keys
_lists = self . _lists
idx = bisect_left ( _keys [ pos ] , key )
len_keys = len ( _keys )
len_sublist = len ( _keys [ pos ] )
while True :
if _keys [ pos ] [ idx ] != key :
return
if _lists [ pos ] [ idx ] == val :
self . _delete ( pos , idx )
return
idx += 1
if idx == len_sublist :
pos += 1
if pos == len_keys :
return
len_sublist = len ( _keys [ pos ] )
idx = 0
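
Equivalent behaviour sketched with sortedcontainers' SortedKeyList, which this method appears to belong to (an assumption based on the _maxes / _keys / _lists attributes):

from sortedcontainers import SortedKeyList

s = SortedKeyList([3, 1, 2, 2], key=lambda x: -x)  # descending order: [3, 2, 2, 1]
s.discard(2)     # removes the first occurrence of 2
s.discard(99)    # not a member: silently does nothing
print(list(s))   # [3, 2, 1]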
|
def ParseSearchRow ( self , parser_mediator , query , row , ** unused_kwargs ) :
"""Parses a search row from the database .
Args :
parser _ mediator ( ParserMediator ) : mediates interactions between parsers
and other components , such as storage and dfvfs .
query ( str ) : query that created the row .
row ( sqlite3 . Row ) : row resulting from query ."""
|
query_hash = hash ( query )
event_data = TwitterAndroidSearchEventData ( )
event_data . query = query
event_data . name = self . _GetRowValue ( query_hash , row , 'name' )
event_data . search_query = self . _GetRowValue ( query_hash , row , 'query' )
timestamp = self . _GetRowValue ( query_hash , row , 'time' )
if timestamp :
date_time = dfdatetime_java_time . JavaTime ( timestamp = timestamp )
event = time_events . DateTimeValuesEvent ( date_time , definitions . TIME_DESCRIPTION_CREATION )
parser_mediator . ProduceEventWithEventData ( event , event_data )
|
def init_drivers ( enable_debug_driver = False ) :
"""Initialize all the drivers ."""
|
for driver in DRIVERS :
try :
if driver != DebugDriver or enable_debug_driver :
CLASSES . append ( driver )
except Exception : # pylint : disable = W0703
continue
|
def plot_chempot_range_map ( self , elements , referenced = True ) :
"""Plot the chemical potential range _ map . Currently works only for
3 - component PDs .
Args :
elements : Sequence of elements to be considered as independent
variables . E . g . , if you want to show the stability ranges of
all Li - Co - O phases wrt uLi and uO , you will supply
[ Element ( " Li " ) , Element ( " O " ) ]
referenced : if True , gives the results with a reference being the
energy of the elemental phase . If False , gives absolute values ."""
|
self . get_chempot_range_map_plot ( elements , referenced = referenced ) . show ( )
|
def neural_networks ( self ) :
"""List of the neural networks in the ensemble .
Returns
list of ` Class1NeuralNetwork `"""
|
result = [ ]
for models in self . allele_to_allele_specific_models . values ( ) :
result . extend ( models )
result . extend ( self . class1_pan_allele_models )
return result
|