signature
stringlengths 29
44.1k
| implementation
stringlengths 0
85.2k
|
---|---|
def insert(self):
    """Insert this document into the frame's collection."""
    from mongoframes.queries import to_refs

    # Notify listeners that an insert is about to happen.
    signal('insert').send(self.__class__, frames=[self])

    # Convert any embedded frames into DB references before storage.
    prepared = to_refs(self._document)

    # Store the document and record the id MongoDB assigned to it.
    self._id = self.get_collection().insert_one(prepared).inserted_id

    # Notify listeners that the insert has completed.
    signal('inserted').send(self.__class__, frames=[self])
|
def _decode_v2 ( value ) :
"""Decode ' : ' and ' $ ' characters encoded by ` _ encode ` ."""
|
if re . search ( r'(?<!\$):' , value ) :
raise ValueError ( "Unescaped ':' in the encoded string" )
decode_colons = value . replace ( '$:' , ':' )
if re . search ( r'(?<!\$)(\$\$)*\$([^$]|\Z)' , decode_colons ) :
raise ValueError ( "Unescaped '$' in encoded string" )
return decode_colons . replace ( '$$' , '$' )
|
def save(obj, path):
    """Pickle (serialize) *obj* to the file at *path*.

    Parameters
    ----------
    obj : any object
        Object to serialize.
    path : string
        Destination file path.

    Failures are reported on stdout rather than raised (best effort).
    """
    with open(path, 'wb') as fh:
        try:
            pickle.dump(obj, fh, protocol=pickle.HIGHEST_PROTOCOL)
        except Exception as exc:
            # Deliberately swallow the error: report it and carry on.
            print('Pickling failed for object {0}, path {1}'.format(obj, path))
            print('Error message: {0}'.format(exc))
|
def isSequence(arg):
    """Return True when *arg* is iterable but not string-like."""
    # Strings (anything exposing .strip) are deliberately excluded.
    if hasattr(arg, "strip"):
        return False
    # Old-style sliceable containers, or anything supporting iteration.
    return hasattr(arg, "__getslice__") or hasattr(arg, "__iter__")
|
def to_paginated_list(self, result, _ns, _operation, **kwargs):
    """Convert a controller result to a paginated list.

    The result format is assumed to meet the contract of this page
    class's `parse_result` function.
    """
    items, context = self.parse_result(result)
    paginated = PaginatedList(
        items=items,
        _page=self,
        _ns=_ns,
        _operation=_operation,
        _context=context,
    )
    # No extra response headers are produced here.
    return paginated, {}
|
def _set_show_mpls_ldp_statistics(self, v, load=False):
    """Setter method for show_mpls_ldp_statistics, mapped from YANG variable
    /brocade_mpls_rpc/show_mpls_ldp_statistics (rpc).

    If this variable is read-only (config: false) in the source YANG file,
    then _set_show_mpls_ldp_statistics is considered as a private method.
    Backends looking to populate this variable should do so via calling
    thisObj._set_show_mpls_ldp_statistics() directly.
    """
    # If the supplied value carries its own YANG type wrapper, rebuild it so
    # it can be re-validated against this leaf's type below.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        # Validate and wrap the value in the generated YANG class for this RPC.
        t = YANGDynClass(v, base=show_mpls_ldp_statistics.show_mpls_ldp_statistics, is_leaf=True, yang_name="show-mpls-ldp-statistics", rest_name="show-mpls-ldp-statistics", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'hidden': u'rpccmd', u'actionpoint': u'showMplsLdpStatistics'}}, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='rpc', is_config=True)
    except (TypeError, ValueError):
        # Surface a YANG-style error dict describing the expected generated type.
        raise ValueError({'error-string': """show_mpls_ldp_statistics must be of a type compatible with rpc""", 'defined-type': "rpc", 'generated-type': """YANGDynClass(base=show_mpls_ldp_statistics.show_mpls_ldp_statistics, is_leaf=True, yang_name="show-mpls-ldp-statistics", rest_name="show-mpls-ldp-statistics", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'hidden': u'rpccmd', u'actionpoint': u'showMplsLdpStatistics'}}, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='rpc', is_config=True)""", })
    self.__show_mpls_ldp_statistics = t
    # Notify the parent object (if it supports change tracking) of the update.
    if hasattr(self, '_set'):
        self._set()
|
def create_node(self, network, participant):
    """Create an MCMCP agent node for *participant* on *network*."""
    agent_class = self.models.MCMCPAgent
    return agent_class(network=network, participant=participant)
|
def _get_banner():
    """Return a banner message for the interactive console."""
    # Give a hint about exiting. Most people exit with 'quit()', which does
    # not return from the interact() method and thus would not write the
    # history.
    if _sys.platform == 'win32':
        exit_hint = 'Enter Ctrl-Z or quit() or exit() to exit'
    else:
        exit_hint = 'Press Ctrl-D or enter quit() or exit() to exit'
    parts = [
        '',
        'Python %s' % _sys.version,
        '',
        'Wbemcli interactive shell',
        '%s' % _get_connection_info(),
        exit_hint,
        'Enter h() for help',
    ]
    return '\n'.join(parts)
|
def make_catalogs_backup(catalogs, local_catalogs_dir="", include_metadata=True, include_data=True, include_metadata_xlsx=False, use_short_path=False):
    """Make a local backup of the data and metadata of each catalog.

    Args:
        catalogs (list or dict): List of catalogs (items that DataJson can
            interpret as catalogs), or a dict whose keys are used as the
            catalog identifiers, e.g.
                "modernizacion":
                    "http://infra.datos.gob.ar/catalog/modernizacion/data.json"
            For a list, ids are taken from each catalog's
            `catalog_identifier`; catalogs without one are skipped.
            For a dict, the keys override the `catalog_identifier` values
            (which are not read).
        local_catalogs_dir (str): Local directory under which the
            "catalog/..." folder with every catalog will be created.
        include_metadata (bool): When true, generate the data.json and
            catalog.xlsx files.
        include_data (bool): When true, download every distribution of
            every catalog.
        include_metadata_xlsx (bool): When true, also generate the xlsx
            version of the metadata.
        use_short_path (bool): When true, use the short directory layout.

    Returns:
        None
    """
    if isinstance(catalogs, list):
        for catalog in catalogs:
            try:
                make_catalog_backup(catalog, local_catalogs_dir=local_catalogs_dir, include_metadata=include_metadata, include_metadata_xlsx=include_metadata_xlsx, include_data=include_data, use_short_path=use_short_path)
            except Exception:
                # Back up as many catalogs as possible; log and continue.
                logger.exception("ERROR en {}".format(catalog))
    elif isinstance(catalogs, dict):
        # `items()` (not the Python-2-only `iteritems()`) keeps this working
        # on both Python 2 and Python 3.
        for catalog_id, catalog in catalogs.items():
            try:
                make_catalog_backup(catalog, catalog_id, local_catalogs_dir=local_catalogs_dir, include_metadata=include_metadata, include_metadata_xlsx=include_metadata_xlsx, include_data=include_data, use_short_path=use_short_path)
            except Exception:
                logger.exception("ERROR en {} ({})".format(catalog, catalog_id))
|
def create_job(batch_service_client, job_id, pool_id):
    """Creates a job with the specified ID, associated with the specified pool.

    :param batch_service_client: A Batch service client.
    :type batch_service_client: `azure.batch.BatchServiceClient`
    :param str job_id: The ID for the job.
    :param str pool_id: The ID for the pool.
    """
    print('Creating job [{}]...'.format(job_id))
    pool_info = batch.models.PoolInformation(pool_id=pool_id)
    new_job = batch.models.JobAddParameter(id=job_id, pool_info=pool_info)
    try:
        batch_service_client.job.add(new_job)
    except batchmodels.batch_error.BatchErrorException as err:
        # Show the service error details, then let the caller handle it.
        print_batch_exception(err)
        raise
|
def create_ikepolicy(name, profile=None, **kwargs):
    '''Creates a new IKEPolicy.

    CLI Example:

    .. code-block:: bash

        salt '*' neutron.create_ikepolicy ikepolicy-name
                phase1_negotiation_mode=main auth_algorithm=sha1
                encryption_algorithm=aes-128 pfs=group5

    :param name: Name of the IKE policy
    :param phase1_negotiation_mode: IKE Phase1 negotiation mode in lowercase,
        default: main (Optional)
    :param auth_algorithm: Authentication algorithm in lowercase,
        default: sha1 (Optional)
    :param encryption_algorithm: Encryption algorithm in lowercase,
        default: aes-128 (Optional)
    :param pfs: Perfect Forward Secrecy in lowercase,
        default: group5 (Optional)
    :param units: IKE lifetime attribute, default: seconds (Optional)
    :param value: IKE lifetime attribute, default: 3600 (Optional)
    :param ike_version: IKE version in lowercase, default: v1 (Optional)
    :param profile: Profile to build on (Optional)
    :param kwargs: Additional policy attributes passed through to Neutron
    :return: Created IKE policy information
    '''
    neutron_client = _auth(profile)
    return neutron_client.create_ikepolicy(name, **kwargs)
|
def _list_distributions(conn, name=None, region=None, key=None, keyid=None, profile=None, ):
    '''Private function that returns an iterator over all CloudFront distributions.

    The caller is responsible for all boto-related error handling.

    name
        (Optional) Only yield the distribution with the given name
    '''
    for dl_ in conn.get_paginator('list_distributions').paginate():
        distribution_list = dl_['DistributionList']
        if 'Items' not in distribution_list:
            # If there are no items, AWS omits the `Items` key for some reason
            continue
        for partial_dist in distribution_list['Items']:
            # Distributions are identified by their Name tag; fetch the tags.
            tags = conn.list_tags_for_resource(Resource=partial_dist['ARN'])
            tags = dict((kv['Key'], kv['Value']) for kv in tags['Tags']['Items'])
            id_ = partial_dist['Id']
            if 'Name' not in tags:
                # Untagged distributions cannot be addressed by name; skip them.
                log.warning('CloudFront distribution %s has no Name tag.', id_)
                continue
            distribution_name = tags.pop('Name', None)
            if name is not None and distribution_name != name:
                continue
            # NOTE: list_distributions() returns a DistributionList,
            # which nominally contains a list of Distribution objects.
            # However, they are mangled in that they are missing values
            # (`Logging`, `ActiveTrustedSigners`, and `ETag` keys)
            # and moreover flatten the normally nested DistributionConfig
            # attributes to the top level.
            # Hence, we must call get_distribution() to get the full object,
            # and we cache these objects to help lessen API calls.
            distribution = _cache_id('cloudfront', sub_resource=distribution_name, region=region, key=key, keyid=keyid, profile=profile, )
            if distribution:
                # Cache hit: yield the cached full object, skip the API call.
                yield (distribution_name, distribution)
                continue
            dist_with_etag = conn.get_distribution(Id=id_)
            distribution = {'distribution': dist_with_etag['Distribution'], 'etag': dist_with_etag['ETag'], 'tags': tags, }
            # Store the freshly fetched object in the cache for next time.
            _cache_id('cloudfront', sub_resource=distribution_name, resource_id=distribution, region=region, key=key, keyid=keyid, profile=profile, )
            yield (distribution_name, distribution)
|
def _get_perez_coefficients(perezmodel):
    '''Find coefficients for the Perez model.

    Parameters
    ----------
    perezmodel : string (optional, default='allsitescomposite1990')
        A character string which selects the desired set of Perez
        coefficients. Possible model selections are:

        * 'allsitescomposite1990' (same as '1990')
        * 'allsitescomposite1988'
        * 'sandiacomposite1988'
        * 'usacomposite1988'
        * 'france1988'
        * 'phoenix1988'
        * 'elmonte1988'
        * 'osage1988'
        * 'albuquerque1988'
        * 'capecanaveral1988'
        * 'albany1988'

    Returns
    -------
    F1coeffs, F2coeffs : (array, array)
        F1 and F2 coefficients for the Perez model

    Raises
    ------
    KeyError
        If *perezmodel* is not one of the keys listed above (note the dict
        below has no bare '1990' key; callers must pass the full name).

    References
    ----------
    [1] Loutzenhiser P. G. et. al. "Empirical validation of models to
    compute solar irradiance on inclined surfaces for building energy
    simulation" 2007, Solar Energy vol. 81. pp. 254-267

    [2] Perez, R., Seals, R., Ineichen, P., Stewart, R., Menicucci, D.,
    1987. A new simplified version of the Perez diffuse irradiance model
    for tilted surfaces. Solar Energy 39(3), 221-232.

    [3] Perez, R., Ineichen, P., Seals, R., Michalsky, J., Stewart, R.,
    1990. Modeling daylight availability and irradiance components from
    direct and global irradiance. Solar Energy 44 (5), 271-289.

    [4] Perez, R. et. al 1988. "The Development and Verification of the
    Perez Diffuse Radiation Model". SAND88-7030
    '''
    # Each model maps to 8 rows (one per sky-clearness bin) of 6 published
    # coefficients: [f11, f12, f13, f21, f22, f23].
    coeffdict = { 'allsitescomposite1990' : [ [ - 0.0080 , 0.5880 , - 0.0620 , - 0.0600 , 0.0720 , - 0.0220 ] , [ 0.1300 , 0.6830 , - 0.1510 , - 0.0190 , 0.0660 , - 0.0290 ] , [ 0.3300 , 0.4870 , - 0.2210 , 0.0550 , - 0.0640 , - 0.0260 ] , [ 0.5680 , 0.1870 , - 0.2950 , 0.1090 , - 0.1520 , - 0.0140 ] , [ 0.8730 , - 0.3920 , - 0.3620 , 0.2260 , - 0.4620 , 0.0010 ] , [ 1.1320 , - 1.2370 , - 0.4120 , 0.2880 , - 0.8230 , 0.0560 ] , [ 1.0600 , - 1.6000 , - 0.3590 , 0.2640 , - 1.1270 , 0.1310 ] , [ 0.6780 , - 0.3270 , - 0.2500 , 0.1560 , - 1.3770 , 0.2510 ] ] , 'allsitescomposite1988' : [ [ - 0.0180 , 0.7050 , - 0.071 , - 0.0580 , 0.1020 , - 0.0260 ] , [ 0.1910 , 0.6450 , - 0.1710 , 0.0120 , 0.0090 , - 0.0270 ] , [ 0.4400 , 0.3780 , - 0.2560 , 0.0870 , - 0.1040 , - 0.0250 ] , [ 0.7560 , - 0.1210 , - 0.3460 , 0.1790 , - 0.3210 , - 0.0080 ] , [ 0.9960 , - 0.6450 , - 0.4050 , 0.2600 , - 0.5900 , 0.0170 ] , [ 1.0980 , - 1.2900 , - 0.3930 , 0.2690 , - 0.8320 , 0.0750 ] , [ 0.9730 , - 1.1350 , - 0.3780 , 0.1240 , - 0.2580 , 0.1490 ] , [ 0.6890 , - 0.4120 , - 0.2730 , 0.1990 , - 1.6750 , 0.2370 ] ] , 'sandiacomposite1988' : [ [ - 0.1960 , 1.0840 , - 0.0060 , - 0.1140 , 0.1800 , - 0.0190 ] , [ 0.2360 , 0.5190 , - 0.1800 , - 0.0110 , 0.0200 , - 0.0380 ] , [ 0.4540 , 0.3210 , - 0.2550 , 0.0720 , - 0.0980 , - 0.0460 ] , [ 0.8660 , - 0.3810 , - 0.3750 , 0.2030 , - 0.4030 , - 0.0490 ] , [ 1.0260 , - 0.7110 , - 0.4260 , 0.2730 , - 0.6020 , - 0.0610 ] , [ 0.9780 , - 0.9860 , - 0.3500 , 0.2800 , - 0.9150 , - 0.0240 ] , [ 0.7480 , - 0.9130 , - 0.2360 , 0.1730 , - 1.0450 , 0.0650 ] , [ 0.3180 , - 0.7570 , 0.1030 , 0.0620 , - 1.6980 , 0.2360 ] ] , 'usacomposite1988' : [ [ - 0.0340 , 0.6710 , - 0.0590 , - 0.0590 , 0.0860 , - 0.0280 ] , [ 0.2550 , 0.4740 , - 0.1910 , 0.0180 , - 0.0140 , - 0.0330 ] , [ 0.4270 , 0.3490 , - 0.2450 , 0.0930 , - 0.1210 , - 0.0390 ] , [ 0.7560 , - 0.2130 , - 0.3280 , 0.1750 , - 0.3040 , - 0.0270 ] , [ 1.0200 , - 0.8570 , - 0.3850 , 0.2800 , - 0.6380 , - 0.0190 ] , [ 1.0500 , - 1.3440 , - 0.3480 , 0.2800 , - 0.8930 , 0.0370 ] , [ 0.9740 , - 1.5070 , - 0.3700 , 0.1540 , - 0.5680 , 0.1090 ] , [ 0.7440 , - 1.8170 , - 0.2560 , 0.2460 , - 2.6180 , 0.2300 ] ] , 'france1988' : [ [ 0.0130 , 0.7640 , - 0.1000 , - 0.0580 , 0.1270 , - 0.0230 ] , [ 0.0950 , 0.9200 , - 0.1520 , 0 , 0.0510 , - 0.0200 ] , [ 0.4640 , 0.4210 , - 0.2800 , 0.0640 , - 0.0510 , - 0.0020 ] , [ 0.7590 , - 0.0090 , - 0.3730 , 0.2010 , - 0.3820 , 0.0100 ] , [ 0.9760 , - 0.4000 , - 0.4360 , 0.2710 , - 0.6380 , 0.0510 ] , [ 1.1760 , - 1.2540 , - 0.4620 , 0.2950 , - 0.9750 , 0.1290 ] , [ 1.1060 , - 1.5630 , - 0.3980 , 0.3010 , - 1.4420 , 0.2120 ] , [ 0.9340 , - 1.5010 , - 0.2710 , 0.4200 , - 2.9170 , 0.2490 ] ] , 'phoenix1988' : [ [ - 0.0030 , 0.7280 , - 0.0970 , - 0.0750 , 0.1420 , - 0.0430 ] , [ 0.2790 , 0.3540 , - 0.1760 , 0.0300 , - 0.0550 , - 0.0540 ] , [ 0.4690 , 0.1680 , - 0.2460 , 0.0480 , - 0.0420 , - 0.0570 ] , [ 0.8560 , - 0.5190 , - 0.3400 , 0.1760 , - 0.3800 , - 0.0310 ] , [ 0.9410 , - 0.6250 , - 0.3910 , 0.1880 , - 0.3600 , - 0.0490 ] , [ 1.0560 , - 1.1340 , - 0.4100 , 0.2810 , - 0.7940 , - 0.0650 ] , [ 0.9010 , - 2.1390 , - 0.2690 , 0.1180 , - 0.6650 , 0.0460 ] , [ 0.1070 , 0.4810 , 0.1430 , - 0.1110 , - 0.1370 , 0.2340 ] ] , 'elmonte1988' : [ [ 0.0270 , 0.7010 , - 0.1190 , - 0.0580 , 0.1070 , - 0.0600 ] , [ 0.1810 , 0.6710 , - 0.1780 , - 0.0790 , 0.1940 , - 0.0350 ] , [ 0.4760 , 0.4070 , - 0.2880 , 0.0540 , - 0.0320 , - 0.0550 ] , [ 0.8750 , - 0.2180 , - 0.4030 , 0.1870 , - 0.3090 , - 0.0610 ] , [ 1.1660 , - 1.0140 , - 0.4540 , 0.2110 , - 0.4100 , - 0.0440 ] , [ 1.1430 , - 2.0640 , - 0.2910 , 0.0970 , - 0.3190 , 0.0530 ] , [ 1.0940 , - 2.6320 , - 0.2590 , 0.0290 , - 0.4220 , 0.1470 ] , [ 0.1550 , 1.7230 , 0.1630 , - 0.1310 , - 0.0190 , 0.2770 ] ] , 'osage1988' : [ [ - 0.3530 , 1.4740 , 0.0570 , - 0.1750 , 0.3120 , 0.0090 ] , [ 0.3630 , 0.2180 , - 0.2120 , 0.0190 , - 0.0340 , - 0.0590 ] , [ - 0.0310 , 1.2620 , - 0.0840 , - 0.0820 , 0.2310 , - 0.0170 ] , [ 0.6910 , 0.0390 , - 0.2950 , 0.0910 , - 0.1310 , - 0.0350 ] , [ 1.1820 , - 1.3500 , - 0.3210 , 0.4080 , - 0.9850 , - 0.0880 ] , [ 0.7640 , 0.0190 , - 0.2030 , 0.2170 , - 0.2940 , - 0.1030 ] , [ 0.2190 , 1.4120 , 0.2440 , 0.4710 , - 2.9880 , 0.0340 ] , [ 3.5780 , 22.2310 , - 10.7450 , 2.4260 , 4.8920 , - 5.6870 ] ] , 'albuquerque1988' : [ [ 0.0340 , 0.5010 , - 0.0940 , - 0.0630 , 0.1060 , - 0.0440 ] , [ 0.2290 , 0.4670 , - 0.1560 , - 0.0050 , - 0.0190 , - 0.0230 ] , [ 0.4860 , 0.2410 , - 0.2530 , 0.0530 , - 0.0640 , - 0.0220 ] , [ 0.8740 , - 0.3930 , - 0.3970 , 0.1810 , - 0.3270 , - 0.0370 ] , [ 1.1930 , - 1.2960 , - 0.5010 , 0.2810 , - 0.6560 , - 0.0450 ] , [ 1.0560 , - 1.7580 , - 0.3740 , 0.2260 , - 0.7590 , 0.0340 ] , [ 0.9010 , - 4.7830 , - 0.1090 , 0.0630 , - 0.9700 , 0.1960 ] , [ 0.8510 , - 7.0550 , - 0.0530 , 0.0600 , - 2.8330 , 0.3300 ] ] , 'capecanaveral1988' : [ [ 0.0750 , 0.5330 , - 0.1240 , - 0.0670 , 0.0420 , - 0.0200 ] , [ 0.2950 , 0.4970 , - 0.2180 , - 0.0080 , 0.0030 , - 0.0290 ] , [ 0.5140 , 0.0810 , - 0.2610 , 0.0750 , - 0.1600 , - 0.0290 ] , [ 0.7470 , - 0.3290 , - 0.3250 , 0.1810 , - 0.4160 , - 0.0300 ] , [ 0.9010 , - 0.8830 , - 0.2970 , 0.1780 , - 0.4890 , 0.0080 ] , [ 0.5910 , - 0.0440 , - 0.1160 , 0.2350 , - 0.9990 , 0.0980 ] , [ 0.5370 , - 2.4020 , 0.3200 , 0.1690 , - 1.9710 , 0.3100 ] , [ - 0.8050 , 4.5460 , 1.0720 , - 0.2580 , - 0.9500 , 0.7530 ] ] , 'albany1988' : [ [ 0.0120 , 0.5540 , - 0.0760 , - 0.0520 , 0.0840 , - 0.0290 ] , [ 0.2670 , 0.4370 , - 0.1940 , 0.0160 , 0.0220 , - 0.0360 ] , [ 0.4200 , 0.3360 , - 0.2370 , 0.0740 , - 0.0520 , - 0.0320 ] , [ 0.6380 , - 0.0010 , - 0.2810 , 0.1380 , - 0.1890 , - 0.0120 ] , [ 1.0190 , - 1.0270 , - 0.3420 , 0.2710 , - 0.6280 , 0.0140 ] , [ 1.1490 , - 1.9400 , - 0.3310 , 0.3220 , - 1.0970 , 0.0800 ] , [ 1.4340 , - 3.9940 , - 0.4920 , 0.4530 , - 2.3760 , 0.1170 ] , [ 1.0070 , - 2.2920 , - 0.4820 , 0.3900 , - 3.3680 , 0.2290 ] ] , }
    # Shape (8, 6): first three columns are the F1 (circumsolar) terms,
    # the remaining columns are the F2 (horizon brightening) terms.
    # (The 3:7 stop index is clamped to the 6 available columns.)
    array = np.array(coeffdict[perezmodel])
    F1coeffs = array[:, 0:3]
    F2coeffs = array[:, 3:7]
    return F1coeffs, F2coeffs
|
def copy_heroku_to_local(id):
    """Copy a Heroku database locally."""
    app = HerokuApp(dallinger_uid=id)
    # Drop any stale local copy first; ignore failures (e.g. no such DB).
    try:
        subprocess.call(["dropdb", app.name])
    except Exception:
        pass
    app.pg_pull()
|
def assert_json_subset(first, second):
    """Assert that a JSON object or array is a subset of another JSON object
    or array.

    The first JSON object or array must be supplied as a JSON-compatible
    dict or list; the JSON object or array to check must be a string, a
    UTF-8 bytes object, or a JSON-compatible list or dict.

    A JSON non-object, non-array value is the subset of another JSON value
    if they are equal.

    A JSON object is the subset of another JSON object if for each
    name/value pair in the former there is a name/value pair in the latter
    with the same name, and the former value is a subset of the latter
    value.

    A JSON array is the subset of another JSON array if they have the same
    number of elements and each element in the former is a subset of the
    corresponding element in the latter.

    >>> assert_json_subset({}, '{}')
    >>> assert_json_subset({}, '{"foo": "bar"}')
    >>> assert_json_subset({"foo": "bar"}, '{}')
    Traceback (most recent call last):
    AssertionError: element 'foo' missing from element $
    >>> assert_json_subset([1, 2], '[1, 2]')
    >>> assert_json_subset([2, 1], '[1, 2]')
    Traceback (most recent call last):
    AssertionError: element $[0] differs: 2 != 1
    >>> assert_json_subset([{}], '[{"foo": "bar"}]')
    >>> assert_json_subset({}, "INVALID JSON")
    Traceback (most recent call last):
    json.decoder.JSONDecodeError: Expecting value: line 1 column 1 (char 0)
    """
    if not isinstance(second, (dict, list, str, bytes)):
        raise TypeError("second must be dict, list, str, or bytes")
    # Normalize bytes to text before attempting to parse.
    if isinstance(second, bytes):
        second = second.decode("utf-8")
    parsed_second = json_loads(second) if isinstance(second, _Str) else second
    if not isinstance(parsed_second, (dict, list)):
        raise AssertionError(
            "second must decode to dict or list, not {}".format(type(parsed_second)))
    _JSONComparer(_JSONPath("$"), first, parsed_second).assert_()
|
def get_imports(fname):
    """Get a list of imports from a Python program, as HTML <PRE> chunks."""
    chunks = []
    with open(fname, 'r') as f:
        for line in f:
            # Only plain "import x" lines are recognized
            # (not "from x import y").
            if line[0:6] == 'import':
                module = strip_text_after_string(line[7:], ' as ')
                chunks.append('<PRE>' + module + '</PRE>\n')
    return ''.join(chunks) + '<BR>'
|
def _Rzderiv ( self , R , z , phi = 0. , t = 0. ) :
"""NAME :
_ Rzderiv
PURPOSE :
evaluate the mixed R , z derivative for this potential
INPUT :
R - Galactocentric cylindrical radius
z - vertical height
phi - azimuth
t - time
OUTPUT :
d2phi / dR / dz
HISTORY :
2013-08-28 - Written - Bovy ( IAS )"""
|
Rz = R ** 2. + z ** 2.
sqrtRz = numpy . sqrt ( Rz )
return - R * z * ( - 4. * Rz - 3. * self . a * sqrtRz + 3. * ( self . a ** 2. + Rz + 2. * self . a * sqrtRz ) * numpy . log ( 1. + sqrtRz / self . a ) ) * Rz ** - 2.5 * ( self . a + sqrtRz ) ** - 2.
|
def eliminate_repeats(text):
    '''Returns a list of words that occur in the text. Eliminates stopwords.'''
    bannedwords = read_file('stopwords.txt')
    alphabet = 'abcdefghijklmnopqrstuvwxyz'
    letters = alphabet + alphabet.upper()
    seen = []
    for word in text.split():
        # Keep only ASCII letters; drop digits and punctuation.
        cleaned = ''.join(ch for ch in word if ch in letters)
        if cleaned and cleaned not in seen and cleaned not in bannedwords:
            seen.append(cleaned)
    return map(lambda w: w.lower(), seen)
|
def trainClassifier(self):
    """Self-consistently train the classifier."""
    # Load the simulated satellite population and the search results for it.
    self.loadPopulationMetadata()
    self.loadSimResults()
    # Geometry cut selects objects inside the survey footprint.
    cut_geometry, flags_geometry = self.applyGeometry(self.data_population['RA'], self.data_population['DEC'])
    # A simulated object counts as "detected" when it passes the threshold
    # with DIFFICULTY == 0, or when its DIFFICULTY flag is 1 or 4.
    cut_detect_sim_results_sig = (np.logical_or(np.logical_and(self.data_sim['SIG'] >= self.config[self.algorithm]['sig_threshold'], self.data_sim['DIFFICULTY'] == 0), np.logical_or(self.data_sim['DIFFICULTY'] == 1, self.data_sim['DIFFICULTY'] == 4)))
    cut_detect_sim_results_ts = (np.logical_or(np.logical_and(self.data_sim['TS'] >= self.config[self.algorithm]['ts_threshold'], self.data_sim['DIFFICULTY'] == 0), np.logical_or(self.data_sim['DIFFICULTY'] == 1, self.data_sim['DIFFICULTY'] == 4)))
    # Detection requires passing BOTH the SIG and TS criteria.
    mc_source_id_detect = self.data_sim['MC_SOURCE_ID'][cut_detect_sim_results_sig & cut_detect_sim_results_ts]
    cut_detect = np.in1d(self.data_population['MC_SOURCE_ID'], mc_source_id_detect)
    # Assemble the feature matrix; each configured parameter enters either
    # linearly or as log10.
    features = []
    for key, operation in self.config['operation']['params_intrinsic']:
        assert operation.lower() in ['linear', 'log'], 'ERROR'
        if operation.lower() == 'linear':
            features.append(self.data_population[key])
        else:
            features.append(np.log10(self.data_population[key]))
    X = np.vstack(features).T
    X = X[cut_geometry]
    Y = cut_detect[cut_geometry]
    # Create training and test sets
    indices = np.arange(len(X))
    X_train, X_test, Y_train, Y_test, cut_train, cut_test = train_test_split(X, Y, indices, test_size=0.2)
    # Train random forest classifier
    if True:
        print 'Training the machine learning classifier. This may take a while ...'
        t_start = time.time()
        # Grid of hyperparameters searched by GridSearchCV below.
        parameters = {'n_estimators': (500, 1000)}
        # , 'criterion': ["gini", "entropy"], "min_samples_leaf": [1,2,4]}
        rf = RandomForestClassifier(oob_score=True)
        rf_tuned = GridSearchCV(rf, parameters, cv=10, verbose=1)
        self.classifier = rf_tuned.fit(X_train, Y_train)
        # Alternative classifiers that were tried previously:
        # self.classifier = sklearn.gaussian_process.GaussianProcessClassifier(1.0 * sklearn.gaussian_process.kernels.RBF(0.5))
        # self.classifier = sklearn.neighbors.KNeighborsClassifier(3, weights='uniform')
        # self.classifier = sklearn.neighbors.KNeighborsClassifier(2, weights='distance')
        # self.classifier = sklearn.svm.SVC(gamma=2, C=1)
        # Print the best score and estimator:
        print('Best Score:', self.classifier.best_score_)
        print(self.classifier.best_estimator_)
        print(self.classifier.best_params_)
        t_end = time.time()
        print ' ... training took %.2f seconds' % (t_end - t_start)
        # Save the trained classifier
        classifier_data = pickle.dumps(self.classifier)
        writer = open(self.config[self.algorithm]['classifier'], 'w')
        writer.write(classifier_data)
        writer.close()
        print 'Saving machine learning classifier to %s ...' % (self.config[self.algorithm]['classifier'])
    else:
        # Dead branch kept for switching to a pre-trained classifier.
        self.loadClassifier()
    # Probability of the positive (detected) class for the test set.
    y_pred = self.classifier.predict_proba(X_test)[:, 1]
    # Confusion matrix
    y_pred_label = self.classifier.predict(X_test)
    cm = confusion_matrix(Y_test, y_pred_label)
    # Row 0 = true non-detections, row 1 = true detections.
    nondet_frac = cm[0][0] / (1.0 * cm[0][0] + 1.0 * cm[0][1])
    det_frac = cm[1][1] / (1.0 * cm[1][0] + 1.0 * cm[1][1])
    print('Fraction of non-detections test set labeled correctly: %0.2f' % nondet_frac)
    print('Fraction of detections in test set labeled correctly: %0.2f' % det_frac)
    plt.figure(figsize=(8, 6))
    plt.matshow(cm)
    plt.title('Confusion Matrix', fontsize=18, position=(0.5, 1.1))
    plt.colorbar()
    plt.ylabel('True label', fontsize=16)
    plt.xlabel('Predicted label', fontsize=16, position=(0.5, -10.5))
    plt.tick_params(labelsize=12)
    plt.show()
    # Compute ROC curve and area under curve (AUC) for each class:
    BestRFselector = self.classifier.best_estimator_
    y_pred_best = BestRFselector.predict_proba(X_test)
    labels = BestRFselector.classes_
    fpr = dict()
    tpr = dict()
    roc_auc = dict()
    for i, label in enumerate(labels):
        fpr[label], tpr[label], _ = roc_curve(Y_test, y_pred_best[:, i], pos_label=label)
        roc_auc[label] = auc(fpr[label], tpr[label])
    plt.figure(figsize=(8, 6))
    plt.plot([0, 1], [1, 1], color='red', linestyle='-', linewidth=3, label='Perfect Classifier (AUC = %0.2f)' % (1.0))
    plt.plot(fpr[1], tpr[1], lw=3, label='Random Forest (AUC = %0.2f)' % (roc_auc[1]), color='blue')
    plt.plot([0, 1], [0, 1], color='black', linestyle=':', linewidth=2.5, label='Random Classifier (AUC = %0.2f)' % (0.5))
    plt.xlim([0.0, 1.0])
    plt.ylim([0.0, 1.025])
    plt.tick_params(labelsize=16)
    plt.xlabel('False Positive Rate', fontsize=20, labelpad=8)
    plt.ylabel('True Positive Rate', fontsize=20, labelpad=8)
    plt.legend(loc="lower right", fontsize=16)
    plt.show()
    self.validateClassifier(cut_detect, cut_train, cut_geometry, y_pred)
|
def parseTlvProperties(response):
    """Parse the GET_TLV_PROPERTIES structure into a dict.

    @param response: result of L{FEATURE_GET_TLV_PROPERTIES}
    @rtype: dict
    @return: a dict mapping property names to decoded values; the raw
        response is kept under the 'raw' key, and tags not listed in
        C{Properties} are stored under 'UNKNOWN'.
    """
    d = {'raw': response, }
    # Work on a copy so the TLV entries can be consumed front-to-back.
    tmp = list(response)
    while tmp:
        tag = tmp[0]
        # Renamed from `len`, which shadowed the builtin of the same name.
        length = tmp[1]
        data = tmp[2:2 + length]
        if PCSCv2_PART10_PROPERTY_sFirmwareID == tag:
            # This property is a character string: convert to a string.
            data = "".join([chr(c) for c in data])
        # Otherwise assume the value is a little-endian integer.
        elif 1 == length:  # byte
            data = data[0]
        elif 2 == length:  # 16 bits value
            data = data[1] * 256 + data[0]
        elif 4 == length:  # 32 bits value
            data = ((data[3] * 256 + data[2]) * 256 + data[1]) * 256 + data[0]
        # store the value in the dictionary
        try:
            d[Properties[tag]] = data
        except KeyError:
            d["UNKNOWN"] = data
        # Consume this TLV entry (tag + length bytes + payload).
        del tmp[0:2 + length]
    return d
|
def node_conv(obj):
    """This is the "string conversion" routine that we have our substitutions
    use to return Nodes, not strings. This relies on the fact that an
    EntryProxy object has a get() method that returns the underlying
    Node that it wraps, which is a bit of architectural dependence
    that we might need to break or modify in the future in response to
    additional requirements.
    """
    try:
        getter = obj.get
    except AttributeError:
        # Not a proxy: pass Nodes and sequences through, stringify the rest.
        pass
    else:
        return getter()
    if isinstance(obj, SCons.Node.Node) or SCons.Util.is_Sequence(obj):
        return obj
    return str(obj)
|
def get_levels_and_coordinates_names(self):
    """Get the current level of the high level mean plot and the name of
    the corresponding site, study, etc., as well as the code for the
    current coordinate system.

    Returns
    -------
    (high_level_type, high_level_name, coordinate_system) : tuple
        Current high level type, name, and coordinate system being
        analyzed.
    """
    # Map the UI coordinate-system label to its MagIC method code.
    if self.COORDINATE_SYSTEM == "geographic":
        dirtype = 'DA-DIR-GEO'
    elif self.COORDINATE_SYSTEM == "tilt-corrected":
        dirtype = 'DA-DIR-TILT'
    else:
        dirtype = 'DA-DIR'
    # Translate the selected aggregation level to its plural table name.
    level = self.level_box.GetValue()
    if level == 'sample':
        high_level_type = 'samples'
    elif level == 'site':
        high_level_type = 'sites'
    elif level == 'location':
        high_level_type = 'locations'
    elif level == 'study':
        high_level_type = 'study'
    high_level_name = str(self.level_names.GetValue())
    return (high_level_type, high_level_name, dirtype)
|
def urls(self):
    """Returns a list of (value, URL) tuples.

    Plugins get first crack at producing the URLs; otherwise the URLs are
    derived from the field type (relation, choices, URLField, or plain).
    """
    # First, check the urls() method for each plugin; the first plugin
    # that returns URLs wins. (Removed dead locals `plugin_urls` and
    # `values` that were collected but never used.)
    for plugin_name, plugin in self.model.model_databrowse().plugins.items():
        urls = plugin.urls(plugin_name, self)
        if urls is not None:
            return zip(self.values(), urls)
    if self.field.rel:
        m = EasyModel(self.model.site, self.field.rel.to)
        if self.field.rel.to in self.model.model_list:
            # Related model is registered: link each value to its object page.
            lst = []
            for value in self.values():
                if value is None:
                    continue
                url = mark_safe('%s%s/%s/objects/%s/' % (self.model.site.root_url, m.model._meta.app_label, m.model._meta.model_name, iri_to_uri(value._get_pk_val())))
                lst.append((smart_text(value), url))
        else:
            # Related model is not registered: values without links.
            lst = [(value, None) for value in self.values()]
    elif self.field.choices:
        # Link each choice value to the field's filtered listing page.
        lst = []
        for value in self.values():
            url = mark_safe('%s%s/%s/fields/%s/%s/' % (self.model.site.root_url, self.model.model._meta.app_label, self.model.model._meta.model_name, self.field.name, iri_to_uri(self.raw_value)))
            lst.append((value, url))
    elif isinstance(self.field, models.URLField):
        # The value itself is the link target.
        val = self.values()[0]
        lst = [(val, iri_to_uri(val))]
    else:
        lst = [(self.values()[0], None)]
    return lst
|
def fromJSON(value):
    """Loads the GP linear-unit object from a JSON string."""
    data = json.loads(value)
    unit = GPLinearUnit()
    # Prefer the default value when one is present.
    unit.value = data['defaultValue'] if 'defaultValue' in data else data['value']
    # The parameter name may appear under either key.
    if 'paramName' in data:
        unit.paramName = data['paramName']
    elif 'name' in data:
        unit.paramName = data['name']
    return unit
|
def get_config_values(config_path, section, default='default'):
    """Parse ini config file and return a dict of values.

    Values from *section* override any values in the *default* section.
    """
    if not os.path.isfile(config_path):
        raise IpaUtilsException('Config file not found: %s' % config_path)
    config = configparser.ConfigParser()
    try:
        config.read(config_path)
    except Exception:
        raise IpaUtilsException('Config file format invalid.')
    values = {}
    # Apply defaults first, then the requested section on top; either
    # section may be absent and then simply contributes nothing.
    for section_name in (default, section):
        try:
            values.update(config.items(section_name))
        except Exception:
            pass
    return values
|
def _counts_to_coverages(sample_data, counts_in_1x):
    """Convert raw counts/bases into depth of coverage.

    Only applies when the user specified read length and genome size in the
    config (``counts_in_1x`` is truthy); otherwise a ``{None: None}``
    placeholder is returned.
    """
    if not counts_in_1x:
        return {None: None}
    converted = (
        (_count_to_coverage(key, counts_in_1x), _count_to_coverage(val, counts_in_1x))
        for key, val in sample_data.items()
    )
    return OrderedDict(converted)
|
def _verifyHostKey(self, hostKey, fingerprint):
    """Called when the ssh transport requests host key verification.

    Return a deferred that callbacks if we accept the key, or errbacks
    with UnknownHostKey if we reject it.
    """
    if fingerprint not in self.knownHosts:
        return defer.fail(UnknownHostKey(hostKey, fingerprint))
    return defer.succeed(True)
|
def transaction_location_id(self, transaction_location_id):
    """Set the transaction_location_id of this AdditionalRecipientReceivableRefund.

    This is the ID of the location that created the receivable (the
    location ID on the associated transaction).

    :param transaction_location_id: the new value; must be a non-empty string.
    :raises ValueError: if the value is ``None`` or empty.
    """
    if transaction_location_id is None:
        raise ValueError(
            "Invalid value for `transaction_location_id`, must not be `None`")
    if len(transaction_location_id) < 1:
        raise ValueError(
            "Invalid value for `transaction_location_id`, "
            "length must be greater than or equal to `1`")
    self._transaction_location_id = transaction_location_id
|
def vector(x, y=None, z=0.0):
    """Return a 3D numpy array of dtype ``numpy.float64`` representing a vector.

    If ``y`` is ``None``, ``x`` is assumed to already be an ``[x, y, z]``
    triple.
    """
    coords = x if y is None else [x, y, z]
    return np.array(coords, dtype=np.float64)
|
def find_request():
    '''Inspect running environment for request object. There should be one,
    but don't rely on it.'''
    # Walk up the call stack looking for a local variable named ``request``
    # that is a Django HttpRequest; the first (innermost) match wins.
    frame = inspect.currentframe()
    request = None
    f = frame
    while not request and f:
        if 'request' in f.f_locals and isinstance(f.f_locals['request'], HttpRequest):
            request = f.f_locals['request']
        f = f.f_back
    # Explicitly drop our own frame reference to break the reference cycle
    # created by holding it (recommended by the inspect module docs).
    del frame
    # May be None if no frame had a suitable ``request`` local.
    return request
|
def get_auth_token_login_url(self, auth_token_ticket, authenticator,
                             private_key, service_url, username):
    '''Build an auth token login URL.

    See https://github.com/rbCAS/CASino/wiki/Auth-Token-Login for details.
    '''
    token, signature = self._build_auth_token_data(
        auth_token_ticket,
        authenticator,
        private_key,
        username=username,
    )
    logging.debug('[CAS] AuthToken: {}'.format(token))
    login_url = self._get_auth_token_login_url(
        auth_token=token,
        auth_token_signature=signature,
        service_url=service_url,
    )
    logging.debug('[CAS] AuthToken Login URL: {}'.format(login_url))
    return login_url
|
def write_main_jobwrappers(self):
    '''Write out 'jobs' as wrapped toil objects in preparation for calling.

    :return: A string representing this.
    '''
    # Toil cannot technically start with multiple jobs, so an empty
    # 'initialize_jobs' function is always called first to get around this.
    sections = ['\n job0 = Job.wrapJobFn(initialize_jobs)\n']
    # Declare each job in main as a wrapped toil function in order of priority.
    for wf in self.workflows_dictionary:
        for assignment in self.workflows_dictionary[wf]:
            task = self.workflows_dictionary[wf][assignment]
            if assignment.startswith('call'):
                sections.append(' job0 = job0.encapsulate()\n')
                sections.append(self.write_main_jobwrappers_call(task))
            if assignment.startswith('scatter'):
                sections.append(' job0 = job0.encapsulate()\n')
                sections.append(self.write_main_jobwrappers_scatter(task, assignment))
            if assignment.startswith('if'):
                sections.append(' if {}:\n'.format(task['expression']))
                sections.append(self.write_main_jobwrappers_if(task['body']))
    sections.append('\n fileStore.start(job0)\n')
    return ''.join(sections)
|
def _convert_to ( maybe_device , convert_to ) :
'''Convert a device name , UUID or LABEL to a device name , UUID or
LABEL .
Return the fs _ spec required for fstab .'''
|
# Fast path . If we already have the information required , we can
# save one blkid call
if not convert_to or ( convert_to == 'device' and maybe_device . startswith ( '/' ) ) or maybe_device . startswith ( '{}=' . format ( convert_to . upper ( ) ) ) :
return maybe_device
# Get the device information
if maybe_device . startswith ( '/' ) :
blkid = __salt__ [ 'disk.blkid' ] ( maybe_device )
else :
blkid = __salt__ [ 'disk.blkid' ] ( token = maybe_device )
result = None
if len ( blkid ) == 1 :
if convert_to == 'device' :
result = list ( blkid . keys ( ) ) [ 0 ]
else :
key = convert_to . upper ( )
result = '{}={}' . format ( key , list ( blkid . values ( ) ) [ 0 ] [ key ] )
return result
|
def extract_js_links(bs4):
    """Extract javascript links from a BeautifulSoup object.

    :param bs4: `BeautifulSoup`
    :return: `list` List of links
    """
    anchors = extract_links(bs4)
    js_anchors = [a for a in anchors if a.endswith(('.js', '.JS'))]
    script_srcs = [
        tag['src']
        for tag in bs4.select('script[type="text/javascript"]')
        if tag.has_attr('src')
    ]
    # Deduplicate across both sources.
    return list(set(js_anchors + script_srcs))
|
def add(self, key, item):
    """Add a new key/item pair to the dictionary.

    Resets an existing key value only if this is an exact match to a
    known key.
    """
    abbrevs = self.mmkeys
    if abbrevs is not None and key not in self.data:
        # Register abbreviations from minkeylength up to the full key;
        # always add at least one entry (even for key = "").
        shortest = min(self.minkeylength, len(key))
        for end in range(shortest, len(key) + 1):
            abbrevs.setdefault(key[:end], []).append(key)
    self.data[key] = item
|
def loadgrants(source=None, setspec=None, all_grants=False):
    """Harvest grants from OpenAIRE.

    :param source: Load the grants from a local sqlite db (offline).
        The value of the parameter should be a path to the local file.
    :type source: str
    :param setspec: Harvest specific set through OAI-PMH.
        Creates a remote connection to OpenAIRE.
    :type setspec: str
    :param all_grants: Harvest all sets through OAI-PMH, as specified in
        the configuration OPENAIRE_GRANTS_SPEC. Sets are harvested
        sequentially in the order specified in the configuration.
        Creates a remote connection to OpenAIRE.
    :type all_grants: bool
    """
    assert all_grants or setspec or source, \
        "Either '--all', '--setspec' or '--source' is required parameter."
    if all_grants:
        harvest_all_openaire_projects.delay()
    elif setspec:
        click.echo("Remote grants loading sent to queue.")
        harvest_openaire_projects.delay(setspec=setspec)
    else:
        # Offline loading from a local sqlite dump.
        loader = LocalOAIRELoader(source=source)
        loader._connect()
        total = loader._count()
        click.echo("Sending grants to queue.")
        with click.progressbar(loader.iter_grants(), length=total) as grants_bar:
            for grant_json in grants_bar:
                register_grant.delay(grant_json)
|
def create_cache(directory, compress_level=6, value_type_is_binary=False, **kwargs):
    """Create an html cache; html strings are automatically compressed.

    :param directory: path for the cache directory.
    :param compress_level: 0 ~ 9, 9 is slowest and smallest.
    :param kwargs: other arguments.
    :return: a `diskcache.Cache()`
    """
    return diskcache.Cache(
        directory,
        disk=CompressedDisk,
        disk_compress_level=compress_level,
        disk_value_type_is_binary=value_type_is_binary,
        **kwargs
    )
|
def _GetStat(self):
    """Retrieve a stat object.

    Returns:
      VFSStat: a stat object.

    Raises:
      BackEndError: when the encoded stream is missing.
    """
    stat = vfs_stat.VFSStat()
    # File data stat information: the size comes from the path spec range.
    stat.size = self.path_spec.range_size
    # File entry type stat information.
    stat.type = stat.TYPE_FILE
    return stat
|
def run(self, *args, **kwargs):
    """Run this program with Parameters, Redirects, and Pipes.

    If shell=True, this command is executed as a string directly on the
    shell; otherwise, it's executed using Popen processes and appropriate
    streams.

    :param args: 0 or more of Parameter, Redirect, and Pipe
    :param kwargs: shell=Bool
    :return: None
    """
    # Get default for kwarg shell
    shell = kwargs.get('shell', False)
    # Output log info for this command
    run_cmd = self.__generate_cmd(*args, shell=True)
    log_header = 'Running {}\n{}\n'.format(self.software_name, run_cmd)
    if _Settings.logger._is_active():
        _Settings.logger._write(log_header)
    else:
        sys.stdout.write(log_header)
    # If shell is True, execute this command directly as a string
    if shell:
        subprocess.call(run_cmd, shell=True, executable=os.environ['SHELL'])
        # TODO Check to see if this works on Windows
    else:
        # Get the command blueprint for this call
        cmd_blueprint = self.__generate_cmd(*args, shell=False)
        output_stream_filehandles = []
        blueprint_processes = []
        # For each command in the blueprint, set up streams and a Popen
        # object to execute it
        for i, cmd in enumerate(cmd_blueprint):
            # stdin comes from the previous process in the pipeline
            stdin_stream = None if i == 0 else blueprint_processes[i - 1].stdout
            stdout_filehandle = None
            stderr_filehandle = None
            # If this command isn't the last in the list, that means the
            # output is being piped into the next command
            if i + 1 < len(cmd_blueprint):
                stdout_filehandle = subprocess.PIPE
            # If this is the last command in the list, stdout may be
            # redirected to a file...
            elif cmd['stdout']:
                redir = cmd['stdout']
                stdout_filehandle = open(redir.dest, redir.mode)
                output_stream_filehandles.append(stdout_filehandle)
            # ...or it may be sent out to the main log file
            elif _Settings.logger.log_stdout and _Settings.logger.destination:
                stdout_filehandle = subprocess.PIPE
            # stderr can be redirected regardless of piping
            if cmd['stderr']:
                redir = cmd['stderr']
                stderr_filehandle = open(redir.dest, redir.mode)
                output_stream_filehandles.append(stderr_filehandle)
            # Or it may be sent out to a log file
            elif (_Settings.logger.log_stderr and
                    (_Settings.logger.destination_stderr or _Settings.logger.destination)):
                stderr_filehandle = subprocess.PIPE
            # Create this process as a Popen object, with appropriate streams
            process = subprocess.Popen(cmd['cmd'], stdin=stdin_stream,
                                       stdout=stdout_filehandle,
                                       stderr=stderr_filehandle)
            blueprint_processes.append(process)
            # If this is the last command in the list, wait for it to finish
            if i + 1 == len(cmd_blueprint):
                process.wait()
                # If logging is set, capture stdout (or stderr) to log file
                # TODO I think the logic here can be expressed more concisely
                if _Settings.logger.log_stdout and _Settings.logger.destination:
                    for line in process.stdout:
                        _Settings.logger._write(line)
                if (_Settings.logger.log_stderr and
                        (_Settings.logger.destination_stderr or _Settings.logger.destination)):
                    for line in process.stderr:
                        _Settings.logger._write(line, bool(_Settings.logger.destination_stderr))
        # Close all the file handles created for redirects.
        # BUG FIX: ``map`` is lazy in Python 3, so the original
        # ``map(lambda f: f.close(), ...)`` produced an iterator that was
        # never consumed and the handles were never closed. Use an
        # explicit loop so the files are actually closed.
        for filehandle in output_stream_filehandles:
            filehandle.close()
|
def fingerprint_from_raw_ssh_pub_key(key):
    """Encode a raw SSH key (string of bytes, as from
    ``str(paramiko.AgentKey)``) to a fingerprint in the typical
    '54:c7:4c:93:cf:ff:e3:32:68:bc:89:6e:5e:22:b5:9c' form.
    """
    digest = hashlib.md5(key).hexdigest()
    # Group the hex digest into byte pairs joined by colons.
    pairs = [digest[i:i + 2] for i in range(0, len(digest), 2)]
    return ':'.join(pairs)
|
def valueAt(self, point):
    """Return the value within the chart for the given point.

    :param point: <QPoint>
    :return: {<str> axis name: <variant> value, ..}
    """
    # Map from widget coordinates to scene coordinates before lookup.
    local_point = self.uiChartVIEW.mapFromParent(point)
    scene_point = self.uiChartVIEW.mapToScene(local_point)
    return self.renderer().valueAt(self.axes(), scene_point)
|
def _get_k ( self ) :
'''Accessing self . k indirectly allows for creating the kvstore table
if necessary .'''
|
if not self . ready :
self . k . create ( )
# create table if it does not exist .
self . ready = True
return self . k
|
def responseReceived(self, response, tag):
    """Receive a complete response and fire the deferred associated with it.

    @param response: A complete response generated by exiftool.
    @type response: C{bytes}
    @param tag: The tag associated with the response
    @type tag: C{int}
    """
    # Remove the pending deferred for this tag and deliver the response.
    deferred = self._queue.pop(tag)
    deferred.callback(response)
|
def _dispatch(self, textgroup, directory):
    """Run the dispatcher over a textgroup.

    :param textgroup: Textgroup object that needs to be dispatched
    :param directory: Directory in which the textgroup was found
    """
    if textgroup.id in self.dispatcher.collection:
        # Already-known textgroup: merge the new information into it.
        self.dispatcher.collection[textgroup.id].update(textgroup)
    else:
        # New textgroup: let the dispatcher register it.
        self.dispatcher.dispatch(textgroup, path=directory)
    # Merge works that the (now registered) textgroup already lists.
    for work_urn, work in textgroup.works.items():
        if work_urn in self.dispatcher.collection[textgroup.id].works:
            self.dispatcher.collection[work_urn].update(work)
|
def avgwaittime_get(self, service_staff_id, start_date, end_date, session):
    '''taobao.wangwang.eservice.avgwaittime.get -- average waiting time.

    Given a customer-service staff ID and a date range, return the average
    waiting time of all customers served by that staff member per day.
    Notes from the upstream API:

    - If the caller ID equals the queried ID, the caller's own daily
      averages are returned.
    - A group administrator may query every sub-account in the group; a
      main account may query all of its sub-accounts.
    - Multiple IDs may be passed separated by ",", at most 30.
    - The interval between start and end date must not exceed 7 days.
    - Data older than 90 days, or for the current day, cannot be queried.
    '''
    request = TOPRequest('taobao.wangwang.eservice.avgwaittime.get')
    for field, value in (('service_staff_id', service_staff_id),
                         ('start_date', start_date),
                         ('end_date', end_date)):
        request[field] = value
    self.create(self.execute(request, session))
    return self.waiting_time_list_on_days
|
def get_input_channel(entity):
    """Similar to :meth:`get_input_peer`, but for :tl:`InputChannel`'s alone."""
    try:
        # Already an InputChannel-compatible object? Return it unchanged.
        if entity.SUBCLASS_OF_ID == 0x40f202fd:  # crc32(b'InputChannel')
            return entity
    except AttributeError:
        # Not a TL object at all (no SUBCLASS_OF_ID attribute).
        _raise_cast_fail(entity, 'InputChannel')
    # Full channel objects carry id + access_hash directly.
    if isinstance(entity, (types.Channel, types.ChannelForbidden)):
        return types.InputChannel(entity.id, entity.access_hash or 0)
    # An InputPeerChannel can be converted field-for-field.
    if isinstance(entity, types.InputPeerChannel):
        return types.InputChannel(entity.channel_id, entity.access_hash)
    # TL object, but not convertible to an InputChannel.
    _raise_cast_fail(entity, 'InputChannel')
|
def _parse_extra(self, fp):
    """Parse and store the config comments and create maps for dot notation lookup."""
    # Accumulates consecutive '#' lines; attached to the next section/key seen.
    comment = ''
    section = ''
    fp.seek(0)
    for line in fp:
        line = line.rstrip()
        if not line:
            # Blank line inside a comment block becomes part of the comment.
            if comment:
                comment += '\n'
            continue
        if line.startswith('#'):  # Comment
            comment += line + '\n'
            continue
        if line.startswith('['):  # Section
            section = line.strip('[]')
            self._add_dot_key(section)
            if comment:
                self._comments[section] = comment.rstrip()
        elif CONFIG_KEY_RE.match(line):  # Config
            key = line.split('=', 1)[0].strip()
            self._add_dot_key(section, key)
            if comment:
                # Keyed by (section, key) so identical keys in different
                # sections keep separate comments.
                self._comments[(section, key)] = comment.rstrip()
        # Any section/key line consumes the pending comment.
        comment = ''
    # Trailing comment with nothing after it is stored under a sentinel key.
    if comment:
        self._comments[self.LAST_COMMENT_KEY] = comment
|
def organizations_create_many ( self , data , ** kwargs ) :
"https : / / developer . zendesk . com / rest _ api / docs / core / organizations # create - many - organizations"
|
api_path = "/api/v2/organizations/create_many.json"
return self . call ( api_path , method = "POST" , data = data , ** kwargs )
|
def list_locations(self, provider=None):
    '''List all available locations in configured cloud systems.'''
    cloud_map = salt.cloud.Map(self._opts_defaults())
    locations = cloud_map.location_list(provider)
    return salt.utils.data.simple_types_filter(locations)
|
def color_string(color, string):
    """Colorize a given string, if coloring is available."""
    if color_available:
        return color + string + colorama.Fore.RESET
    return string
|
def stop(self):
    """Tell the sender thread to finish and wait for it to stop sending
    (should be at most "timeout" seconds).
    """
    if self.interval is None:
        # Already stopped; nothing to do.
        return
    # A None item on the queue is the sentinel that stops the sender.
    self._queue.put_nowait(None)
    self._thread.join()
    self.interval = None
|
def _flatten_subsection(subsection, _type, offset, parent):
    '''Flatten a subsection from its nested version

    Args:
        subsection: Nested subsection as produced by _parse_section, except one level in
        _type: type of section, ie: AXON, etc
        parent: first element has this as it's parent
        offset: position in the final array of the first element

    Returns:
        Generator of values corresponding to [X, Y, Z, R, TYPE, ID, PARENT_ID]
    '''
    for row in subsection:
        # TODO: Figure out what these correspond to in neurolucida
        if row in ('Low', 'Generated', 'High', ):
            continue
        elif isinstance(row[0], StringType):
            # A point row: [x, y, z, d] optionally followed by an 'Sn' marker.
            if len(row) in (4, 5, ):
                if len(row) == 5:
                    assert row[4][0] == 'S', 'Only known usage of a fifth member is Sn, found: %s' % row[4][0]
                # Diameter is halved to yield a radius.
                yield (float(row[0]), float(row[1]), float(row[2]), float(row[3]) / 2., _type, offset, parent)
                # Subsequent point in this run is parented to this one.
                parent = offset
                offset += 1
        elif isinstance(row[0], list):
            # A nested branch: '|' separates sibling sub-branches, all of
            # which share the last emitted point as their parent.
            split_parent = offset - 1
            start_offset = 0
            slices = []
            start = 0
            for i, value in enumerate(row):
                if value == '|':
                    slices.append(slice(start + start_offset, i))
                    start = i + 1
            slices.append(slice(start + start_offset, len(row)))
            for split_slice in slices:
                # Recurse into each sub-branch; offset advances once per
                # yielded point so IDs stay globally sequential.
                for _row in _flatten_subsection(row[split_slice], _type, offset, split_parent):
                    offset += 1
                    yield _row
|
def delete_script(self, script_id):
    """Delete a stored script.

    script_id:= id of stored script.

    ...
    status = pi.delete_script(sid)
    ...
    """
    raw = yield from self._pigpio_aio_command(_PI_CMD_PROCD, script_id, 0)
    return _u2i(raw)
|
def do_youtube_dl(worker, site, page):
    '''Run youtube-dl configured for `worker` and `site` to download videos
    from `page`.

    Args:
        worker (brozzler.BrozzlerWorker): the calling brozzler worker
        site (brozzler.Site): the site we are brozzling
        page (brozzler.Page): the page we are brozzling

    Returns:
        tuple with two entries:
            `list` of `dict`: with info about urls fetched:
                'url': ..., 'method': ..., 'response_code': ...,
                'response_headers': ...,
            `list` of `str`: outlink urls
    '''
    with tempfile.TemporaryDirectory(prefix='brzl-ydl-') as tempdir:
        ydl = _build_youtube_dl(worker, tempdir, site)
        ie_result = _try_youtube_dl(worker, ydl, site, page)
        outlinks = set()
        # For playlists, each video's watch page becomes an outlink.
        # (Any outlinks for other cases?)
        if ie_result and ie_result.get('extractor') == 'youtube:playlist':
            entries = ie_result.get('entries_no_dl', [])
            outlinks = {'https://www.youtube.com/watch?v=%s' % entry['id']
                        for entry in entries}
        return ydl.fetch_spy.fetches, outlinks
|
def add_surf(self, surf, color=SKIN_COLOR, vertex_colors=None, values=None, limits_c=None, colormap=COLORMAP, alpha=1, colorbar=False):
    """Add surfaces to the visualization.

    Parameters
    ----------
    surf : instance of wonambi.attr.anat.Surf
        surface to be plotted
    color : tuple or ndarray, optional
        4-element tuple, representing RGB and alpha, between 0 and 1
    vertex_colors : ndarray
        ndarray with n vertices x 4 to specify color of each vertex
    values : ndarray, optional
        vector with values for each vertex
    limits_c : tuple of 2 floats, optional
        min and max values to normalize the color
    colormap : str
        one of the colormaps in vispy
    alpha : float
        transparency (1 = opaque)
    colorbar : bool
        add a colorbar at the back of the surface
    """
    colors, limits = _prepare_colors(color=color, values=values, limits_c=limits_c, colormap=colormap, alpha=alpha)
    # NOTE(review): the ``vertex_colors`` parameter is overwritten on the
    # next line and therefore effectively ignored -- confirm intended.
    # meshdata uses numpy array, in the correct dimension
    vertex_colors = colors.rgba
    if vertex_colors.shape[0] == 1:
        # Single color: replicate it for every vertex of the surface.
        vertex_colors = tile(vertex_colors, (surf.n_vert, 1))
    meshdata = MeshData(vertices=surf.vert, faces=surf.tri, vertex_colors=vertex_colors)
    mesh = SurfaceMesh(meshdata)
    self._add_mesh(mesh)
    # Adjust camera: face the hemisphere from the appropriate side,
    # based on the sign of the surface's mean x coordinate.
    surf_center = mean(surf.vert, axis=0)
    if surf_center[0] < 0:
        azimuth = 270
    else:
        azimuth = 90
    self._view.camera.azimuth = azimuth
    self._view.camera.center = surf_center
    self._surf.append(mesh)
    if colorbar:
        self._view.add(_colorbar_for_surf(colormap, limits))
|
def snake_case(a_string):
    """Return a snake cased version of a string.

    :param a_string: any :class:`str` object.

    Usage:
        >>> snake_case('FooBar')
        "foo_bar"
    """
    # First pass splits e.g. 'FooBar' -> 'Foo_Bar'; second pass handles
    # acronym/digit boundaries like 'HTTPResponse' -> 'HTTP_Response'.
    step_one = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', a_string)
    step_two = re.sub('([a-z0-9])([A-Z])', r'\1_\2', step_one)
    return step_two.lower()
|
def _handle_amqp_frame(self, data_in):
    """Unmarshal a single AMQP frame and return the result.

    :param data_in: socket data
    :return: data_in, channel_id, frame
    """
    if not data_in:
        # Nothing buffered yet.
        return data_in, None, None
    try:
        byte_count, channel_id, frame_in = pamqp_frame.unmarshal(data_in)
    except pamqp_exception.UnmarshalingException:
        # Not enough data to decode a complete frame yet.
        pass
    except specification.AMQPFrameError as why:
        LOGGER.error('AMQPFrameError: %r', why, exc_info=True)
    except ValueError as why:
        LOGGER.error(why, exc_info=True)
        self.exceptions.append(AMQPConnectionError(why))
    else:
        # Consume the unmarshalled bytes and hand back the frame.
        return data_in[byte_count:], channel_id, frame_in
    return data_in, None, None
|
def fit(self, t, y, dy=None):
    """Fit the multiterm Periodogram model to the data.

    Parameters
    ----------
    t : array_like, one-dimensional
        sequence of observation times
    y : array_like, one-dimensional
        sequence of observed values
    dy : float or array_like (optional)
        errors on observed values
    """
    # For linear models, dy = 1 is equivalent to no errors.
    errors = 1 if dy is None else dy
    self.t, self.y, self.dy = np.broadcast_arrays(t, y, errors)
    self._fit(self.t, self.y, self.dy)
    # Reset best period in case of refitting.
    self._best_period = None
    if self.fit_period:
        self._best_period = self._calc_best_period()
    return self
|
def get_stats_monthly(start=None, end=None, **kwargs):
    """MOVED to iexfinance.iexdata.get_stats_summary"""
    import warnings
    # Warn about the deprecated location, then delegate to the reader.
    warnings.warn(WNG_MSG % ("get_stats_monthly", "iexdata.get_stats_summary"))
    reader = MonthlySummaryReader(start=start, end=end, **kwargs)
    return reader.fetch()
|
def add_self_edges(self, weight=None, copy=False):
    '''Add all i->i edges. weight may be a scalar or 1d array.'''
    indices = np.arange(self.num_vertices())
    return self.add_edges(indices, indices, weight=weight,
                          symmetric=False, copy=copy)
|
def managed(name, peers=None, servers=None):
    '''Manages the configuration of NTP peers and servers on the device, as
    specified in the state SLS file. NTP entities not specified in these
    lists will be removed whilst entities not configured on the device
    will be set.

    SLS Example:

    .. code-block:: yaml

        netntp_example:
            netntp.managed:
                - peers:
                    - 192.168.0.1
                    - 172.17.17.1
                - servers:
                    - 24.124.0.251
                    - 138.236.128.36
    '''
    ret = _default_ret(name)
    # NOTE(review): ``result`` starts from the default ret's value and is
    # later combined with ``and``; if _default_ret defaults it to False the
    # combined result can never become True here -- confirm against
    # _default_ret's implementation.
    result = ret.get('result', False)
    comment = ret.get('comment', '')
    changes = ret.get('changes', {})
    # Neither peers nor servers provided: nothing to manage, just exit.
    if not (isinstance(peers, list) or isinstance(servers, list)):  # none of the is a list
        return ret
        # just exit
    # Validate peers list entries before touching the device.
    if isinstance(peers, list) and not _check(peers):  # check and clean peers
        ret['comment'] = 'NTP peers must be a list of valid IP Addresses or Domain Names'
        return ret
    # Validate servers list entries before touching the device.
    if isinstance(servers, list) and not _check(servers):  # check and clean servers
        ret['comment'] = 'NTP servers must be a list of valid IP Addresses or Domain Names'
        return ret
    # ----- Retrieve existing NTP peers and determine peers to be added/removed ----->
    successfully_changed = True
    expected_config_change = False
    if isinstance(peers, list):
        # NOTE(review): ``name`` is passed both positionally and as a
        # keyword here; in standard Python this raises TypeError
        # ("multiple values for argument 'name'") -- confirm the intended
        # keyword for the second usage.
        _peers_ret = _check_diff_and_configure(name, peers, name='peers')
        expected_config_change = _peers_ret.get('expected_config_change', False)
        successfully_changed = _peers_ret.get('successfully_changed', True)
        result = result and _peers_ret.get('result', False)
        comment += ('\n' + _peers_ret.get('comment', ''))
        _changed_peers = _peers_ret.get('changes', {})
        if _changed_peers:
            changes['peers'] = _changed_peers
    if isinstance(servers, list):
        # Same duplicate-``name`` concern as above.
        _servers_ret = _check_diff_and_configure(name, servers, name='servers')
        expected_config_change = expected_config_change or _servers_ret.get('expected_config_change', False)
        successfully_changed = successfully_changed and _servers_ret.get('successfully_changed', True)
        result = result and _servers_ret.get('result', False)
        comment += ('\n' + _servers_ret.get('comment', ''))
        _changed_servers = _servers_ret.get('changes', {})
        if _changed_servers:
            changes['servers'] = _changed_servers
    ret.update({'changes': changes})
    if not (changes or expected_config_change):
        # Nothing to add or remove: device already configured as requested.
        ret.update({'result': True, 'comment': 'Device configured properly.'})
        return ret
    if __opts__['test'] is True:
        # Test mode: report what would change without committing.
        ret.update({'result': None, 'comment': 'This is in testing mode, the device configuration was not changed!'})
        return ret
    # <---- Call _set_ntp_peers and _delete_ntp_peers as needed -----
    # ----- Try to commit changes ----->
    if expected_config_change:  # commit only in case there's something to update
        config_result, config_comment = __salt__['net.config_control']()
        result = config_result and successfully_changed
        comment += config_comment
    # <---- Try to commit changes -----
    ret.update({'result': result, 'comment': comment})
    return ret
|
async def parseResults(self, api_data):
    """See CoverSource.parseResults."""
    results = []
    # get xml results list
    xml_text = api_data.decode("utf-8")
    xml_root = xml.etree.ElementTree.fromstring(xml_text)
    status = xml_root.get("status")
    if status != "ok":
        raise Exception("Unexpected Last.fm response status: %s" % (status))
    img_elements = xml_root.findall("album/image")
    # build results from xml
    thumbnail_url = None
    thumbnail_size = None
    for img_element in img_elements:
        img_url = img_element.text
        if not img_url:
            # last.fm returns empty image tag for size it does not have
            continue
        lfm_size = img_element.get("size")
        if lfm_size == "mega":
            # Largest size: its real dimensions must be verified later.
            check_metadata = CoverImageMetadata.SIZE
        else:
            check_metadata = CoverImageMetadata.NONE
        try:
            size = __class__.SIZES[lfm_size]
        except KeyError:
            # Unknown size label: skip this image entirely.
            continue
        # Track the smallest image that still fits as a thumbnail; images
        # are processed in document order, so smaller candidates replace
        # larger ones.
        if (size[0] <= MAX_THUMBNAIL_SIZE) and ((thumbnail_size is None) or (size[0] < thumbnail_size)):
            thumbnail_url = img_url
            thumbnail_size = size[0]
        # Derive the image format from the URL's file extension.
        format = os.path.splitext(img_url)[1][1:].lower()
        format = SUPPORTED_IMG_FORMATS[format]
        results.append(LastFmCoverSourceResult(img_url, size, format, thumbnail_url=thumbnail_url, source=self, check_metadata=check_metadata))
    return results
|
def module_settings(self):
    """Get Module settings. Uses GET to /settings/modules interface.

    :Returns: (dict) Module settings as shown `here
        <https://cloud.knuverse.com/docs/api/#api-Module_Settings-Get_the_module_settings>`_.
    """
    resp = self._get(url.settings_modules)
    self._check_response(resp, 200)
    return self._create_response(resp)
|
def disco_query(self):
    """Make a request to the discovery server.

    :rtype: satosa.response.SeeOther
    :return: Response redirecting the client to the discovery service
    """
    return_url = self.sp.config.getattr("endpoints", "sp")["discovery_response"][0][0]
    # 'return' is a reserved word, so it must be passed via a kwargs dict.
    location = self.sp.create_discovery_service_request(
        self.discosrv, self.sp.config.entityid, **{"return": return_url})
    return SeeOther(location)
|
def Parse(self, conditions, host_data):
    """Run methods that evaluate whether collected host_data has an issue.

    Args:
      conditions: A list of conditions to determine which Methods to trigger.
      host_data: A map of artifacts and rdf data.

    Returns:
      A CheckResult populated with Anomalies if an issue exists.
    """
    result = CheckResult(check_id=self.check_id)
    selected = self.SelectChecks(conditions)
    result.ExtendAnomalies(
        [method.Parse(conditions, host_data) for method in selected])
    return result
|
def get_network_adapter(self, slot):
    """Return the network adapter associated with the given slot.

    Slots are numbered sequentially, starting with zero. The total number
    of adapters per machine is defined by the
    :py:func:`ISystemProperties.get_max_network_adapters` property, so the
    maximum slot number is one less than that property's value.

    in slot of type int

    return adapter of type :class:`INetworkAdapter`

    raises :class:`OleErrorInvalidarg`
        Invalid @a slot number.
    """
    if not isinstance(slot, baseinteger):
        raise TypeError("slot can only be an instance of type baseinteger")
    raw_adapter = self._call("getNetworkAdapter", in_p=[slot])
    return INetworkAdapter(raw_adapter)
|
def get_log_entries_by_search(self, log_entry_query, log_entry_search):
    """Pass through to provider LogEntrySearchSession.get_log_entries_by_search"""
    # Implemented from azosid template for -
    # osid.resource.ResourceSearchSession.get_resources_by_search_template
    if not self._can('search'):
        raise PermissionDenied()
    return self._provider_session.get_log_entries_by_search(
        log_entry_query, log_entry_search)
|
def _get_more_data(self, file, timeout):
    """Return data from the file, if available. If no data is received
    by the timeout, then raise RuntimeError.
    """
    deadline = datetime.timedelta(seconds=timeout)
    watch = Stopwatch()
    # Poll until data arrives or the deadline elapses.
    while watch.split() < deadline:
        data = file.read()
        if data:
            return data
    raise RuntimeError("Timeout")
|
def from_dataset(cls, dataset, constraints=(), **kwargs):
    """Construct an optimized inverse model from an existing dataset.

    A LWLR forward model is constructed by default.
    """
    forward = LWLRForwardModel(dataset.dim_x, dataset.dim_y, **kwargs)
    forward.dataset = dataset
    return cls.from_forward(forward, constraints=constraints, **kwargs)
|
def product_of_three_primes(n: int) -> bool:
    """Check if the given number is a product of three prime numbers.

    Returns True if it is, False otherwise. Note that the three prime
    factors are NOT required to be distinct (e.g. 8 = 2 * 2 * 2 returns
    True), matching the behaviour of the original implementation.

    Assumption: The input number (n) is less than 100.

    Example:
        product_of_three_primes(30) returns True
        30 = 2 * 3 * 5

    Args:
        n (int): input number to be checked

    Returns:
        bool: True if the number is the product of three primes.
    """
    def _is_prime(p):
        # Trial division is plenty fast for p <= 100.
        if p < 2:
            return False
        return all(p % d for d in range(2, p))

    # Compute the prime list once instead of re-testing primality
    # inside a triple loop (the original was O(100^3) primality checks).
    primes = [p for p in range(2, 101) if _is_prime(p)]
    for a in primes:
        if a > n:
            break
        for b in primes:
            if a * b > n:
                break
            for c in primes:
                product = a * b * c
                if product == n:
                    return True
                if product > n:
                    # Primes are ascending; no larger c can match.
                    break
    return False
|
def next(self):
    """Read the next CSV row and return it keyed by the header names.

    Returns:
        dict: {header1: value1, header2: value2, ...} for the row.

    On any error (including end of input) the underlying file is closed
    before the exception is re-raised.
    """
    try:
        row = self._csv_reader.next()
        return {self._headers[i]: value for i, value in enumerate(row)}
    except Exception as e:
        # close our file when we're done reading.
        self._file.close()
        raise e
|
def unpunctuate(s, *, char_blacklist=string.punctuation):
    """Remove blacklisted punctuation characters from *s*.

    Runs of space characters left behind are collapsed to a single space.
    """
    # drop blacklisted characters
    stripped = "".join(filter(lambda c: c not in char_blacklist, s))
    # collapse consecutive spaces (only U+0020, as in the original)
    words = (w for w in stripped.split(" ") if w)
    return " ".join(words)
|
def plot2dhist(xdata, ydata, cmap='binary', interpolation='nearest', fig=None,
               logscale=True, xbins=None, ybins=None, nbins=50, pts_only=False,
               **kwargs):
    """Plots a 2d density histogram of provided data.

    :param xdata, ydata: (array-like) Data to plot.
    :param cmap: (optional) Colormap to use for density plot.
    :param interpolation: (optional) Interpolation scheme for display
        (passed to ``plt.imshow``).
    :param fig: (optional) Argument passed to :func:`setfig`.
    :param logscale: (optional) If ``True`` then the colormap will be based
        on a logarithmic scale, rather than linear.
    :param xbins, ybins: (optional) Bin edges to use (if ``None``, then use
        ``np.histogram2d`` to find bins automatically).
    :param nbins: (optional) Number of bins to use (if ``None``, then use
        ``np.histogram2d`` to find bins automatically).
    :param pts_only: (optional) If ``True``, then just a scatter plot of
        the points is made, rather than the density plot.
    :param **kwargs: Keyword arguments passed either to ``plt.plot`` or
        ``plt.imshow`` depending upon whether ``pts_only`` is set.
    """
    setfig(fig)
    if pts_only:
        plt.plot(xdata, ydata, **kwargs)
        return
    # Keep only finite points; histogram2d cannot handle NaN/Inf.
    ok = (~np.isnan(xdata) & ~np.isnan(ydata) &
          ~np.isinf(xdata) & ~np.isinf(ydata))
    # BUG FIX: the original tested `~ok.sum() > 0`; `~` applied to the
    # integer count is always negative, so the warnings could never fire.
    # The intended quantity is the number of *bad* points: (~ok).sum().
    if (~ok).sum() > 0:
        logging.warning('{} x values and {} y values are nan'.format(
            np.isnan(xdata).sum(), np.isnan(ydata).sum()))
        logging.warning('{} x values and {} y values are inf'.format(
            np.isinf(xdata).sum(), np.isinf(ydata).sum()))
    if xbins is not None and ybins is not None:
        H, xs, ys = np.histogram2d(xdata[ok], ydata[ok], bins=(xbins, ybins))
    else:
        H, xs, ys = np.histogram2d(xdata[ok], ydata[ok], bins=nbins)
    # histogram2d puts x along axis 0; imshow expects rows to be y.
    H = H.T
    if logscale:
        H = np.log(H)
    extent = [xs[0], xs[-1], ys[0], ys[-1]]
    plt.imshow(H, extent=extent, interpolation=interpolation, aspect='auto',
               cmap=cmap, origin='lower', **kwargs)
|
def snipstr(string, width=79, snipat=None, ellipsis='...'):
    """Return string cut to specified length.

    Each line longer than *width* is shortened by replacing a span with
    *ellipsis*; *snipat* (a fraction in (0, 1), an index, or None for the
    middle) controls where the cut is made.

    >>> snipstr('abcdefghijklmnop', 8)
    'abc...op'
    """
    if snipat is None:
        snipat = 0.5
    if ellipsis is None:
        if isinstance(string, bytes):
            ellipsis = b'...'
        else:
            ellipsis = u'\u2026'
            # does not print on win-py3.5
    esize = len(ellipsis)
    splitlines = string.splitlines()
    # TODO: finish and test multiline snip
    result = []
    for line in splitlines:
        if line is None:
            result.append(ellipsis)
            continue
        linelen = len(line)
        if linelen <= width:
            # BUG FIX: the original appended the whole input (`string`)
            # here and in every branch below instead of the current
            # `line`, corrupting any multiline input. Single-line inputs
            # are unaffected (line == string).
            result.append(line)
            continue
        split = snipat
        if split is None or split == 1:
            split = linelen
        elif 0 < abs(split) < 1:
            split = int(math.floor(linelen * split))
        if split < 0:
            split += linelen
            if split < 0:
                split = 0
        if esize == 0 or width < esize + 1:
            if split <= 0:
                result.append(line[-width:])
            else:
                result.append(line[:width])
        elif split <= 0:
            result.append(ellipsis + line[esize - width:])
        elif split >= linelen or width < esize + 4:
            result.append(line[:width - esize] + ellipsis)
        else:
            splitlen = linelen - width + esize
            end1 = split - splitlen // 2
            end2 = end1 + splitlen
            result.append(line[:end1] + ellipsis + line[end2:])
    if isinstance(string, bytes):
        return b'\n'.join(result)
    return '\n'.join(result)
|
def coords(self):
    """Return the address location as a coordinate pair, or None.

    NOTE(review): despite the GIS (longitude, latitude) wording in the
    original docstring, the output order follows ``self.order`` — when
    ``order == "lat"`` the pair is (lat, lng). Confirm against callers.
    Returns None when the location or either key is missing.
    """
    keys = ("lat", "lng") if self.order == "lat" else ("lng", "lat")
    try:
        location = self["location"]
        return (location[keys[0]], location[keys[1]])
    except KeyError:
        return None
|
def _ensure_tree(path):
    """Create a directory (and any ancestor directories required).

    :param path: Directory to create
    :returns: True if the directory was created, False if it already
        existed; any other OSError is re-raised.
    """
    try:
        os.makedirs(path)
    except OSError as e:
        if e.errno == errno.EISDIR:
            return False
        if e.errno != errno.EEXIST:
            raise
        # EEXIST: fine if the existing entry is a directory, otherwise a
        # real conflict that the caller must see.
        if os.path.isdir(path):
            return False
        raise
    return True
|
def load(self, filename, **kwargs):
    """Open *filename* for reading and delegate parsing to ``self.parse``.

    Parameters
    ----------
    filename : string
        A path of a file

    Returns
    -------
    ndarray
        An instance of numpy array
    """
    with open(filename, 'r') as stream:
        return self.parse(stream, **kwargs)
|
def from_dict(cls, data):
    """Build an instance of *cls* from a mapping.

    A non-None 'cause' entry is recursively converted first, so chained
    causes become nested objects.
    """
    kwargs = dict(data)
    if kwargs.get('cause') is not None:
        kwargs['cause'] = cls.from_dict(kwargs['cause'])
    return cls(**kwargs)
|
def hash_evidence(text: str, type: str, reference: str) -> str:
    """Return a SHA-512 hex digest identifying an evidence/citation pair.

    :param text: The evidence text
    :param type: The corresponding citation type
    :param reference: The citation reference
    """
    key = ':'.join((type, reference, text))
    return hashlib.sha512(key.encode('utf8')).hexdigest()
|
def run(self):
    """Run the plugin.

    Compares the component lists reported by each worker build and raises
    ValueError when any RPM component's metadata disagrees across workers
    (per ``self.rpm_compare``). Components listed in the workflow's
    comparison exceptions are skipped.
    """
    if self.workflow.builder.base_from_scratch:
        self.log.info("Skipping comparing components: unsupported for FROM-scratch images")
        return
    worker_metadatas = self.workflow.postbuild_results.get(PLUGIN_FETCH_WORKER_METADATA_KEY)
    comp_list = self.get_component_list_from_workers(worker_metadatas)
    if not comp_list:
        raise ValueError("No components to compare")
    package_comparison_exceptions = get_package_comparison_exceptions(self.workflow)
    # master compare list
    master_comp = {}
    # The basic strategy is to start with empty lists and add new component
    # versions as we find them. Upon next iteration, we should notice
    # duplicates and be able to compare them. If the match fails, we raise
    # an exception. If the component name does not exist, assume it was an
    # arch dependency, add it to list and continue. By the time we get to
    # the last arch, we should have every possible component in the master
    # list to compare with.
    # Keep everything separated by component type
    failed_components = set()
    for components in comp_list:
        for component in components:
            t = component['type']
            name = component['name']
            if name in package_comparison_exceptions:
                self.log.info("Ignoring comparison of package %s", name)
                continue
            if t not in SUPPORTED_TYPES:
                raise ValueError("Type %s not supported" % t)
            if name in failed_components:
                # report a failed component only once
                continue
            identifier = (t, name)
            if identifier not in master_comp:
                # First sighting of this component: record it and move on.
                master_comp[identifier] = component
                continue
            if t == T_RPM:
                mc = master_comp[identifier]
                try:
                    self.rpm_compare(mc, component)
                except ValueError as ex:
                    # Mismatch: log details from every worker's copy, but
                    # defer the failure so all mismatches get reported.
                    self.log.debug("Mismatch details: %s", ex)
                    self.log.warning("Comparison mismatch for component %s:", name)
                    # use all components to provide complete list
                    for comp in filter_components_by_name(name, comp_list):
                        self.log_rpm_component(comp)
                    failed_components.add(name)
    if failed_components:
        raise ValueError("Failed component comparison for components: "
                         "{components}".format(components=', '.join(sorted(failed_components))))
|
def sync_state_context(self, state, context):
    """Recursively move optimizer state onto *context*.

    NDArray leaves are copied with ``as_in_context``; tuples/lists are
    rebuilt element-wise with the same container type; anything else is
    returned unchanged.
    """
    if isinstance(state, NDArray):
        return state.as_in_context(context)
    if isinstance(state, (tuple, list)):
        moved = [self.sync_state_context(item, context) for item in state]
        return tuple(moved) if isinstance(state, tuple) else moved
    return state
|
def getConnectorVersion(self):
    """GET the current Connector version.

    :returns: asyncResult object, populates error and result fields
    :rtype: asyncResult
    """
    result = asyncResult()
    response = self._getURL("/", versioned=False)
    result.fill(response)
    if response.status_code != 200:
        result.error = response_codes("get_mdc_version", response.status_code)
    else:
        result.error = False
    result.is_done = True
    return result
|
def expand_brackets(s):
    """Remove whitespace and expand all brackets.

    Plain parenthesised groups are flattened in place; a group preceded by
    ``<factor>*`` (matched by the module-level BRACKET_RE) is repeated
    *factor* times, comma-separated.

    :raises ValueError: on unbalanced parentheses or an unparsable
        multiplier expression.
    """
    # Strip all whitespace first so the index arithmetic below is stable.
    s = ''.join(s.split())
    while True:
        start = s.find('(')
        if start == -1:
            break
        count = 1
        # Number of hanging open brackets
        p = start + 1
        while p < len(s):
            if s[p] == '(':
                count += 1
            if s[p] == ')':
                count -= 1
            if not count:
                # p now indexes the matching close bracket.
                break
            p += 1
        if count:
            raise ValueError("Unbalanced parenthesis in '{0}'.".format(s))
        if start == 0 or s[start - 1] != '*':
            # Bare group: simply drop the surrounding parentheses.
            s = s[0:start] + s[start + 1:p] + s[p + 1:]
        else:
            # Multiplied group, e.g. '3*(a,b)': repeat the contents.
            m = BRACKET_RE.search(s)
            if m:
                factor = int(m.group('factor'))
                matchstart = m.start('factor')
                s = s[0:matchstart] + (factor - 1) * (s[start + 1:p] + ',') + s[start + 1:p] + s[p + 1:]
            else:
                raise ValueError("Failed to parse '{0}'.".format(s))
    return s
|
def get_reservation_ports(session, reservation_id, model_name='Generic Traffic Generator Port'):
    """Get all Generic Traffic Generator Port in reservation.

    :return: list of all Generic Traffic Generator Port resource objects
        in reservation
    """
    details = session.GetReservationDetails(reservation_id).ReservationDescription
    return [resource for resource in details.Resources
            if resource.ResourceModelName == model_name]
|
def get_page_children_dict(self, page_qs=None):
    """Group pages by their parent 'path' value.

    Returns a dict mapping each parent path (the page's path minus its
    final step) to the list of pages directly beneath it. Falls back to
    ``self.pages_for_display`` when *page_qs* is not supplied.
    """
    grouped = defaultdict(list)
    for page in (page_qs or self.pages_for_display):
        parent_path = page.path[:-page.steplen]
        grouped[parent_path].append(page)
    return grouped
|
def update_workspace_config(namespace, workspace, cnamespace, configname, body):
    """Update method configuration in workspace.

    Args:
        namespace (str): project to which workspace belongs
        workspace (str): Workspace name
        cnamespace (str): Configuration namespace
        configname (str): Configuration name
        body (json): new body (definition) of the method config

    Swagger:
        https://api.firecloud.org/#!/Method_Configurations/updateWorkspaceMethodConfig
    """
    uri = '/'.join(["workspaces", namespace, workspace,
                    "method_configs", cnamespace, configname])
    return __post(uri, json=body)
|
def remove_trailing_white_spaces(self):
    """Removes document trailing white spaces.

    Walks every block (line) of the Qt text document, rewriting any line
    that ends in whitespace, then ensures the document ends with a newline.

    :return: Method success.
    :rtype: bool
    """
    cursor = self.textCursor()
    block = self.document().findBlockByLineNumber(0)
    while block.isValid():
        cursor.setPosition(block.position())
        if re.search(r"\s+$", block.text()):
            # Select the whole line and replace it with its rstripped text.
            cursor.movePosition(QTextCursor.EndOfBlock)
            cursor.movePosition(QTextCursor.StartOfBlock, QTextCursor.KeepAnchor)
            cursor.insertText(foundations.strings.to_string(block.text()).rstrip())
        block = block.next()
    cursor.movePosition(QTextCursor.End, QTextCursor.MoveAnchor)
    # NOTE(review): `.isEmpty()` relies on QString semantics (PyQt api v1);
    # with plain Python strings this would need `not cursor.block().text()`
    # — confirm which bindings/API version this runs under.
    if not cursor.block().text().isEmpty():
        cursor.insertText("\n")
    return True
|
def create_configmap(name, namespace, data, source=None, template=None,
                     saltenv='base', **kwargs):
    '''Creates the kubernetes configmap as defined by the user.

    CLI Examples::
        salt 'minion1' kubernetes.create_configmap settings default '{"example.conf": "# example file"}'
        salt 'minion2' kubernetes.create_configmap name=settings namespace=default data='{"example.conf": "# example file"}'
    '''
    # A source file takes precedence over inline data.
    if source:
        data = __read_and_render_yaml_file(source, template, saltenv)
    elif data is None:
        data = {}
    # ConfigMap values must all be strings.
    data = __enforce_only_strings_dict(data)
    body = kubernetes.client.V1ConfigMap(
        metadata=__dict_to_object_meta(name, namespace, {}), data=data)
    cfg = _setup_conn(**kwargs)
    try:
        api_instance = kubernetes.client.CoreV1Api()
        api_response = api_instance.create_namespaced_config_map(namespace, body)
        return api_response.to_dict()
    except (ApiException, HTTPError) as exc:
        if isinstance(exc, ApiException) and exc.status == 404:
            # 404 (e.g. missing namespace) is reported as "no result"
            # rather than an error, matching the module's other calls.
            return None
        else:
            log.exception('Exception when calling '
                          'CoreV1Api->create_namespaced_config_map')
            raise CommandExecutionError(exc)
    finally:
        # Always tear down the connection configuration set up above.
        _cleanup(**cfg)
|
def _add_new_methods(cls):
    """Attach all generated methods to the result class.

    Raises:
        ValueError: if a generated method name already exists on the
            target class (name collision).
    """
    for name, method in cls.context.new_methods.items():
        if hasattr(cls.context.new_class, name):
            # BUG FIX: the original called .format(name) positionally while
            # the template used the named field {name}, so raising this
            # error itself crashed with a KeyError.
            raise ValueError(
                "Name collision in state machine class - '{name}'.".format(name=name))
        setattr(cls.context.new_class, name, method)
|
def get_description_by_type(self, type_p):
    """Like :py:func:`get_description`, but restricted to the given type.

    in type_p of type :class:`VirtualSystemDescriptionType`
    out types of type :class:`VirtualSystemDescriptionType`
    out refs of type str
    out ovf_values of type str
    out v_box_values of type str
    out extra_config_values of type str
    """
    if not isinstance(type_p, VirtualSystemDescriptionType):
        raise TypeError("type_p can only be an instance of type VirtualSystemDescriptionType")
    (types, refs, ovf_values, v_box_values,
     extra_config_values) = self._call("getDescriptionByType", in_p=[type_p])
    # Re-wrap the raw enum values before returning them to the caller.
    types = [VirtualSystemDescriptionType(item) for item in types]
    return (types, refs, ovf_values, v_box_values, extra_config_values)
|
def build_inventory(setup):
    '''Builds an inventory for use as part of a
    `dynamic Ansible inventory <http://docs.ansible.com/ansible/intro_dynamic_inventory.html>`_
    according to the
    `script conventions <http://docs.ansible.com/ansible/developing_inventory.html#script-conventions>`_.

    Parameters
    ----------
    setup : tmdeploy.config.Setup
        setup configuration

    Returns
    -------
    dict
        mapping of groups to hosts

    Raises
    ------
    TypeError
        when *setup* is not a :class:`Setup` instance
    '''
    # BUG FIX: validate the argument *before* dereferencing setup.cloud;
    # the original only performed this type check after already having
    # read several attributes from the object.
    if not isinstance(setup, Setup):
        raise TypeError('Argument "setup" must have type {0}.'.format(
            '.'.join([Setup.__module__, Setup.__name__])))
    inventory = dict()
    inventory['all'] = dict()
    inventory['all']['vars'] = {
        'provider': setup.cloud.provider,
        'region': setup.cloud.region,
        'key_name': setup.cloud.key_name,
        'key_file': os.path.expandvars(os.path.expanduser(setup.cloud.key_file_public)),
        'network': setup.cloud.network,
        'subnetwork': setup.cloud.subnetwork,
        'ip_range': setup.cloud.ip_range,
        'proxy_env': {
            'http_proxy': setup.cloud.proxy,
            'https_proxy': setup.cloud.proxy,
        },
    }
    inventory['_meta'] = dict()
    inventory['_meta']['hostvars'] = dict()
    for cluster in setup.architecture.clusters:
        logger.info('configure cluster "%s"', cluster.name)
        for node_type in cluster.node_types:
            logger.info('configure node type "%s"', node_type.name)
            for i in range(node_type.count):
                host_name = HOSTNAME_FORMAT.format(
                    name=setup.architecture.name, cluster=cluster.name,
                    node_type=node_type.name, index=i + 1)
                host_vars = dict()
                for k, v in node_type.instance.to_dict().items():
                    if k == 'tags':
                        # Every server is part of the "compute-storage"
                        # security group, which is important for servers to be
                        # able to connect to each other when part of a cluster.
                        security_groups = 'compute-storage'
                        if 'web' in v:
                            host_vars['assign_public_ip'] = 'yes'
                            security_groups = ','.join([security_groups, 'web'])
                        else:
                            host_vars['assign_public_ip'] = 'no'
                        host_vars['security_groups'] = security_groups
                    if isinstance(v, list):
                        v = ','.join(v)
                    host_vars[k] = v
                inventory['_meta']['hostvars'][host_name] = host_vars
                for group in node_type.groups:
                    logger.info('add group "%s"', group.name)
                    if group.name not in inventory:
                        inventory[group.name] = {'hosts': list()}
                    inventory[group.name]['hosts'].append(host_name)
                    inventory[group.name]['vars'] = dict()
                    if group.vars is not None:
                        inventory[group.name]['vars'].update(group.vars)
                    if node_type.vars is not None:
                        inventory[group.name]['vars'].update(node_type.vars)
    return inventory
|
def theme(self, value):
    """Setter for **self.__theme** attribute.

    :param value: Attribute value (validated to be a dict when not None).
    :type value: dict
    """
    if value is not None:
        # Validation uses `assert`, matching this codebase's setter style
        # (note: stripped under `python -O`).
        assert type(value) is dict, "'{0}' attribute: '{1}' type is not 'dict'!".format("theme", value)
    # NOTE(review): assignment reconstructed as unconditional (the setter
    # also accepts None to reset), per this codebase's standard setter
    # pattern — confirm against the upstream source.
    self.__theme = value
|
def translate(self):
    """Compile the variable lookup.

    Returns a ([expression], {identifier}, set()) triple for the single
    variable this node references.
    """
    name = self.ident
    expression = ex_rvalue(VARIABLE_PREFIX + name)
    return [expression], {name}, set()
|
def _parse_commit_response(commit_response_pb):
    """Extract response data from a commit response.

    :type commit_response_pb: :class:`.datastore_pb2.CommitResponse`
    :param commit_response_pb: The protobuf response from a commit request.

    :rtype: tuple
    :returns: The pair of the number of index updates and a list of
        :class:`.entity_pb2.Key` for each incomplete key that was
        completed in the commit (i.e. mutation results whose ``key``
        message field is set).
    """
    completed_keys = []
    for mutation in commit_response_pb.mutation_results:
        if mutation.HasField("key"):
            # Message field (Key)
            completed_keys.append(mutation.key)
    return commit_response_pb.index_updates, completed_keys
|
def annotate_snvs(adapter, vcf_obj):
    """Annotate all variants in a VCF.

    Variants are buffered in batches of 1000; each full batch is looked up
    in the database in one call, annotated in place, then yielded in
    insertion order. The final partial batch is flushed the same way.

    Args:
        adapter (loqusdb.plugin.adapter)
        vcf_obj (cyvcf2.VCF)

    Yields:
        variant (cyvcf2.Variant): Annotated variant
    """
    variants = {}
    for nr_variants, variant in enumerate(vcf_obj, 1):
        # Add the variant to current batch
        variants[get_variant_id(variant)] = variant
        # If batch len == 1000 we annotate the batch
        if (nr_variants % 1000) == 0:
            for var_obj in adapter.search_variants(list(variants.keys())):
                var_id = var_obj['_id']
                if var_id in variants:
                    annotate_variant(variants[var_id], var_obj)
            for variant_id in variants:
                yield variants[variant_id]
            # Start a fresh batch.
            variants = {}
    # Flush the remaining (partial) batch.
    for var_obj in adapter.search_variants(list(variants.keys())):
        var_id = var_obj['_id']
        if var_id in variants:
            annotate_variant(variants[var_id], var_obj)
    for variant_id in variants:
        yield variants[variant_id]
|
def existing(self):
    """Find existing content assigned to this layout.

    Queries the portal catalog for items using the requested layout and
    returns a JSON string of the form
    ``{"total": <count>, "data": [{"title": ..., "url": ...}, ...]}``.
    """
    catalog = api.portal.get_tool('portal_catalog')
    layout_path = self._get_layout_path(self.request.form.get('layout', ''))
    results = [{'title': brain.Title, 'url': brain.getURL()}
               for brain in catalog(layout=layout_path)]
    return json.dumps({'total': len(results), 'data': results})
|
def preparse(template_text, lookup=None):
    """Do any special processing of a template, including recognizing the
    templating language and resolving file: references, then return an
    appropriate wrapper object.

    Currently Tempita and Python string interpolation are supported.
    `lookup` is an optional callable that resolves any ambiguous template
    path.
    """
    # First, try to resolve file: references to their contents
    template_path = None
    try:
        is_file = template_text.startswith("file:")
    except (AttributeError, TypeError):
        pass
        # not a string — leave template_text as-is (may already be a
        # preparsed template object, handled below)
    else:
        if is_file:
            template_path = template_text[5:]
            if template_path.startswith('/'):
                # Collapse duplicate leading slashes to a single one.
                template_path = '/' + template_path.lstrip('/')
            elif template_path.startswith('~'):
                template_path = os.path.expanduser(template_path)
            elif lookup:
                template_path = lookup(template_path)
            with closing(open(template_path, "r")) as handle:
                template_text = handle.read().rstrip()
    if hasattr(template_text, "__engine__"):
        # Already preparsed
        template = template_text
    else:
        # A leading "{{" marks a Tempita template; anything else uses
        # plain Python string interpolation.
        if template_text.startswith("{{"):
            import tempita
            # only on demand
            template = tempita.Template(template_text, name=template_path)
            template.__engine__ = "tempita"
        else:
            template = InterpolationTemplate(template_text)
    # Record provenance on whatever wrapper is being returned.
    template.__file__ = template_path
    template.__text__ = template_text
    return template
|
def add_bundle(name, scripts=(), files=(), scriptsdir=SCRIPTSDIR, filesdir=FILESDIR):
    """High level, simplified interface for creating a bundle which
    takes the bundle name, a list of script file names in a common
    scripts directory, and a list of absolute target file paths, of
    which the basename is also located in a common files directory.
    It converts those lists into maps and then calls new_bundle() to
    actually create the Bundle and add it to BUNDLEMAP.

    FIX: the defaults were mutable lists ([]), which are shared across
    calls; immutable empty tuples behave identically here (both arguments
    are only iterated) without that hazard.
    """
    scriptmap = makemap(scripts, join(PATH, scriptsdir))
    # Map each absolute target path to its source under filesdir.
    filemap = {f: join(PATH, filesdir, os.path.basename(f)) for f in files}
    new_bundle(name, scriptmap, filemap)
|
def aws_cli(*cmd):
    """Invoke aws command.

    Runs awscli in-process via ``create_clidriver().main``, temporarily
    forcing ``LC_CTYPE`` in the environment and restoring the caller's
    environment afterwards (even on failure).

    :raises RuntimeError: when the CLI exits with a positive exit code.
    """
    old_env = dict(os.environ)
    try:
        # Environment
        env = os.environ.copy()
        # NOTE(review): 'en_US.UTF' looks like a truncated 'en_US.UTF-8' —
        # confirm whether awscli accepts this locale string.
        env['LC_CTYPE'] = u'en_US.UTF'
        os.environ.update(env)
        # Run awscli in the same process
        exit_code = create_clidriver().main(*cmd)
        # Deal with problems
        if exit_code > 0:
            raise RuntimeError('AWS CLI exited with code {}'.format(exit_code))
    finally:
        # Restore the caller's environment exactly as it was.
        os.environ.clear()
        os.environ.update(old_env)
|
def _decode_signature(self, signature):
    """Decode the internal fields of the base64-encoded signature.

    Returns (is_compressed, recid, r, s); raises EncodingError when the
    payload is not exactly 65 bytes or the header byte is out of range.
    """
    raw = a2b_base64(signature)
    if len(raw) != 65:
        raise EncodingError("Wrong length, expected 65")
    # split into the parts.
    header = byte2int(raw)
    r = from_bytes_32(raw[1:33])
    s = from_bytes_32(raw[33:33 + 32])
    # first byte encodes bits we need to know about the point used in signature
    if not 27 <= header < 35:
        raise EncodingError("First byte out of range")
    # NOTE: The first byte encodes the "recovery id", or "recid", a 3-bit
    # value which selects compressed/not-compressed and one of 4 possible
    # public pairs.
    header -= 27
    is_compressed = bool(header & 0x4)
    return is_compressed, (header & 0x3), r, s
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.