def get(expr, key, default=None):
    """Return the mapped value for this key, or the default
    if the key does not exist.

    Parameters
    ----------
    key : any
    default : any
    """
    return ops.MapValueOrDefaultForKey(expr, key, default).to_expr()

def start(self):
    '''Startup the zmq consumer.'''
    if self.port:
        zmq_uri = '{protocol}://{address}:{port}'.format(
            protocol=self.protocol, address=self.address, port=self.port)
    else:
        zmq_uri = '{protocol}://{address}'.format(
            protocol=self.protocol, address=self.address)
    log.debug('ZMQ URI: %s', zmq_uri)
    self.ctx = zmq.Context()
    if hasattr(zmq, self.type):
        skt_type = getattr(zmq, self.type)
    else:
        skt_type = zmq.PULL
    self.sub = self.ctx.socket(skt_type)
    self.sub.connect(zmq_uri)
    if self.hwm is not None:
        try:
            self.sub.setsockopt(zmq.HWM, self.hwm)
        except AttributeError:
            self.sub.setsockopt(zmq.RCVHWM, self.hwm)
    if self.recvtimeout is not None:
        log.debug('Setting RCVTIMEO to %d', self.recvtimeout)
        self.sub.setsockopt(zmq.RCVTIMEO, self.recvtimeout)
    if self.keepalive is not None:
        log.debug('Setting TCP_KEEPALIVE to %d', self.keepalive)
        self.sub.setsockopt(zmq.TCP_KEEPALIVE, self.keepalive)
    if self.keepalive_idle is not None:
        log.debug('Setting TCP_KEEPALIVE_IDLE to %d', self.keepalive_idle)
        self.sub.setsockopt(zmq.TCP_KEEPALIVE_IDLE, self.keepalive_idle)
    if self.keepalive_interval is not None:
        log.debug('Setting TCP_KEEPALIVE_INTVL to %d', self.keepalive_interval)
        self.sub.setsockopt(zmq.TCP_KEEPALIVE_INTVL, self.keepalive_interval)

def discrete_index(self, indices):
    """Get elements by discrete indices.

    :param indices: list
        discrete indices
    :return: elements
    """
    elements = []
    for i in indices:
        elements.append(self[i])
    return elements

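# A hedged illustration (not from the original source): the same element
# picking applied to a plain list, mirroring what discrete_index does with
# self[i] on the containing class.
data = ['a', 'b', 'c', 'd', 'e']
indices = [0, 2, 4]
elements = [data[i] for i in indices]
# elements == ['a', 'c', 'e']
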
def from_json_dict(cls,
                   json_dict  # type: Dict[str, Any]
                   ):
    # type: (...) -> IntegerSpec
    """Make an IntegerSpec object from a dictionary containing its
    properties.

    :param dict json_dict: This dictionary may contain `'minimum'`
        and `'maximum'` keys. In addition, it must contain a
        `'hashing'` key, whose contents are passed to
        :class:`FieldHashingProperties`.
    """
    # noinspection PyCompatibility
    result = cast(IntegerSpec,  # For Mypy.
                  super().from_json_dict(json_dict))
    format_ = json_dict['format']
    result.minimum = format_.get('minimum')
    result.maximum = format_.get('maximum')
    return result

def signal(signal=None):
    '''Signals nginx to start, reload, reopen or stop.

    CLI Example:

    .. code-block:: bash

        salt '*' nginx.signal reload
    '''
    valid_signals = ('start', 'reopen', 'stop', 'quit', 'reload')
    if signal not in valid_signals:
        return
    # Make sure you use the right arguments
    if signal == "start":
        arguments = ''
    else:
        arguments = ' -s {0}'.format(signal)
    cmd = __detect_os() + arguments
    out = __salt__['cmd.run_all'](cmd)
    # A non-zero return code means fail
    if out['retcode'] and out['stderr']:
        ret = out['stderr'].strip()
    # 'nginxctl configtest' returns 'Syntax OK' to stderr
    elif out['stderr']:
        ret = out['stderr'].strip()
    elif out['stdout']:
        ret = out['stdout'].strip()
    # No output for something like: nginxctl graceful
    else:
        ret = 'Command: "{0}" completed successfully!'.format(cmd)
    return ret

def verify_is_none(self, expr, msg=None):
    """Soft assert for whether the expr is None.

    :params expr: the object to check
    :params msg: (Optional) msg explaining the difference
    """
    try:
        self.assert_is_none(expr, msg)
    except AssertionError as e:
        if msg:
            m = "%s:\n%s" % (msg, str(e))
        else:
            m = str(e)
        self.verification_erorrs.append(m)

def get_proof_generator(self, tx_id, chain=Chain.bitcoin_mainnet):
    """Returns a generator (1-time iterator) of proofs in insertion order.

    :param tx_id: blockchain transaction id
    :return:
    """
    root = ensure_string(self.tree.get_merkle_root())
    node_count = len(self.tree.leaves)
    for index in range(0, node_count):
        proof = self.tree.get_proof(index)
        proof2 = []
        for p in proof:
            dict2 = dict()
            for key, value in p.items():
                dict2[key] = ensure_string(value)
            proof2.append(dict2)
        target_hash = ensure_string(self.tree.get_leaf(index))
        merkle_proof = {
            "type": ['MerkleProof2017', 'Extension'],
            "merkleRoot": root,
            "targetHash": target_hash,
            "proof": proof2,
            "anchors": [{
                "sourceId": to_source_id(tx_id, chain),
                "type": chain.blockchain_type.external_display_value,
                "chain": chain.external_display_value
            }]
        }
        yield merkle_proof

def format(self):
    """Crop and resize the supplied image. Return the image and the crop_box used.

    If the input format is JPEG and EXIF contains rotation information, use it
    and rotate the resulting image.
    """
    if hasattr(self.image, '_getexif'):
        self.rotate_exif()
    crop_box = self.crop_to_ratio()
    self.resize()
    return self.image, crop_box

def read(self, size):
    """Read raw bytes from the instrument.

    :param size: number of bytes to read from the instrument
    :type size: integer
    :return: received bytes
    :return type: bytes
    """
    raw_read = super(USBRawDevice, self).read
    received = bytearray()
    while not len(received) >= size:
        resp = raw_read(self.RECV_CHUNK)
        received.extend(resp)
    return bytes(received)

def pt_rotate(pt=(0.0, 0.0), angle=[0.0], center=(0.0, 0.0)):
    '''Return given point rotated around a center point in N dimensions.

    Angle is a list of rotations in radians, one for each pair of axes.
    '''
    assert isinstance(pt, tuple)
    l_pt = len(pt)
    assert l_pt > 1
    for i in pt:
        assert isinstance(i, float)
    assert isinstance(angle, list)
    l_angle = len(angle)
    assert l_angle == l_pt - 1
    for i in angle:
        assert isinstance(i, float)
        assert abs(i) <= 2 * pi
    assert isinstance(center, tuple)
    assert len(center) == l_pt
    for i in center:
        assert isinstance(i, float)
    # Get vector from center to point and use it to get relative polar coordinate.
    v_cart = [pt[i] - center[i] for i in range(l_pt)]
    # Length of vector needs to stay constant for new point.
    v_pol_l = [sqrt(v_cart[i] ** 2 + v_cart[i + 1] ** 2) for i in range(l_angle)]
    v_pol_a = [(atan(v_cart[i + 1] / v_cart[i]) if v_cart[i] != 0.0 else pi / 2)
               + pi * int(pt[i] < center[i]) for i in range(l_angle)]
    # Add rotation angle then convert back to cartesian vector.
    n_pol_a = [v_pol_a[i] + angle[i] for i in range(l_angle)]
    n_cart = [v_pol_l[0] * cos(n_pol_a[0])] + \
             [v_pol_l[i] * sin(n_pol_a[i]) for i in range(l_angle)]
    # Add in the centre offset to get original offset from c.
    r = [n_cart[i] + center[i] for i in range(l_pt)]
    return tuple(r)

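# A hedged usage sketch (not from the original source); pt_rotate's module is
# assumed to have `from math import pi, sqrt, atan, sin, cos` at top level.
from math import pi

rotated = pt_rotate(pt=(1.0, 0.0), angle=[pi / 2], center=(0.0, 0.0))
# rotated is numerically (0.0, 1.0): a quarter-turn of (1.0, 0.0) about the origin.
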
def to_networkx_graph(H):
    """Returns a NetworkX Graph object that is the graph decomposition of
    the given H.
    See "to_graph_decomposition()" for more details.

    :param H: the H to decompose into a graph.
    :returns: nx.Graph -- NetworkX Graph object representing the
            decomposed H.
    :raises: TypeError -- Transformation only applicable to
            undirected Hs
    """
    import networkx as nx
    if not isinstance(H, UndirectedHypergraph):
        raise TypeError("Transformation only applicable to undirected Hs")
    G = to_graph_decomposition(H)
    nx_graph = nx.Graph()
    for node in G.node_iterator():
        nx_graph.add_node(node, G.get_node_attributes(node))
    for hyperedge_id in G.hyperedge_id_iterator():
        edge_nodes = G.get_hyperedge_nodes(hyperedge_id)
        edge_attributes = G.get_hyperedge_attributes(hyperedge_id)
        nx_graph.add_edge(edge_nodes[0], edge_nodes[1], edge_attributes)
    return nx_graph

def red(self, value):
    """gets/sets the red value"""
    if value != self._red and isinstance(value, int):
        self._red = value

def login_oauth2(self, username, password, mfa_code=None):
    '''Login using username and password'''
    data = {
        "grant_type": "password",
        "scope": "internal",
        "client_id": CLIENT_ID,
        "expires_in": 86400,
        "password": password,
        "username": username
    }
    if mfa_code is not None:
        data['mfa_code'] = mfa_code
    url = "https://api.robinhood.com/oauth2/token/"
    res = self.post(url, payload=data, retry=False)
    if res is None:
        if mfa_code is None:
            msg = ("Client.login_oauth2(). Could not authenticate. Check "
                   "username and password.")
            raise AuthenticationError(msg)
        else:
            msg = ("Client.login_oauth2(). Could not authenticate. Check "
                   "username and password, and enter a valid MFA code.")
            raise AuthenticationError(msg)
    elif res.get('mfa_required') is True:
        msg = "Client.login_oauth2(). Couldn't authenticate. MFA required."
        raise AuthenticationError(msg)
    self.access_token = res["access_token"]
    self.refresh_token = res["refresh_token"]
    self.mfa_code = res["mfa_code"]
    self.scope = res["scope"]
    self.__set_account_info()
    return self.authenticated

def _no_access(basedir):
    '''Return True if the given base dir is not accessible or writeable'''
    import os
    return not os.access(basedir, os.W_OK | os.X_OK)

def is_valid_int_param(param):
    """Check whether the parameter is a valid integer value.

    :param param: Value to be validated.
    :return: True if the parameter holds a valid non-negative integer value, False otherwise.
    """
    if param is None:
        return False
    try:
        param = int(param)
        if param < 0:
            return False
    except (TypeError, ValueError):
        return False
    return True

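# A hedged illustration (not from the original source) of the accepted and
# rejected values: non-negative integers (or digit strings) pass, anything
# else fails.
assert is_valid_int_param(10) is True
assert is_valid_int_param("42") is True
assert is_valid_int_param(-1) is False
assert is_valid_int_param("abc") is False
assert is_valid_int_param(None) is False
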
def cover(self, minAcc, maxAcc, groupBy=None, new_reg_fields=None, cover_type="normal"):
    """*Wrapper of* ``COVER``

    COVER is a GMQL operator that takes as input a dataset (of usually,
    but not necessarily, multiple samples) and returns another dataset
    (with a single sample, if no groupby option is specified) by "collapsing"
    the input samples and their regions according to certain rules
    specified by the COVER parameters. The attributes of the output regions
    are only the region coordinates, plus in case, when aggregate functions
    are specified, new attributes with aggregate values over attribute values
    of the contributing input regions; output metadata are the union of the
    input ones, plus the metadata attributes JaccardIntersect and
    JaccardResult, representing global Jaccard Indexes for the considered
    dataset, computed as the correspondent region Jaccard Indexes but on
    the whole sample regions.

    :param cover_type: the kind of cover variant you want ['normal', 'flat', 'summit', 'histogram']
    :param minAcc: minimum accumulation value, i.e. the minimum number
        of overlapping regions to be considered during COVER execution.
        It can be any positive number or the strings {'ALL', 'ANY'}.
    :param maxAcc: maximum accumulation value, i.e. the maximum number
        of overlapping regions to be considered during COVER execution.
        It can be any positive number or the strings {'ALL', 'ANY'}.
    :param groupBy: optional list of metadata attributes
    :param new_reg_fields: dictionary of the type
        {'new_region_attribute': AGGREGATE_FUNCTION('field'), ...}
    :return: a new GMQLDataset

    An example of usage::

        cell_tf = narrow_peak.cover("normal", minAcc=1, maxAcc="Any",
                                    groupBy=['cell', 'antibody_target'])
    """
    if isinstance(cover_type, str):
        coverFlag = self.opmng.getCoverTypes(cover_type)
    else:
        raise TypeError("type must be a string. "
                        "{} was provided".format(type(cover_type)))
    if isinstance(minAcc, str):
        minAccParam = self.opmng.getCoverParam(minAcc.lower())
    elif isinstance(minAcc, int):
        minAccParam = self.opmng.getCoverParam(str(minAcc).lower())
    else:
        raise TypeError("minAcc must be a string or an integer. "
                        "{} was provided".format(type(minAcc)))
    if isinstance(maxAcc, str):
        maxAccParam = self.opmng.getCoverParam(maxAcc.lower())
    elif isinstance(maxAcc, int):
        maxAccParam = self.opmng.getCoverParam(str(maxAcc).lower())
    else:
        raise TypeError("maxAcc must be a string or an integer. "
                        "{} was provided".format(type(maxAcc)))
    if isinstance(groupBy, list) and all([isinstance(x, str) for x in groupBy]):
        groupBy_result = Some(groupBy)
    elif groupBy is None:
        groupBy_result = none()
    else:
        raise TypeError("groupBy must be a list of string. "
                        "{} was provided".format(type(groupBy)))
    aggregates = []
    if isinstance(new_reg_fields, dict):
        expBuild = self.pmg.getNewExpressionBuilder(self.__index)
        for k in new_reg_fields.keys():
            if isinstance(k, str):
                item = new_reg_fields[k]
                if isinstance(item, (SUM, MIN, MAX, AVG, BAG, BAGD, MEDIAN, COUNT)):
                    op_name = item.get_aggregate_name()
                    op_argument = item.get_argument()
                    if op_argument is None:
                        op_argument = none()
                    else:
                        op_argument = Some(op_argument)
                    regsToReg = expBuild.getRegionsToRegion(op_name, k, op_argument)
                    aggregates.append(regsToReg)
                else:
                    raise TypeError("The items in new_reg_fields must be Aggregates "
                                    "(SUM, MIN, MAX, AVG, BAG, BAGD, MEDIAN, COUNT). "
                                    "{} was provided".format(type(item)))
            else:
                raise TypeError("The key of new_reg_fields must be a string. "
                                "{} was provided".format(type(k)))
    elif new_reg_fields is None:
        pass
    else:
        raise TypeError("new_reg_fields must be a dictionary. "
                        "{} was provided".format(type(new_reg_fields)))
    new_index = self.opmng.cover(self.__index, coverFlag, minAccParam, maxAccParam,
                                 groupBy_result, aggregates)
    return GMQLDataset(index=new_index, location=self.location,
                       local_sources=self._local_sources,
                       remote_sources=self._remote_sources,
                       meta_profile=self.meta_profile)

def unindent(self):
    """Un-indents text at cursor position."""
    _logger().debug('unindent')
    cursor = self.editor.textCursor()
    _logger().debug('cursor has selection %r', cursor.hasSelection())
    if cursor.hasSelection():
        cursor.beginEditBlock()
        self.unindent_selection(cursor)
        cursor.endEditBlock()
        self.editor.setTextCursor(cursor)
    else:
        tab_len = self.editor.tab_length
        indentation = cursor.positionInBlock()
        indentation -= self.min_column
        if indentation == 0:
            return
        max_spaces = indentation % tab_len
        if max_spaces == 0:
            max_spaces = tab_len
        spaces = self.count_deletable_spaces(cursor, max_spaces)
        _logger().info('deleting %d space before cursor' % spaces)
        cursor.beginEditBlock()
        for _ in range(spaces):
            cursor.deletePreviousChar()
        cursor.endEditBlock()
        self.editor.setTextCursor(cursor)
        _logger().debug(cursor.block().text())

def setup(self, universe):
    """Setup Security with universe. Speeds up future runs.

    Args:
        * universe (DataFrame): DataFrame of prices with security's name as
            one of the columns.
    """
    # if we already have all the prices, we will store them to speed up
    # future updates
    try:
        prices = universe[self.name]
    except KeyError:
        prices = None
    # setup internal data
    if prices is not None:
        self._prices = prices
        self.data = pd.DataFrame(index=universe.index,
                                 columns=['value', 'position'],
                                 data=0.0)
        self._prices_set = True
    else:
        self.data = pd.DataFrame(index=universe.index,
                                 columns=['price', 'value', 'position'])
        self._prices = self.data['price']
        self._prices_set = False
    self._values = self.data['value']
    self._positions = self.data['position']
    # add _outlay
    self.data['outlay'] = 0.
    self._outlays = self.data['outlay']

def _create_dir(path):
    '''Creates necessary directories for the given path or does nothing
    if the directories already exist.
    '''
    try:
        os.makedirs(path)
    except OSError as exc:
        if exc.errno == errno.EEXIST:
            pass
        else:
            raise

def rates_for_location(self, postal_code, location_deets=None):
    """Shows the sales tax rates for a given location."""
    request = self._get("rates/" + postal_code, location_deets)
    return self.responder(request)

def close(self):
    """Called to clean all possible tmp files created during the process."""
    if self.read_option('save_pointer'):
        self._update_last_pointer()
    super(S3Writer, self).close()

def _set_interface_hello_padding(self, v, load=False):
    """Setter method for interface_hello_padding, mapped from YANG variable
    /routing_system/interface/ve/intf_isis/interface_isis/interface_hello/interface_hello_padding (container)

    If this variable is read-only (config: false) in the source YANG file,
    then _set_interface_hello_padding is considered as a private method.
    Backends looking to populate this variable should do so via calling
    thisObj._set_interface_hello_padding() directly.
    """
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        t = YANGDynClass(
            v,
            base=interface_hello_padding.interface_hello_padding,
            is_container='container',
            presence=False,
            yang_name="interface-hello-padding",
            rest_name="padding",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            extensions={u'tailf-common': {u'info': u'Pad hello packets on this interface',
                                          u'alt-name': u'padding'}},
            namespace='urn:brocade.com:mgmt:brocade-isis',
            defining_module='brocade-isis',
            yang_type='container',
            is_config=True)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': """interface_hello_padding must be of a type compatible with container""",
            'defined-type': "container",
            'generated-type': """YANGDynClass(base=interface_hello_padding.interface_hello_padding, is_container='container', presence=False, yang_name="interface-hello-padding", rest_name="padding", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Pad hello packets on this interface', u'alt-name': u'padding'}}, namespace='urn:brocade.com:mgmt:brocade-isis', defining_module='brocade-isis', yang_type='container', is_config=True)""",
        })
    self.__interface_hello_padding = t
    if hasattr(self, '_set'):
        self._set()

def make_parameter_dict(pdict, fixed_par=False, rescale=True, update_bounds=False):
    """Update a parameter dictionary. This function will automatically
    set the parameter scale and bounds if they are not defined.
    Bounds are also adjusted to ensure that they encompass the
    parameter value.
    """
    o = copy.deepcopy(pdict)
    o.setdefault('scale', 1.0)
    if rescale:
        value, scale = utils.scale_parameter(o['value'] * o['scale'])
        o['value'] = np.abs(value) * np.sign(o['value'])
        o['scale'] = np.abs(scale) * np.sign(o['scale'])
        if 'error' in o:
            o['error'] /= np.abs(scale)
    if update_bounds:
        o['min'] = o['value'] * 1E-3
        o['max'] = o['value'] * 1E3
    if fixed_par:
        o['min'] = o['value']
        o['max'] = o['value']
    if float(o['min']) > float(o['value']):
        o['min'] = o['value']
    if float(o['max']) < float(o['value']):
        o['max'] = o['value']
    return o

def validate_bam(self, input_bam):
    """Wrapper for Picard's ValidateSamFile.

    :param str input_bam: Path to file to validate.
    :return str: Command to run for the validation.
    """
    cmd = self.tools.java + " -Xmx" + self.pm.javamem
    cmd += " -jar " + self.tools.picard + " ValidateSamFile"
    cmd += " INPUT=" + input_bam
    return cmd

def update_cluster(cluster_dict, datacenter=None, cluster=None, service_instance=None):
    '''Updates a cluster.

    cluster_dict
        Dictionary with the config values of the new cluster.

    datacenter
        Name of datacenter containing the cluster.
        Ignored if already contained by proxy details.
        Default value is None.

    cluster
        Name of cluster.
        Ignored if already contained by proxy details.
        Default value is None.

    service_instance
        Service instance (vim.ServiceInstance) of the vCenter.
        Default is None.

    .. code-block:: bash

        # esxdatacenter proxy
        salt '*' vsphere.update_cluster cluster_dict=$cluster_dict cluster=cl1

        # esxcluster proxy
        salt '*' vsphere.update_cluster cluster_dict=$cluster_dict
    '''
    # Validate cluster dictionary
    schema = ESXClusterConfigSchema.serialize()
    try:
        jsonschema.validate(cluster_dict, schema)
    except jsonschema.exceptions.ValidationError as exc:
        raise InvalidConfigError(exc)
    # Get required details from the proxy
    proxy_type = get_proxy_type()
    if proxy_type == 'esxdatacenter':
        datacenter = __salt__['esxdatacenter.get_details']()['datacenter']
        dc_ref = _get_proxy_target(service_instance)
        if not cluster:
            raise ArgumentValueError('\'cluster\' needs to be specified')
    elif proxy_type == 'esxcluster':
        datacenter = __salt__['esxcluster.get_details']()['datacenter']
        dc_ref = salt.utils.vmware.get_datacenter(service_instance, datacenter)
        cluster = __salt__['esxcluster.get_details']()['cluster']
    if cluster_dict.get('vsan') and not salt.utils.vsan.vsan_supported(service_instance):
        raise VMwareApiError('VSAN operations are not supported')
    cluster_ref = salt.utils.vmware.get_cluster(dc_ref, cluster)
    cluster_spec = vim.ClusterConfigSpecEx()
    props = salt.utils.vmware.get_properties_of_managed_object(
        cluster_ref, properties=['configurationEx'])
    # Copy elements we want to update to spec
    for p in ['dasConfig', 'drsConfig']:
        setattr(cluster_spec, p, getattr(props['configurationEx'], p))
    if props['configurationEx'].vsanConfigInfo:
        cluster_spec.vsanConfig = props['configurationEx'].vsanConfigInfo
    vsan_spec = None
    vsan_61 = None
    if cluster_dict.get('vsan'):
        # XXX The correct way of retrieving the VSAN data (on the if branch)
        # is not supported before 60u2 vcenter
        vcenter_info = salt.utils.vmware.get_service_info(service_instance)
        if float(vcenter_info.apiVersion) >= 6.0 and int(vcenter_info.build) >= 3634794:  # 60u2
            vsan_61 = False
            vsan_info = salt.utils.vsan.get_cluster_vsan_info(cluster_ref)
            vsan_spec = vim.vsan.ReconfigSpec(modify=True)
            # Only interested in the vsanClusterConfig and the
            # dataEfficiencyConfig
            # vsan_spec.vsanClusterConfig = vsan_info
            vsan_spec.dataEfficiencyConfig = vsan_info.dataEfficiencyConfig
            vsan_info.dataEfficiencyConfig = None
        else:
            vsan_61 = True
    _apply_cluster_dict(cluster_spec, cluster_dict, vsan_spec, vsan_61)
    # We try to reconfigure vsan first as it fails if HA is enabled so the
    # command will abort not having any side-effects
    # also if HA was previously disabled it can be enabled automatically if
    # desired
    if vsan_spec:
        log.trace('vsan_spec = %s', vsan_spec)
        salt.utils.vsan.reconfigure_cluster_vsan(cluster_ref, vsan_spec)
        # We need to retrieve again the properties and reapply them
        # As the VSAN configuration has changed
        cluster_spec = vim.ClusterConfigSpecEx()
        props = salt.utils.vmware.get_properties_of_managed_object(
            cluster_ref, properties=['configurationEx'])
        # Copy elements we want to update to spec
        for p in ['dasConfig', 'drsConfig']:
            setattr(cluster_spec, p, getattr(props['configurationEx'], p))
        if props['configurationEx'].vsanConfigInfo:
            cluster_spec.vsanConfig = props['configurationEx'].vsanConfigInfo
        # We only need to configure the cluster_spec, as if it were a vsan_61
        # cluster
        _apply_cluster_dict(cluster_spec, cluster_dict)
    salt.utils.vmware.update_cluster(cluster_ref, cluster_spec)
    return {'update_cluster': True}

def get_service(raw_xml):
    """Set a service object based on the XML metadata

    <dct:references scheme="OGC:WMS">http://ngamaps.geointapps.org/arcgis
    /services/RIO/Rio_Foundation_Transportation/MapServer/WMSServer
    </dct:references>

    :param instance:
    :return: Layer
    """
    from pycsw.core.etree import etree
    parsed = etree.fromstring(raw_xml, etree.XMLParser(resolve_entities=False))
    # <dc:format>OGC:WMS</dc:format>
    source_tag = parsed.find("{http://purl.org/dc/elements/1.1/}source")
    # <dc:source>
    # http://ngamaps.geointapps.org/arcgis/services/RIO/Rio_Foundation_Transportation/MapServer/WMSServer
    # </dc:source>
    format_tag = parsed.find("{http://purl.org/dc/elements/1.1/}format")
    service_url = None
    service_type = None
    if hasattr(source_tag, 'text'):
        service_url = source_tag.text
    if hasattr(format_tag, 'text'):
        service_type = format_tag.text
    service, created = Service.objects.get_or_create(url=service_url,
                                                     is_monitored=False,
                                                     type=service_type)
    # TODO: dont hardcode SRS, get them from the parsed XML.
    srs, created = SpatialReferenceSystem.objects.get_or_create(code="EPSG:4326")
    service.srs.add(srs)
    return service

def replant_tree(self, config=None, exclude=None):
    '''Replant the tree with a different config setup

    Parameters:
        config (str):
            The config name to reload
        exclude (list):
            A list of environment variables to exclude
            from forced updates
    '''
    # reinitialize a new Tree with a new config
    self.__init__(key=self.key, config=config, update=True, exclude=exclude)

def get_login_password(site_name="github.com", netrc_file="~/.netrc",
                       git_credential_file="~/.git-credentials"):
    """Read a .netrc file and return login/password for LWN."""
    try:
        n = netrc.netrc(os.path.expanduser(netrc_file))
    except OSError:
        pass
    else:
        if site_name in n.hosts:
            return n.hosts[site_name][0], n.hosts[site_name][2]
    try:
        with open(os.path.expanduser(git_credential_file)) as f:
            for line in f:
                parsed = parse.urlparse(line.strip())
                if parsed.hostname == site_name:
                    return (parse.unquote(parsed.username),
                            parse.unquote(parsed.password))
    except OSError:
        pass
    return None, None

def subdir_path(directory, relative):
    """Returns a file path relative to another path."""
    item_bits = directory.split(os.sep)
    relative_bits = relative.split(os.sep)
    for i, _item in enumerate(item_bits):
        if i == len(relative_bits) - 1:
            return os.sep.join(item_bits[i:])
        else:
            if item_bits[i] != relative_bits[i]:
                return None
    return None

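# A hedged illustration (not from the original source), on a POSIX system
# where os.sep == '/'. The returned path starts at the last component shared
# with `relative`, and None is returned when the prefixes diverge.
print(subdir_path('/srv/data/reports/2021', '/srv/data'))
# -> 'data/reports/2021'
print(subdir_path('/srv/data/reports/2021', '/opt/other'))
# -> None
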
def list_operations(self, name, filter_, page_size=0, options=None):
    """Lists operations that match the specified filter in the request. If the
    server doesn't support this method, it returns ``UNIMPLEMENTED``.

    NOTE: the ``name`` binding below allows API services to override the binding
    to use different resource name schemes, such as ``users/*/operations``.

    Example:
        >>> from google.gapic.longrunning import operations_client
        >>> from google.gax import CallOptions, INITIAL_PAGE
        >>> api = operations_client.OperationsClient()
        >>> name = ''
        >>> filter_ = ''
        >>>
        >>> # Iterate over all results
        >>> for element in api.list_operations(name, filter_):
        >>>     # process element
        >>>     pass
        >>>
        >>> # Or iterate over results one page at a time
        >>> for page in api.list_operations(name, filter_, options=CallOptions(page_token=INITIAL_PAGE)):
        >>>     for element in page:
        >>>         # process element
        >>>         pass

    Args:
        name (string): The name of the operation collection.
        filter_ (string): The standard list filter.
        page_size (int): The maximum number of resources contained in the
            underlying API response. If page streaming is performed per-
            resource, this parameter does not affect the return value. If page
            streaming is performed per-page, this determines the maximum number
            of resources in a page.
        options (:class:`google.gax.CallOptions`): Overrides the default
            settings for this call, e.g, timeout, retries etc.

    Returns:
        A :class:`google.gax.PageIterator` instance. By default, this
        is an iterable of :class:`google.longrunning.operations_pb2.Operation` instances.
        This object can also be configured to iterate over the pages
        of the response through the `CallOptions` parameter.

    Raises:
        :exc:`google.gax.errors.GaxError` if the RPC is aborted.
        :exc:`ValueError` if the parameters are invalid.
    """
    # Create the request object.
    request = operations_pb2.ListOperationsRequest(name=name, filter=filter_,
                                                   page_size=page_size)
    return self._list_operations(request, options)

def release(self):
    """Release file and thread locks. If in 'degraded' mode, close the
    stream to reduce contention until the log files can be rotated.
    """
    try:
        if self._rotateFailed:
            self._close()
    except Exception:
        self.handleError(NullLogRecord())
    finally:
        try:
            if self.stream_lock and not self.stream_lock.closed:
                unlock(self.stream_lock)
        except Exception:
            self.handleError(NullLogRecord())
        finally:
            # release thread lock
            if Handler:
                Handler.release(self)

async def _find_trigger(
        self,
        request: Request,
        origin: Optional[Text] = None,
        internal: bool = False,
) -> Tuple[Optional[BaseTrigger], Optional[Type[BaseState]], Optional[bool]]:
    """Find the best trigger for this request, or go away."""
    reg = request.register
    if not origin:
        origin = reg.get(Register.STATE)
    logger.debug('From state: %s', origin)
    results = await asyncio.gather(
        *(x.rank(request, origin) for x in self.transitions if x.internal == internal)
    )
    if len(results):
        score, trigger, state, dnr = max(results, key=lambda x: x[0])
        if score >= settings.MINIMAL_TRIGGER_SCORE:
            return trigger, state, dnr
    return None, None, None

def update(self, table, values, identifier):
    """Updates a table row with specified data by given identifier.

    :param table: the expression of the table to update quoted or unquoted
    :param values: a dictionary containing column-value pairs
    :param identifier: the update criteria; a dictionary containing column-value pairs
    :return: the number of affected rows
    :rtype: int
    """
    with self.locked() as conn:
        return conn.update(table, values, identifier)

def get(self, position):
    """Gets value at index

    :param position: index
    :return: value at position
    """
    counter = 0
    current_node = self.head
    while current_node is not None and counter <= position:
        if counter == position:
            return current_node.val
        current_node = current_node.next_node
        counter += 1
    return None

def load_model(model_name, epoch_num, data_shapes, label_shapes, label_names, gpus=''):
    """Returns a module loaded with the provided model.

    Parameters
    ----------
    model_name : str
        Prefix of the MXNet model name as stored on the local directory.
    epoch_num : int
        Epoch number of model we would like to load.
    data_shapes : list of tuples
        List of tuples where each tuple is a pair of input variable name and its shape.
    label_shapes : list of (str, tuple)
        Typically is ``data_iter.provide_label``.
    label_names : list of str
        Name of the output labels in the MXNet symbolic graph.
    gpus : str
        Comma separated string of gpu ids on which inferences are executed.
        E.g. 3,5,6 would refer to GPUs 3, 5 and 6. If empty, we use CPU.

    Returns
    -------
    MXNet module
    """
    sym, arg_params, aux_params = mx.model.load_checkpoint(model_name, epoch_num)
    mod = create_module(sym, data_shapes, label_shapes, label_names, gpus)
    mod.set_params(arg_params=arg_params, aux_params=aux_params, allow_missing=True)
    return mod

def model_action_q_dist(self):
    """Action values for selected actions in the rollout"""
    q = self.get('model:q_dist')
    actions = self.get('rollout:actions')
    return q[range(q.size(0)), actions]

def set_attribute_string_array(target, name, string_list):
    """Sets an attribute to an array of string on a Dataset or Group.

    If the attribute `name` doesn't exist yet, it is created. If it
    already exists, it is overwritten with the list of string
    `string_list` (they will be vlen strings).

    Notes
    -----
    ``set_attributes_all`` is the fastest way to set and delete
    Attributes in bulk.

    Parameters
    ----------
    target : Dataset or Group
        Dataset or Group to set the string array attribute of.
    name : str
        Name of the attribute to set.
    string_list : list of str
        List of strings to set the attribute to. Strings must be ``str``

    See Also
    --------
    set_attributes_all
    """
    s_list = [convert_to_str(s) for s in string_list]
    if sys.hexversion >= 0x03000000:
        target.attrs.create(name, s_list,
                            dtype=h5py.special_dtype(vlen=str))
    else:
        target.attrs.create(name, s_list,
                            dtype=h5py.special_dtype(vlen=unicode))

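# A hedged usage sketch (not from the original source): attaching a list of
# strings as a variable-length string attribute on a fresh h5py group.
# 'example.h5' and the group name are placeholders.
import h5py

with h5py.File('example.h5', 'w') as f:
    grp = f.create_group('measurements')
    set_attribute_string_array(grp, 'channel_names', ['ch0', 'ch1', 'ch2'])
    # grp.attrs['channel_names'] now holds ['ch0', 'ch1', 'ch2'] as vlen strings
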
def remove(self, builder, model):
    """Remove the scope from a given query builder.

    :param builder: The query builder
    :type builder: eloquent.orm.builder.Builder

    :param model: The model
    :type model: eloquent.orm.Model
    """
    column = model.get_qualified_deleted_at_column()
    query = builder.get_query()
    wheres = []
    for where in query.wheres:
        # If the where clause is a soft delete date constraint,
        # we will remove it from the query and reset the keys
        # on the wheres. This allows the developer to include
        # deleted model in a relationship result set that is lazy loaded.
        if not self._is_soft_delete_constraint(where, column):
            wheres.append(where)
    query.wheres = wheres

def list_entries(self, projects=None, filter_=None, order_by=None,
                 page_size=None, page_token=None):
    """Return a page of log entries.

    See
    https://cloud.google.com/logging/docs/reference/v2/rest/v2/entries/list

    :type projects: list of strings
    :param projects: project IDs to include. If not passed,
                     defaults to the project bound to the client.

    :type filter_: str
    :param filter_:
        a filter expression. See
        https://cloud.google.com/logging/docs/view/advanced_filters

    :type order_by: str
    :param order_by: One of :data:`~google.cloud.logging.ASCENDING`
                     or :data:`~google.cloud.logging.DESCENDING`.

    :type page_size: int
    :param page_size:
        Optional. The maximum number of entries in each page of results
        from this request. Non-positive values are ignored. Defaults
        to a sensible value set by the API.

    :type page_token: str
    :param page_token:
        Optional. If present, return the next batch of entries, using
        the value, which must correspond to the ``nextPageToken`` value
        returned in the previous response. Deprecated: use the ``pages``
        property of the returned iterator instead of manually passing
        the token.

    :rtype: :class:`~google.api_core.page_iterator.Iterator`
    :returns: Iterator of log entries accessible to the current logger.
              See :class:`~google.cloud.logging.entries.LogEntry`.
    """
    log_filter = "logName=%s" % (self.full_name,)
    if filter_ is not None:
        filter_ = "%s AND %s" % (filter_, log_filter)
    else:
        filter_ = log_filter
    return self.client.list_entries(
        projects=projects, filter_=filter_, order_by=order_by,
        page_size=page_size, page_token=page_token)

def file_list(blockchain_id, config_path=CONFIG_PATH, wallet_keys=None):
    """List all files uploaded to a particular blockchain ID

    Return {'status': True, 'listing': list} on success
    Return {'error': ...} on error
    """
    config_dir = os.path.dirname(config_path)
    client_config_path = os.path.join(config_dir, blockstack_client.CONFIG_FILENAME)
    proxy = blockstack_client.get_default_proxy(config_path=client_config_path)
    res = blockstack_client.data_list(blockchain_id, wallet_keys=wallet_keys, proxy=proxy)
    if 'error' in res:
        log.error("Failed to list data: %s" % res['error'])
        return {'error': 'Failed to list data'}
    listing = []
    # find the ones that this app put there
    for rec in res['listing']:
        if not file_is_fq_data_name(rec['data_id']):
            continue
        listing.append(rec)
    return {'status': True, 'listing': listing}

def set_item_class_name_on_custom_generator_class(cls):
    """Set the attribute `cls.__tohu_items_name__` to a string which defines the name
    of the namedtuple class which will be used to produce items for the custom
    generator.

    By default this will be the first part of the class name (before '...Generator'),
    for example:

        FoobarGenerator -> Foobar
        QuuxGenerator -> Quux

    However, it can be set explicitly by the user by defining `__tohu_items_name__`
    in the class definition, for example:

        class Quux(CustomGenerator):
            __tohu_items_name__ = 'MyQuuxItem'
    """
    if '__tohu_items_name__' in cls.__dict__:
        logger.debug(
            f"Using item class name '{cls.__tohu_items_name__}' (derived from attribute '__tohu_items_name__')")
    else:
        m = re.match('^(.*)Generator$', cls.__name__)
        if m is not None:
            cls.__tohu_items_name__ = m.group(1)
            logger.debug(
                f"Using item class name '{cls.__tohu_items_name__}' (derived from custom generator name)")
        else:
            raise ValueError(
                "Cannot derive class name for items to be produced by custom generator. "
                "Please set '__tohu_items_name__' at the top of the custom generator's "
                "definition or change its name so that it ends in '...Generator'")

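# A minimal sketch (not from the original source) of the naming rule described
# above, applied to a hypothetical class name with the same regex.
import re

m = re.match('^(.*)Generator$', 'FoobarGenerator')
print(m.group(1))  # -> 'Foobar'
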
def ValidateTimezone(timezone, column_name=None, problems=None):
    """Validates a non-required timezone string value using IsValidTimezone():
    - if invalid adds InvalidValue error (if problems accumulator is provided)
    - an empty timezone string is regarded as valid! Otherwise we might end up
      with many duplicate errors because of the required field checks.
    """
    if IsEmpty(timezone) or IsValidTimezone(timezone):
        return True
    else:
        if problems:
            # if we get here pytz has already been imported successfully in
            # IsValidTimezone(). So a try-except block is not needed here.
            import pytz
            problems.InvalidValue(
                column_name, timezone,
                '"%s" is not a common timezone name according to pytz version %s'
                % (timezone, pytz.VERSION))
        return False

def addTile(self, value=None, choices=None):
    """add a random tile in an empty cell

    value: value of the tile to add.
    choices: a list of possible choices for the value of the tile. if
        ``None`` (the default), it uses ``[2, 2, 2, 2, 2, 2, 2, 2, 2, 4]``.
    """
    if choices is None:
        choices = [2] * 9 + [4]
    if value:
        choices = [value]
    v = random.choice(choices)
    empty = self.getEmptyCells()
    if empty:
        x, y = random.choice(empty)
        self.setCell(x, y, v)

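# A hedged illustration (not from the original source): with the default
# choices list, new tiles come up as 4 roughly one time in ten.
import random
from collections import Counter

choices = [2] * 9 + [4]
draws = Counter(random.choice(choices) for _ in range(10000))
# draws[2] is close to 9000 and draws[4] close to 1000
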
def parse_header(filename):
    '''Returns a list of :attr:`VariableSpec`, :attr:`FunctionSpec`,
    :attr:`StructSpec`, :attr:`EnumSpec`, :attr:`EnumMemberSpec`, and
    :attr:`TypeDef` instances representing the c header file.
    '''
    with open(filename, 'rb') as fh:
        content = '\n'.join(fh.read().splitlines())
    content = sub('\t', ' ', content)
    content = strip_comments(content)
    # first get the functions
    content = split(func_pat_short, content)
    for i, s in enumerate(content):
        if i % 2 and content[i].strip():  # matched a prototype
            try:
                content[i] = parse_prototype(content[i])
            except Exception as e:
                traceback.print_exc()
    # now process structs
    res = []
    for i, item in enumerate(content):
        if not isinstance(item, str):  # if it's already a func etc. skip it
            res.append(item)
            continue
        items = split(struct_pat, item)
        j = 0
        while j < len(items):
            if not j % 5:
                res.append(items[j])
                j += 1
            else:
                if items[j].strip() == 'enum':
                    res.append(parse_enum(*items[j + 1:j + 4]))
                else:
                    res.append(parse_struct(*items[j + 1:j + 4]))
                j += 4
    # now do remaining simple typedefs
    content = res
    res = []
    for i, item in enumerate(content):
        if not isinstance(item, str):  # if it's already processed skip it
            res.append(item)
            continue
        items = split(typedef_pat, item)
        for j, item in enumerate(items):
            res.append(TypeDef(item.strip()) if j % 2 else item)
    content = [c for c in res if not isinstance(c, str) or c.strip()]
    return content

def authenticationAndCipheringReject():
    """AUTHENTICATION AND CIPHERING REJECT Section 9.4.11"""
    a = TpPd(pd=0x3)
    b = MessageType(mesType=0x14)  # 00010100
    packet = a / b
    return packet

def connection(self):
    """identify the remote connection parameters"""
    self.getPorts()  # acquire if necessary
    self.getIPaddresses()  # acquire if necessary
    return (self.ipAddress, self.ports)

def update(self, f):
    """Copy another file's properties into this one."""
    for p in self.__mapper__.attrs:
        if p.key == 'oid':
            continue
        try:
            setattr(self, p.key, getattr(f, p.key))
        except AttributeError:
            # The dict() method copies data property values into the main dict,
            # and these don't have associated class properties.
            continue

def autobuild_documentation(tile):
    """Generate documentation for this module using a combination of sphinx and breathe"""
    docdir = os.path.join('#doc')
    docfile = os.path.join(docdir, 'conf.py')
    outdir = os.path.join('build', 'output', 'doc', tile.unique_id)
    outfile = os.path.join(outdir, '%s.timestamp' % tile.unique_id)
    env = Environment(ENV=os.environ, tools=[])
    # Only build doxygen documentation if we have C firmware to build from
    if os.path.exists('firmware'):
        autobuild_doxygen(tile)
        env.Depends(outfile, 'doxygen')
    # There is no /dev/null on Windows
    # Also disable color output on Windows since it seems to leave powershell
    # in a weird state.
    if platform.system() == 'Windows':
        action = 'sphinx-build --no-color -b html %s %s > NUL' % (docdir[1:], outdir)
    else:
        action = 'sphinx-build -b html %s %s > /dev/null' % (docdir[1:], outdir)
    env.Command(outfile, docfile,
                action=env.Action(action, "Building Component Documentation"))
    Alias('documentation', outdir)
    env.Clean(outfile, outdir)

def staged_rewards(self):
    """Helper function to return staged rewards based on current physical states.

    Returns:
        r_reach (float): reward for reaching and grasping
        r_lift (float): reward for lifting and aligning
        r_stack (float): reward for stacking
    """
    # reaching is successful when the gripper site is close to
    # the center of the cube
    cubeA_pos = self.sim.data.body_xpos[self.cubeA_body_id]
    cubeB_pos = self.sim.data.body_xpos[self.cubeB_body_id]
    gripper_site_pos = self.sim.data.site_xpos[self.eef_site_id]
    dist = np.linalg.norm(gripper_site_pos - cubeA_pos)
    r_reach = (1 - np.tanh(10.0 * dist)) * 0.25
    # collision checking
    touch_left_finger = False
    touch_right_finger = False
    touch_cubeA_cubeB = False
    for i in range(self.sim.data.ncon):
        c = self.sim.data.contact[i]
        if c.geom1 in self.l_finger_geom_ids and c.geom2 == self.cubeA_geom_id:
            touch_left_finger = True
        if c.geom1 == self.cubeA_geom_id and c.geom2 in self.l_finger_geom_ids:
            touch_left_finger = True
        if c.geom1 in self.r_finger_geom_ids and c.geom2 == self.cubeA_geom_id:
            touch_right_finger = True
        if c.geom1 == self.cubeA_geom_id and c.geom2 in self.r_finger_geom_ids:
            touch_right_finger = True
        if c.geom1 == self.cubeA_geom_id and c.geom2 == self.cubeB_geom_id:
            touch_cubeA_cubeB = True
        if c.geom1 == self.cubeB_geom_id and c.geom2 == self.cubeA_geom_id:
            touch_cubeA_cubeB = True
    # additional grasping reward
    if touch_left_finger and touch_right_finger:
        r_reach += 0.25
    # lifting is successful when the cube is above the table top
    # by a margin
    cubeA_height = cubeA_pos[2]
    table_height = self.table_full_size[2]
    cubeA_lifted = cubeA_height > table_height + 0.04
    r_lift = 1.0 if cubeA_lifted else 0.0
    # Aligning is successful when cubeA is right above cubeB
    if cubeA_lifted:
        horiz_dist = np.linalg.norm(np.array(cubeA_pos[:2]) - np.array(cubeB_pos[:2]))
        r_lift += 0.5 * (1 - np.tanh(horiz_dist))
    # stacking is successful when the block is lifted and
    # the gripper is not holding the object
    r_stack = 0
    not_touching = not touch_left_finger and not touch_right_finger
    if not_touching and r_lift > 0 and touch_cubeA_cubeB:
        r_stack = 2.0
    return (r_reach, r_lift, r_stack)

def export_users(path_prefix='/', region=None, key=None, keyid=None, profile=None):
    '''Get all IAM user details. Produces results that can be used to create an
    sls file.

    .. versionadded:: 2016.3.0

    CLI Example:

        salt-call boto_iam.export_users --out=txt | sed "s/local: //" > iam_users.sls
    '''
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    if not conn:
        return None
    results = odict.OrderedDict()
    users = get_all_users(path_prefix, region, key, keyid, profile)
    for user in users:
        name = user.user_name
        _policies = conn.get_all_user_policies(name, max_items=100)
        _policies = _policies.list_user_policies_response.list_user_policies_result.policy_names
        policies = {}
        for policy_name in _policies:
            _policy = conn.get_user_policy(name, policy_name)
            _policy = salt.utils.json.loads(_unquote(
                _policy.get_user_policy_response.get_user_policy_result.policy_document))
            policies[policy_name] = _policy
        user_sls = []
        user_sls.append({"name": name})
        user_sls.append({"policies": policies})
        user_sls.append({"path": user.path})
        results["manage user " + name] = {"boto_iam.user_present": user_sls}
    return __utils__['yaml.safe_dump'](results, default_flow_style=False, indent=2)

def get_settings(profile, section, store='local'):
    '''Get the firewall property from the specified profile in the specified store
    as returned by ``netsh advfirewall``.

    .. versionadded:: 2018.3.4
    .. versionadded:: 2019.2.0

    Args:

        profile (str):
            The firewall profile to query. Valid options are:

            - domain
            - public
            - private

        section (str):
            The property to query within the selected profile. Valid options
            are:

            - firewallpolicy : inbound/outbound behavior
            - logging : firewall logging settings
            - settings : firewall properties
            - state : firewalls state (on | off)

        store (str):
            The store to use. This is either the local firewall policy or the
            policy defined by local group policy. Valid options are:

            - lgpo
            - local

            Default is ``local``

    Returns:
        dict: A dictionary containing the properties for the specified profile

    Raises:
        CommandExecutionError: If an error occurs
        ValueError: If the parameters are incorrect

    CLI Example:

    .. code-block:: bash

        # Get the inbound/outbound firewall settings for connections on the
        # local domain profile
        salt * win_firewall.get_settings domain firewallpolicy

        # Get the inbound/outbound firewall settings for connections on the
        # domain profile as defined by local group policy
        salt * win_firewall.get_settings domain firewallpolicy lgpo
    '''
    return salt.utils.win_lgpo_netsh.get_settings(profile=profile,
                                                  section=section,
                                                  store=store)

def add_link(self):
    "Create a new internal link"
    n = len(self.links) + 1
    self.links[n] = (0, 0)
    return n

def search_titles(self, title):
    """Search for titles matching the `title`.

    :param str title: the title to search for.
    :return: found titles.
    :rtype: dict
    """
    # make the query
    logger.info('Searching title %r', title)
    r = self.session.get(self.server_url + 'legenda/sugestao/{}'.format(title), timeout=10)
    r.raise_for_status()
    results = json.loads(r.text)
    # loop over results
    titles = {}
    for result in results:
        source = result['_source']
        # extract id
        title_id = int(source['id_filme'])
        # extract type and title
        title = {'type': type_map[source['tipo']], 'title': source['dsc_nome']}
        # extract year
        if source['dsc_data_lancamento'] and source['dsc_data_lancamento'].isdigit():
            title['year'] = int(source['dsc_data_lancamento'])
        # extract imdb_id
        if source['id_imdb'] != '0':
            if not source['id_imdb'].startswith('tt'):
                title['imdb_id'] = 'tt' + source['id_imdb'].zfill(7)
            else:
                title['imdb_id'] = source['id_imdb']
        # extract season
        if title['type'] == 'episode':
            if source['temporada'] and source['temporada'].isdigit():
                title['season'] = int(source['temporada'])
            else:
                match = season_re.search(source['dsc_nome_br'])
                if match:
                    title['season'] = int(match.group('season'))
                else:
                    logger.warning('No season detected for title %d', title_id)
        # add title
        titles[title_id] = title
    logger.debug('Found %d titles', len(titles))
    return titles

def _container_candidates(self):
    """Generate container candidate list

    Returns:
        tuple list: [(width1, height1), (width2, height2), ...]
    """
    if not self._rectangles:
        return []
    if self._rotation:
        sides = sorted(side for rect in self._rectangles for side in rect)
        max_height = sum(max(r[0], r[1]) for r in self._rectangles)
        min_width = max(min(r[0], r[1]) for r in self._rectangles)
        max_width = max_height
    else:
        sides = sorted(r[0] for r in self._rectangles)
        max_height = sum(r[1] for r in self._rectangles)
        min_width = max(r[0] for r in self._rectangles)
        max_width = sum(sides)
    if self._max_width and self._max_width < max_width:
        max_width = self._max_width
    if self._max_height and self._max_height < max_height:
        max_height = self._max_height
    assert (max_width > min_width)
    # Generate initial container widths
    candidates = [max_width, min_width]
    width = 0
    for s in reversed(sides):
        width += s
        candidates.append(width)
    width = 0
    for s in sides:
        width += s
        candidates.append(width)
    candidates.append(max_width)
    candidates.append(min_width)
    # Remove duplicates and widths too big or small
    seen = set()
    seen_add = seen.add
    candidates = [x for x in candidates if not (x in seen or seen_add(x))]
    candidates = [x for x in candidates if not (x > max_width or x < min_width)]
    # Remove candidates too small to fit all the rectangles
    min_area = sum(r[0] * r[1] for r in self._rectangles)
    return [(c, max_height) for c in candidates if c * max_height >= min_area]

def make_ring_dicts(**kwargs):
    """Build and return the information about the Galprop rings"""
    library_yamlfile = kwargs.get('library', 'models/library.yaml')
    gmm = kwargs.get('GalpropMapManager', GalpropMapManager(**kwargs))
    if library_yamlfile is None or library_yamlfile == 'None':
        return gmm
    diffuse_comps = DiffuseModelManager.read_diffuse_component_yaml(library_yamlfile)
    for diffuse_value in diffuse_comps.values():
        if diffuse_value is None:
            continue
        if diffuse_value['model_type'] != 'galprop_rings':
            continue
        versions = diffuse_value['versions']
        for version in versions:
            gmm.make_ring_dict(version)
    return gmm

def add_task(self, task):
    """Schedule a task to run later, after the loop has started.

    Different from asyncio.ensure_future in that it does not
    also return a future, and the actual ensure_future call
    is delayed until before server start.

    :param task: future, coroutine or awaitable
    """
    try:
        if callable(task):
            try:
                self.loop.create_task(task(self))
            except TypeError:
                self.loop.create_task(task())
        else:
            self.loop.create_task(task)
    except SanicException:
        @self.listener("before_server_start")
        def run(app, loop):
            if callable(task):
                try:
                    loop.create_task(task(self))
                except TypeError:
                    loop.create_task(task())
            else:
                loop.create_task(task)

def teletex_search_function(name):
    """Search function for teletex codec that is passed to codecs.register()"""
    if name != 'teletex':
        return None
    return codecs.CodecInfo(
        name='teletex',
        encode=TeletexCodec().encode,
        decode=TeletexCodec().decode,
        incrementalencoder=TeletexIncrementalEncoder,
        incrementaldecoder=TeletexIncrementalDecoder,
        streamreader=TeletexStreamReader,
        streamwriter=TeletexStreamWriter,
    )

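# A hedged usage sketch (not from the original source): registering the search
# function with the standard codecs machinery so that lookups by name work.
import codecs

codecs.register(teletex_search_function)
info = codecs.lookup('teletex')  # resolves to the CodecInfo built above
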
def dagger(self, inv_dict=None, suffix="-INV"):
    """Creates the conjugate transpose of the Quil program. The program must not
    contain any irreversible actions (measurement, control flow, qubit allocation).

    :return: The Quil program's inverse
    :rtype: Program
    """
    if not self.is_protoquil():
        raise ValueError("Program must be valid Protoquil")
    daggered = Program()
    for gate in self._defined_gates:
        if inv_dict is None or gate.name not in inv_dict:
            if gate.parameters:
                raise TypeError("Cannot auto define daggered version of parameterized gates")
            daggered.defgate(gate.name + suffix, gate.matrix.T.conj())
    for gate in reversed(self._instructions):
        if gate.name in QUANTUM_GATES:
            if gate.name == "S":
                daggered.inst(QUANTUM_GATES["PHASE"](-pi / 2, *gate.qubits))
            elif gate.name == "T":
                daggered.inst(QUANTUM_GATES["RZ"](pi / 4, *gate.qubits))
            elif gate.name == "ISWAP":
                daggered.inst(QUANTUM_GATES["PSWAP"](pi / 2, *gate.qubits))
            else:
                negated_params = list(map(lambda x: -1 * x, gate.params))
                daggered.inst(QUANTUM_GATES[gate.name](*(negated_params + gate.qubits)))
        else:
            if inv_dict is None or gate.name not in inv_dict:
                gate_inv_name = gate.name + suffix
            else:
                gate_inv_name = inv_dict[gate.name]
            daggered.inst(Gate(gate_inv_name, gate.params, gate.qubits))
    return daggered

def tokenize_argument(text):
    """Process both optional and required arguments.

    :param Buffer text: iterator over line, with current position
    """
    for delim in ARG_TOKENS:
        if text.startswith(delim):
            return text.forward(len(delim))

def service_per_endpoint(self, context=None):
    """List all endpoints this entity publishes and which service and binding
    that are behind each endpoint.

    :param context: Type of entity
    :return: Dictionary with endpoint url as key and a tuple of
        service and binding as value
    """
    endps = self.getattr("endpoints", context)
    res = {}
    for service, specs in endps.items():
        for endp, binding in specs:
            res[endp] = (service, binding)
    return res

def get(cls, dbname="perfdump"):
    """Returns the singleton connection to the SQLite3 database.

    :param dbname: The database name
    :type dbname: str
    """
    try:
        return cls.connection
    except AttributeError:
        cls.connect(dbname)
        return cls.connection

def set_size(self, name: str, size: int):
    """Set the size of a resource in the RSTB."""
    if self._needs_to_be_in_name_map(name):
        if len(name) >= 128:
            raise ValueError("Name is too long")
        self.name_map[name] = size
    else:
        crc32 = binascii.crc32(name.encode())
        self.crc32_map[crc32] = size

def getMoviesFromJSON ( jsonURL ) :
"""Main function for this library
Returns list of Movie classes from apple . com / trailers json URL
such as : http : / / trailers . apple . com / trailers / home / feeds / just _ added . json
The Movie classes use lazy loading mechanisms so that data not
directly available from JSON are loaded on demand . Currently these
lazy loaded parts are :
* poster
* trailerLinks
* description
Be warned that accessing these fields can take long time due to
network access . Therefore do the loading in thread separate from
UI thread or your users will notice .
There are optional fields that may or may not be present in every
Movie instance . These include :
* actors ( list )
* directors ( list )
* rating ( string )
* genre ( string )
* studio ( string )
* releasedate ( string )
Please take care when trying to access these fields as they may
not exist ."""
|
response = urllib . request . urlopen ( jsonURL )
jsonData = response . read ( ) . decode ( 'utf-8' )
objects = json . loads ( jsonData )
# make it work for search urls
if jsonURL . find ( 'quickfind' ) != - 1 :
objects = objects [ 'results' ]
optionalInfo = [ 'actors' , 'directors' , 'rating' , 'genre' , 'studio' , 'releasedate' ]
movies = [ ]
for obj in objects :
movie = Movie ( )
movie . title = obj [ 'title' ]
movie . baseURL = obj [ 'location' ]
movie . posterURL = obj [ 'poster' ]
# sometimes posters don ' t have http part
if movie . posterURL . find ( 'http:' ) == - 1 :
movie . posterURL = "http://apple.com%s" % movie . posterURL
movie . trailers = obj [ 'trailers' ]
for i in optionalInfo :
if i in obj :
setattr ( movie , i , obj [ i ] )
movies . append ( movie )
return movies
|
def sanitize_type ( raw_type ) :
"""Sanitize the raw type string ."""
|
cleaned = get_printable ( raw_type ) . strip ( )
for bad in [ r'__drv_aliasesMem' , r'__drv_freesMem' , r'__drv_strictTypeMatch\(\w+\)' , r'__out_data_source\(\w+\)' , r'_In_NLS_string_\(\w+\)' , r'_Frees_ptr_' , r'_Frees_ptr_opt_' , r'opt_' , r'\(Mem\) ' ] :
cleaned = re . sub ( bad , '' , cleaned ) . strip ( )
if cleaned in [ '_EXCEPTION_RECORD *' , '_EXCEPTION_POINTERS *' ] :
cleaned = cleaned . strip ( '_' )
cleaned = cleaned . replace ( '[]' , '*' )
return cleaned
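# Worked example (illustrative, assuming get_printable returns plain ASCII
# input unchanged):
#   sanitize_type('_Frees_ptr_opt_ LPVOID')  ->  'LPVOID'
#   sanitize_type('_EXCEPTION_RECORD *')     ->  'EXCEPTION_RECORD *'
#   sanitize_type('const BYTE []')           ->  'const BYTE *'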
|
def accuracy ( y , z ) :
"""Classification accuracy ` ( tp + tn ) / ( tp + tn + fp + fn ) `"""
|
tp , tn , fp , fn = contingency_table ( y , z )
return ( tp + tn ) / ( tp + tn + fp + fn )
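# Self-contained sketch (assumption: contingency_table returns (tp, tn, fp, fn)
# counts for binary labels y and predictions z, as unpacked above):
import numpy as np

def contingency_table_sketch(y, z):
    y, z = np.asarray(y).astype(bool), np.asarray(z).astype(bool)
    return ((y & z).sum(), (~y & ~z).sum(), (~y & z).sum(), (y & ~z).sum())

tp, tn, fp, fn = contingency_table_sketch([1, 0, 1, 1, 0], [1, 0, 0, 1, 1])
print((tp + tn) / (tp + tn + fp + fn))  # 0.6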
|
def get_unread_topics ( self , topics , user ) :
"""Returns a list of unread topics for the given user from a given set of topics ."""
|
unread_topics = [ ]
# A user which is not authenticated will never see a topic as unread .
# If there are no topics to consider , we stop here .
if not user . is_authenticated or topics is None or not len ( topics ) :
return unread_topics
# A topic can be unread if a track for itself exists with a mark time that
# is less important than its update date .
topic_ids = [ topic . id for topic in topics ]
topic_tracks = TopicReadTrack . objects . filter ( topic__in = topic_ids , user = user )
tracked_topics = dict ( topic_tracks . values_list ( 'topic__pk' , 'mark_time' ) )
if tracked_topics :
for topic in topics :
topic_last_modification_date = topic . last_post_on or topic . created
if ( topic . id in tracked_topics . keys ( ) and topic_last_modification_date > tracked_topics [ topic . id ] ) :
unread_topics . append ( topic )
# A topic can be unread if a track for its associated forum exists with
# a mark time that is less important than its creation or update date .
forum_ids = [ topic . forum_id for topic in topics ]
forum_tracks = ForumReadTrack . objects . filter ( forum_id__in = forum_ids , user = user )
tracked_forums = dict ( forum_tracks . values_list ( 'forum__pk' , 'mark_time' ) )
if tracked_forums :
for topic in topics :
topic_last_modification_date = topic . last_post_on or topic . created
if ( ( topic . forum_id in tracked_forums . keys ( ) and topic . id not in tracked_topics ) and topic_last_modification_date > tracked_forums [ topic . forum_id ] ) :
unread_topics . append ( topic )
# A topic can be unread if no tracks exists for it
for topic in topics :
if topic . forum_id not in tracked_forums and topic . id not in tracked_topics :
unread_topics . append ( topic )
return list ( set ( unread_topics ) )
|
def _load_generic ( packname , package , section , target ) :
"""Loads the settings for generic options that take FQDN and a boolean value
(1 or 0 ) .
Args :
packname ( str ) : name of the package to get config settings for .
package : actual package object .
section ( str ) : name of the config section to read the FQDN entries from .
target ( dict ) : dict updated in place with ` fqdn - > bool ` entries ."""
|
from acorn . config import settings
spack = settings ( packname )
if spack . has_section ( section ) :
secitems = dict ( spack . items ( section ) )
for fqdn , active in secitems . items ( ) :
target [ fqdn ] = active == "1"
|
def remember ( self , key , minutes , callback ) :
"""Get an item from the cache , or store the default value .
: param key : The cache key
: type key : str
: param minutes : The lifetime in minutes of the cached value
: type minutes : int or datetime
: param callback : The default function
: type callback : mixed
: rtype : mixed"""
|
# If the item exists in the cache we will just return this immediately
# otherwise we will execute the given callback and cache the result
# of that execution for the given number of minutes in storage .
val = self . get ( key )
if val is not None :
return val
val = value ( callback )
self . put ( key , val , minutes )
return val
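# Usage sketch (hypothetical cache instance; `value(callback)` above is assumed
# to invoke the callback when it is callable and return the result):
#
#   users = cache.remember('users', 10, lambda: expensive_db_query())
#
# The first call runs the query and stores the result for 10 minutes; later
# calls within that window return the cached value without re-running it.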
|
def fit_freq_std_dev ( self , training_signal ) :
"""Defines a spectral mask based on training data using the standard deviation values of
each frequency component
Args :
training _ signal : Training data"""
|
window_length = len ( self . window )
window_weight = sum ( self . window )
num_of_windows = len ( training_signal ) - window_length - 1
mean = np . zeros ( int ( window_length / 2 ) + 1 )
pow = np . zeros ( int ( window_length / 2 ) + 1 )
temp = np . zeros ( int ( window_length / 2 ) + 1 )
rfft = np . fft . rfft ( training_signal [ 0 : 0 + window_length ] * self . window )
max = np . abs ( rfft ) / window_weight
min = max
for i in range ( 0 , num_of_windows ) :
rfft = np . fft . rfft ( training_signal [ i : i + window_length ] * self . window )
temp = np . abs ( rfft ) / window_weight
max = np . maximum ( temp , max )
min = np . minimum ( temp , min )
mean = mean + temp
pow = pow + np . power ( temp , 2 )
mean = mean / num_of_windows
pow = pow / num_of_windows
std_dev = np . sqrt ( pow - np . power ( mean , 2 ) )
self . mask_top = mean + self . gain * std_dev
self . mask_bottom = np . maximum ( mean - self . gain * std_dev , np . zeros ( int ( window_length / 2 ) + 1 ) )
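# Self-contained numpy sketch of the same computation (assumed setup: a Hann
# window and a gain of 3; the real object presumably carries these as
# self.window and self.gain):
import numpy as np

signal = np.random.default_rng(0).normal(size=4096)
window, gain = np.hanning(256), 3.0
frames = np.array([signal[i:i + 256] * window
                   for i in range(len(signal) - 256 - 1)])
mags = np.abs(np.fft.rfft(frames, axis=1)) / window.sum()
mean, std = mags.mean(axis=0), mags.std(axis=0)
mask_top = mean + gain * std
mask_bottom = np.maximum(mean - gain * std, 0.0)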
|
def get_template_name ( self , template_name ) :
"""Returns template path , either from a shortcut built - in template
name ( ` form _ as _ p ` ) or a template path ( ` my _ sitegate / my _ tpl . html ` ) .
Note that template path can include one ` % s ` placeholder . In that case
it will be replaced with flow type ( ` signin ` or ` signup ` ) ."""
|
if template_name is None : # Build default template path .
template_name = self . default_form_template
if '.html' not in template_name : # Shortcut , e . g . : .
template_name = '%s%s.html' % ( 'sitegate/%s/' , template_name )
if '%s' in template_name : # Fill in the flow type placeholder .
template_name = template_name % self . flow_type
return template_name
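# Worked example (illustrative values): with default_form_template = 'form_as_p'
# and flow_type = 'signin',
#   get_template_name(None)                      -> 'sitegate/signin/form_as_p.html'
#   get_template_name('form_as_table')           -> 'sitegate/signin/form_as_table.html'
#   get_template_name('my_sitegate/my_tpl.html') -> 'my_sitegate/my_tpl.html'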
|
def _get_query ( self , query , params ) :
"""Submit a GET request to the VT API
: param query : The query ( see https : / / www . virustotal . com / en / documentation / private - api / for types of queries )
: param params : parameters of the query
: return : JSON formatted response from the API"""
|
if "apikey" not in params :
params [ "apikey" ] = self . api_key
response = requests . get ( query , params = params )
return response . json ( )
|
def blogurl ( parser , token ) :
"""Compatibility tag to allow django - fluent - blogs to operate stand - alone .
Either the app can be hooked in the URLconf directly , or it can be added as a pagetype of django - fluent - pages .
For the former , URL resolving works via the normal ' { % url " viewname " arg1 arg2 % } ' syntax .
For the latter , the URL resolving works via ' { % appurl " viewname " arg1 arg2 % } ' syntax ."""
|
if HAS_APP_URLS :
from fluent_pages . templatetags . appurl_tags import appurl
return appurl ( parser , token )
else :
from django . template . defaulttags import url
return url ( parser , token )
|
def get_label_cls ( self , labels , label_cls : Callable = None , label_delim : str = None , ** kwargs ) :
"Return ` label _ cls ` or guess one from the first element of ` labels ` ."
|
if label_cls is not None :
return label_cls
if self . label_cls is not None :
return self . label_cls
if label_delim is not None :
return MultiCategoryList
it = index_row ( labels , 0 )
if isinstance ( it , ( float , np . float32 ) ) :
return FloatList
if isinstance ( try_int ( it ) , ( str , Integral ) ) :
return CategoryList
if isinstance ( it , Collection ) :
return MultiCategoryList
return ItemList
|
def merge_vertices ( self , digits = None ) :
"""Merges vertices which are identical and replace references .
Parameters
digits : None , or int
How many digits to consider when merging vertices
Alters
self . entities : entity . points re - referenced
self . vertices : duplicates removed"""
|
if len ( self . vertices ) == 0 :
return
if digits is None :
digits = util . decimal_to_digits ( tol . merge * self . scale , min_digits = 1 )
unique , inverse = grouping . unique_rows ( self . vertices , digits = digits )
self . vertices = self . vertices [ unique ]
entities_ok = np . ones ( len ( self . entities ) , dtype = bool )
for index , entity in enumerate ( self . entities ) : # what kind of entity are we dealing with
kind = type ( entity ) . __name__
# entities that don ' t need runs merged
# don ' t screw up control - point - knot relationship
if kind in 'BSpline Bezier Text' :
entity . points = inverse [ entity . points ]
continue
# if we merged duplicate vertices , the entity may
# have multiple references to the same vertex
points = grouping . merge_runs ( inverse [ entity . points ] )
# if there are three points and two are identical fix it
if kind == 'Line' :
if len ( points ) == 3 and points [ 0 ] == points [ - 1 ] :
points = points [ : 2 ]
elif len ( points ) < 2 : # lines need two or more vertices
entities_ok [ index ] = False
elif kind == 'Arc' and len ( points ) != 3 : # three point arcs need three points
entities_ok [ index ] = False
# store points in entity
entity . points = points
# remove degenerate entities
self . entities = self . entities [ entities_ok ]
|
def get_any_nt_unit_rule ( g ) :
"""Returns a non - terminal unit rule from ' g ' , or None if there is none ."""
|
for rule in g . rules :
if len ( rule . rhs ) == 1 and isinstance ( rule . rhs [ 0 ] , NT ) :
return rule
return None
|
def _construct_nx_tree ( self , thisTree , thatTree = None ) :
"""A function for creating networkx instances that can be used
more efficiently for graph manipulation than the MergeTree
class .
@ In , thisTree , a MergeTree instance for which we will
construct a networkx graph
@ In , thatTree , a MergeTree instance optionally used to
speed up the processing by bypassing the fully augmented
search and only focusing on the partially augmented
split and join trees
@ Out , nxTree , a networkx . Graph instance matching the
details of the input tree ."""
|
if self . debug :
sys . stdout . write ( "Networkx Tree construction: " )
start = time . clock ( )
nxTree = nx . DiGraph ( )
nxTree . add_edges_from ( thisTree . edges )
nodesOfThatTree = [ ]
if thatTree is not None :
nodesOfThatTree = thatTree . nodes . keys ( )
# Fully or partially augment the join tree
for ( superNode , _ ) , nodes in thisTree . augmentedEdges . items ( ) :
superNodeEdge = list ( nxTree . out_edges ( superNode ) )
if len ( superNodeEdge ) > 1 :
warnings . warn ( "The supernode {} should have only a single " "emanating edge. Merge tree is invalidly " "structured" . format ( superNode ) )
endNode = superNodeEdge [ 0 ] [ 1 ]
startNode = superNode
nxTree . remove_edge ( startNode , endNode )
for node in nodes :
if thatTree is None or node in nodesOfThatTree :
nxTree . add_edge ( startNode , node )
startNode = node
# Make sure this is not the root node trying to connect to
# itself
if startNode != endNode :
nxTree . add_edge ( startNode , endNode )
if self . debug :
end = time . clock ( )
sys . stdout . write ( "%f s\n" % ( end - start ) )
return nxTree
|
def check_thresholds ( self , service = None , use_ta = True ) :
"""Check all limits and current usage against their specified thresholds ;
return all : py : class : ` ~ . AwsLimit ` instances that have crossed
one or more of their thresholds .
If ` ` service ` ` is specified , the returned dict has one element ,
the service name , whose value is a nested dict as described below ;
otherwise it includes all known services .
The returned : py : class : ` ~ . AwsLimit ` objects can be interrogated
for their limits ( : py : meth : ` ~ . AwsLimit . get _ limit ` ) as well as
the details of usage that crossed the thresholds
( : py : meth : ` ~ . AwsLimit . get _ warnings ` and
: py : meth : ` ~ . AwsLimit . get _ criticals ` ) .
See : py : meth : ` . AwsLimit . check _ thresholds ` .
: param service : the name ( s ) of one or more service ( s ) to return
results for
: type service : list
: param use _ ta : check Trusted Advisor for information on limits
: type use _ ta : bool
: returns : dict of service name ( string ) to nested dict
of limit name ( string ) to limit ( : py : class : ` ~ . AwsLimit ` )
: rtype : dict"""
|
res = { }
to_get = self . services
if service is not None :
to_get = dict ( ( each , self . services [ each ] ) for each in service )
if use_ta :
self . ta . update_limits ( )
for sname , cls in to_get . items ( ) :
if hasattr ( cls , '_update_limits_from_api' ) :
cls . _update_limits_from_api ( )
tmp = cls . check_thresholds ( )
if len ( tmp ) > 0 :
res [ sname ] = tmp
return res
|
def _get_files_config ( src_dir , files_list ) :
"""Construct ` FileConfig ` object and return a list .
: param src _ dir : A string containing the source directory .
: param files _ list : A list of dicts containing the src / dst mapping of files
to overlay .
: return : list"""
|
FilesConfig = collections . namedtuple ( 'FilesConfig' , [ 'src' , 'dst' , 'post_commands' ] )
return [ FilesConfig ( ** d ) for d in _get_files_generator ( src_dir , files_list ) ]
|
def index ( config , date = None , directory = None , concurrency = 5 , accounts = None , tag = None , verbose = False ) :
"""index traildbs directly from s3 for multiple accounts .
context : assumes a daily traildb file in s3 with dated key path"""
|
logging . basicConfig ( level = ( verbose and logging . DEBUG or logging . INFO ) )
logging . getLogger ( 'botocore' ) . setLevel ( logging . WARNING )
logging . getLogger ( 'elasticsearch' ) . setLevel ( logging . WARNING )
logging . getLogger ( 'urllib3' ) . setLevel ( logging . WARNING )
logging . getLogger ( 'requests' ) . setLevel ( logging . WARNING )
logging . getLogger ( 'c7n.worker' ) . setLevel ( logging . INFO )
with open ( config ) as fh :
config = yaml . safe_load ( fh . read ( ) )
jsonschema . validate ( config , CONFIG_SCHEMA )
date = get_date_path ( date , delta = 24 )
directory = directory or "/tmp"
with ProcessPoolExecutor ( max_workers = concurrency ) as w :
futures = { }
jobs = [ ]
for account in config . get ( 'accounts' ) :
if accounts and account [ 'name' ] not in accounts :
continue
if tag :
found = False
for t in account [ 'tags' ] . values ( ) :
if tag == t :
found = True
break
if not found :
continue
for region in account . get ( 'regions' ) :
p = ( config , account , region , date , directory )
jobs . append ( p )
for j in jobs :
log . debug ( "submit account:{} region:{} date:{}" . format ( j [ 1 ] [ 'name' ] , j [ 2 ] , j [ 3 ] ) )
futures [ w . submit ( index_account_trails , * j ) ] = j
# Process completed
for f in as_completed ( futures ) :
config , account , region , date , directory = futures [ f ]
if f . exception ( ) :
log . warning ( "error account:{} region:{} error:{}" . format ( account [ 'name' ] , region , f . exception ( ) ) )
continue
log . info ( "complete account:{} region:{}" . format ( account [ 'name' ] , region ) )
|
def bus_line_names ( self ) :
"""Append bus injection and line flow names to ` varname `"""
|
if self . system . tds . config . compute_flows :
self . system . Bus . _varname_inj ( )
self . system . Line . _varname_flow ( )
self . system . Area . _varname_inter ( )
|
def _execute_sql_query ( self ) :
"""* execute sql query using the sdss API *
* * Key Arguments : * *
* * Return : * *
- None
. . todo : :"""
|
self . log . info ( 'starting the ``_execute_sql_query`` method' )
# generate the api call url
params = urllib . parse . urlencode ( { 'cmd' : self . sqlQuery , 'format' : "json" } )
# grab the results
results = urllib . request . urlopen ( self . sdssUrl + '?%s' % params )
# report any errors
ofp = sys . stdout
results = results . read ( ) . decode ( 'utf-8' )
if results . startswith ( "ERROR" ) : # SQL Statement Error - > stderr
ofp = sys . stderr
ofp . write ( results . rstrip ( ) + os . linesep )
# clean up the json response so it can be parsed
results = results . replace ( ": ," , ': "NULL",' )
regex = re . compile ( r'"photoz_err"\:\s*(\n\s*})' )
newString = regex . sub ( r'"photoz_err": "NULL"\g<1>' , results )
results = newString
# parse the json results
results = json . loads ( results ) [ 0 ]
self . results = results [ "Rows" ]
self . log . info ( 'completed the ``_execute_sql_query`` method' )
return
|
def format_values ( self ) :
"""Returns a string with all args and settings and where they came from
( e . g . command line , config file , environment variable or default )"""
|
source_key_to_display_value_map = { _COMMAND_LINE_SOURCE_KEY : "Command Line Args: " , _ENV_VAR_SOURCE_KEY : "Environment Variables:\n" , _CONFIG_FILE_SOURCE_KEY : "Config File (%s):\n" , _DEFAULTS_SOURCE_KEY : "Defaults:\n" }
r = StringIO ( )
for source , settings in self . _source_to_settings . items ( ) :
source = source . split ( "|" )
source = source_key_to_display_value_map [ source [ 0 ] ] % tuple ( source [ 1 : ] )
r . write ( source )
for key , ( action , value ) in settings . items ( ) :
if key :
r . write ( " %-19s%s\n" % ( key + ":" , value ) )
else :
if isinstance ( value , str ) :
r . write ( " %s\n" % value )
elif isinstance ( value , list ) :
r . write ( " %s\n" % ' ' . join ( value ) )
return r . getvalue ( )
|
def preferences ( self , section = None ) :
"""Return a list of all registered preferences
or a list of preferences registered for a given section
: param section : The section name under which the preference is registered
: type section : str .
: return : a list of : py : class : ` prefs . BasePreference ` instances"""
|
if section is None :
return [ self [ section ] [ name ] for section in self for name in self [ section ] ]
else :
return [ self [ section ] [ name ] for name in self [ section ] ]
|
def trade_history ( self , from_ = None , count = None , from_id = None , end_id = None , order = None , since = None , end = None , pair = None ) :
"""Returns trade history .
To use this method you need a privilege of the info key .
: param int or None from _ : trade ID , from which the display starts ( default 0)
: param int or None count : the number of trades for display ( default 1000)
: param int or None from _ id : trade ID , from which the display starts ( default 0)
: param int or None end _ id : trade ID on which the display ends ( default inf . )
: param str or None order : sorting ( default ' DESC ' )
: param int or None since : the time to start the display ( default 0)
: param int or None end : the time to end the display ( default inf . )
: param str or None pair : pair to be displayed ( ex . ' btc _ usd ' )"""
|
return self . _trade_api_call ( 'TradeHistory' , from_ = from_ , count = count , from_id = from_id , end_id = end_id , order = order , since = since , end = end , pair = pair )
|
def get_all_domains ( self , max_domains = None , next_token = None ) :
"""Returns a : py : class : ` boto . resultset . ResultSet ` containing
all : py : class : ` boto . sdb . domain . Domain ` objects associated with
this connection ' s Access Key ID .
: keyword int max _ domains : Limit the returned
: py : class : ` ResultSet < boto . resultset . ResultSet > ` to the specified
number of members .
: keyword str next _ token : A token string that was returned in an
earlier call to this method as the ` ` next _ token ` ` attribute
on the returned : py : class : ` ResultSet < boto . resultset . ResultSet > `
object . This attribute is set if there are more than Domains than
the value specified in the ` ` max _ domains ` ` keyword . Pass the
` ` next _ token ` ` value from you earlier query in this keyword to
get the next ' page ' of domains ."""
|
params = { }
if max_domains :
params [ 'MaxNumberOfDomains' ] = max_domains
if next_token :
params [ 'NextToken' ] = next_token
return self . get_list ( 'ListDomains' , params , [ ( 'DomainName' , Domain ) ] )
|
def _build ( self , memory , query , memory_mask = None ) :
"""Perform a differentiable read .
Args :
memory : [ batch _ size , memory _ size , memory _ word _ size ] - shaped Tensor of
dtype float32 . This represents , for each example and memory slot , a
single embedding to attend over .
query : [ batch _ size , query _ word _ size ] - shaped Tensor of dtype float32.
Represents , for each example , a single embedding representing a query .
memory _ mask : None or [ batch _ size , memory _ size ] - shaped Tensor of dtype
bool . An entry of False indicates that a memory slot should not enter
the resulting weighted sum . If None , all memory is used .
Returns :
An AttentionOutput instance containing :
read : [ batch _ size , memory _ word _ size ] - shaped Tensor of dtype float32.
This represents , for each example , a weighted sum of the contents of
the memory .
weights : [ batch _ size , memory _ size ] - shaped Tensor of dtype float32 . This
represents , for each example and memory slot , the attention weights
used to compute the read .
weight _ logits : [ batch _ size , memory _ size ] - shaped Tensor of dtype float32.
This represents , for each example and memory slot , the logits of the
attention weights , that is , ` weights ` is calculated by taking the
softmax of the weight logits .
Raises :
UnderspecifiedError : if memory _ word _ size or query _ word _ size can not be
inferred .
IncompatibleShapeError : if memory , query , memory _ mask , or output of
attention _ logit _ mod do not match expected shapes ."""
|
if len ( memory . get_shape ( ) ) != 3 :
raise base . IncompatibleShapeError ( "memory must have shape [batch_size, memory_size, memory_word_size]." )
if len ( query . get_shape ( ) ) != 2 :
raise base . IncompatibleShapeError ( "query must have shape [batch_size, query_word_size]." )
if memory_mask is not None and len ( memory_mask . get_shape ( ) ) != 2 :
raise base . IncompatibleShapeError ( "memory_mask must have shape [batch_size, memory_size]." )
# Ensure final dimensions are defined , else the attention logit module will
# be unable to infer input size when constructing variables .
inferred_memory_word_size = memory . get_shape ( ) [ 2 ] . value
inferred_query_word_size = query . get_shape ( ) [ 1 ] . value
if inferred_memory_word_size is None or inferred_query_word_size is None :
raise base . UnderspecifiedError ( "memory_word_size and query_word_size must be known at graph " "construction time." )
memory_shape = tf . shape ( memory )
batch_size = memory_shape [ 0 ]
memory_size = memory_shape [ 1 ]
query_shape = tf . shape ( query )
query_batch_size = query_shape [ 0 ]
# Transform query to have same number of words as memory .
# expanded _ query : [ batch _ size , memory _ size , query _ word _ size ] .
expanded_query = tf . tile ( tf . expand_dims ( query , dim = 1 ) , [ 1 , memory_size , 1 ] )
# Compute attention weights for each memory slot .
# attention _ weight _ logits : [ batch _ size , memory _ size ]
with tf . control_dependencies ( [ tf . assert_equal ( batch_size , query_batch_size ) ] ) :
concatenated_embeddings = tf . concat ( values = [ memory , expanded_query ] , axis = 2 )
batch_apply_attention_logit = basic . BatchApply ( self . _attention_logit_mod , n_dims = 2 , name = "batch_apply_attention_logit" )
attention_weight_logits = batch_apply_attention_logit ( concatenated_embeddings )
# Note : basic . BatchApply ( ) will automatically reshape the [ batch _ size *
# memory _ size , 1 ] - shaped result of self . _ attention _ logit _ mod ( . . . ) into a
# [ batch _ size , memory _ size , 1 ] - shaped Tensor . If
# self . _ attention _ logit _ mod ( . . . ) returns something with more dimensions ,
# then attention _ weight _ logits will have extra dimensions , too .
if len ( attention_weight_logits . get_shape ( ) ) != 3 :
raise base . IncompatibleShapeError ( "attention_weight_logits must be a rank-3 Tensor. Are you sure that " "attention_logit_mod() returned [batch_size * memory_size, 1]-shaped" " Tensor?" )
# Remove final length - 1 dimension .
attention_weight_logits = tf . squeeze ( attention_weight_logits , [ 2 ] )
# Mask out ignored memory slots by assigning them very small logits . Ensures
# that every example has at least one valid memory slot , else we ' d end up
# averaging all memory slots equally .
if memory_mask is not None :
num_remaining_memory_slots = tf . reduce_sum ( tf . cast ( memory_mask , dtype = tf . int32 ) , axis = [ 1 ] )
with tf . control_dependencies ( [ tf . assert_positive ( num_remaining_memory_slots ) ] ) :
finfo = np . finfo ( np . float32 )
kept_indices = tf . cast ( memory_mask , dtype = tf . float32 )
ignored_indices = tf . cast ( tf . logical_not ( memory_mask ) , dtype = tf . float32 )
lower_bound = finfo . max * kept_indices + finfo . min * ignored_indices
attention_weight_logits = tf . minimum ( attention_weight_logits , lower_bound )
# attended _ memory : [ batch _ size , memory _ word _ size ] .
attention_weight = tf . reshape ( tf . nn . softmax ( attention_weight_logits ) , shape = [ batch_size , memory_size , 1 ] )
# The multiplication is elementwise and relies on broadcasting the weights
# across memory _ word _ size . Then we sum across the memory slots .
attended_memory = tf . reduce_sum ( memory * attention_weight , axis = [ 1 ] )
# Infer shape of result as much as possible .
inferred_batch_size , _ , inferred_memory_word_size = ( memory . get_shape ( ) . as_list ( ) )
attended_memory . set_shape ( [ inferred_batch_size , inferred_memory_word_size ] )
return AttentionOutput ( read = attended_memory , weights = tf . squeeze ( attention_weight , [ 2 ] ) , weight_logits = attention_weight_logits )
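# Plain-numpy sketch of the read computed above (ignoring the logit module,
# masking and shape checks): the weights are a softmax over per-slot logits and
# the read is the weighted sum of memory contents.
import numpy as np

batch_size, memory_size, word_size = 2, 4, 3
rng = np.random.default_rng(0)
memory = rng.normal(size=(batch_size, memory_size, word_size))
logits = rng.normal(size=(batch_size, memory_size))
weights = np.exp(logits) / np.exp(logits).sum(axis=1, keepdims=True)
read = (memory * weights[:, :, None]).sum(axis=1)  # [batch_size, word_size]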
|
def submit_file_content ( self , method , url , data , headers , params , halt_on_error = True ) :
"""Submit File Content for Documents and Reports to ThreatConnect API .
Args :
method ( str ) : The HTTP method for the request ( POST , PUT ) .
url ( str ) : The URL for the request .
data ( str ; bytes ; file ) : The body ( data ) for the request .
headers ( dict ) : The headers for the request .
params ( dict ) : The query string parameters for the request .
halt _ on _ error ( bool , default : True ) : If True any exception will raise an error .
Returns :
requests . models . Response : The response from the request ."""
|
r = None
try :
r = self . tcex . session . request ( method , url , data = data , headers = headers , params = params )
except Exception as e :
self . tcex . handle_error ( 580 , [ e ] , halt_on_error )
return r
|
def _validate_x ( self , x , z ) :
"""Validates x ( column ) , raising error if invalid ."""
|
if ( x < 0 ) or ( x > ( ( 2 ** z ) - 1 ) ) :
raise InvalidColumnError ( "{} is not a valid value for x (column)" . format ( x ) )
return x
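# Example (illustrative): at zoom level z there are 2**z tile columns, so for
# z = 3 the valid x values are 0..7 and _validate_x(8, 3) raises
# InvalidColumnError.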
|
def set_url ( self , url ) :
"""Set the URL referring to a robots . txt file ."""
|
self . url = url
self . host , self . path = urlparse . urlparse ( url ) [ 1 : 3 ]
|
def permission_update ( self , token , id , ** kwargs ) :
"""To update an existing permission .
https : / / www . keycloak . org / docs / latest / authorization _ services / index . html # _ service _ authorization _ uma _ policy _ api
: param str token : client access token
: param str id : permission id
: rtype : dict"""
|
return self . _realm . client . put ( '{}/{}' . format ( self . well_known [ 'policy_endpoint' ] , id ) , data = self . _dumps ( kwargs ) , headers = self . get_headers ( token ) )
|
def MakePmfFromHist ( hist , name = None ) :
"""Makes a normalized PMF from a Hist object .
Args :
hist : Hist object
name : string name
Returns :
Pmf object"""
|
if name is None :
name = hist . name
# make a copy of the dictionary
d = dict ( hist . GetDict ( ) )
pmf = Pmf ( d , name )
pmf . Normalize ( )
return pmf
|
def command_ls ( self ) :
"""List names"""
|
self . parser = argparse . ArgumentParser ( description = "List names of available objects" )
self . options_select ( )
self . options_utils ( )
self . options = self . parser . parse_args ( self . arguments [ 2 : ] )
self . show ( brief = True )
|
def finalize ( self ) :
"""Connects the wires ."""
|
self . _check_finalized ( )
self . _final = True
for dest_w , values in self . dest_instrs_info . items ( ) :
mux_vals = dict ( zip ( self . instructions , values ) )
dest_w <<= sparse_mux ( self . signal_wire , mux_vals )
|
def get_file_results ( self ) :
"""Print the result and return the overall count for this file ."""
|
self . _deferred_print . sort ( )
for line_number , offset , code , text , doc in self . _deferred_print :
print ( self . _fmt % { 'path' : self . filename , 'row' : self . line_offset + line_number , 'col' : offset + 1 , 'code' : code , 'text' : text , } )
if self . _show_source :
if line_number > len ( self . lines ) :
line = ''
else :
line = self . lines [ line_number - 1 ]
print ( line . rstrip ( ) )
print ( re . sub ( r'\S' , ' ' , line [ : offset ] ) + '^' )
if self . _show_pep8 and doc :
print ( ' ' + doc . strip ( ) )
# stdout is block buffered when not stdout . isatty ( ) .
# line can be broken where buffer boundary since other processes
# write to same file .
# flush ( ) after print ( ) to avoid buffer boundary .
# Typical buffer size is 8192 . line written safely when
# len ( line ) < 8192.
sys . stdout . flush ( )
return self . file_errors
|
def _check_status ( func , read_exception , * args , ** kwargs ) :
"""Checks the status of a single component by
calling the func with the args . The func is expected to
return a dict with at least an ` available = < bool > ` key
value pair
: param func func : The function to call
: param read _ exception : If an exception is thrown
should the exception message be passed as the
message parameter . If not , a generic
message parameter will be added to the dict
: param tuple args : A list of arguments to pass to
the function
: param dict kwargs : a dict of keyword arguments
to pass to the function
: return : a dictionary that includes the state
of the component . At least an ' available '
key is guaranteed
: rtype : dict"""
|
try :
return func ( * args , ** kwargs )
except Exception as e :
_LOG . exception ( e )
message = str ( e ) if read_exception else 'An error occurred while checking the status'
return dict ( message = message , available = False )
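# Usage sketch (hypothetical check function):
def _ping_database_sketch():
    # A real check would attempt a connection; here we just report success.
    return dict(available=True, latency_ms=4)

status = _check_status(_ping_database_sketch, True)
# -> {'available': True, 'latency_ms': 4}; if the check raised instead, the
# result would be {'message': '<exception text>', 'available': False}.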
|
def printlet ( flatten = False , ** kwargs ) :
"""Print chunks of data from a chain
: param flatten : whether to flatten data chunks
: param kwargs : keyword arguments as for : py : func : ` print `
If ` ` flatten ` ` is : py : const : ` True ` , every chunk received is unpacked .
This is useful when passing around connected data , e . g . from : py : func : ` ~ . enumeratelet ` .
Keyword arguments via ` ` kwargs ` ` are equivalent to those of : py : func : ` print ` .
For example , passing ` ` file = sys . stderr ` ` is a simple way of creating a debugging element in a chain :
. . code : :
debug _ chain = chain [ : i ] > > printlet ( file = sys . stderr ) > > chain [ i : ]"""
|
chunk = yield
if flatten :
while True :
print ( * chunk , ** kwargs )
chunk = yield chunk
else :
while True :
print ( chunk , ** kwargs )
chunk = yield chunk
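# Usage sketch: called as a bare generator it must be primed before values are
# sent in (the real library presumably handles this when wiring up a chain).
p = printlet(flatten=True)
next(p)            # advance to the first `yield`
p.send((1, 'a'))   # prints "1 a" and passes the chunk through
p.send((2, 'b'))   # prints "2 b"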
|
def Parse ( self , value ) :
"""Parse a ' Value ' declaration .
Args :
value : String line from a template file , must begin with ' Value ' .
Raises :
TextFSMTemplateError : Value declaration contains an error ."""
|
value_line = value . split ( ' ' )
if len ( value_line ) < 3 :
raise TextFSMTemplateError ( 'Expect at least 3 tokens on line.' )
if not value_line [ 2 ] . startswith ( '(' ) : # Options are present
options = value_line [ 1 ]
for option in options . split ( ',' ) :
self . _AddOption ( option )
# Call option OnCreateOptions callbacks
[ option . OnCreateOptions ( ) for option in self . options ]
self . name = value_line [ 2 ]
self . regex = ' ' . join ( value_line [ 3 : ] )
else : # There were no valid options , so there are no options .
# Treat this argument as the name .
self . name = value_line [ 1 ]
self . regex = ' ' . join ( value_line [ 2 : ] )
if len ( self . name ) > self . max_name_len :
raise TextFSMTemplateError ( "Invalid Value name '%s' or name too long." % self . name )
if ( not re . match ( r'^\(.*\)$' , self . regex ) or self . regex . count ( '(' ) != self . regex . count ( ')' ) ) :
raise TextFSMTemplateError ( "Value '%s' must be contained within a '()' pair." % self . regex )
self . template = re . sub ( r'^\(' , '(?P<%s>' % self . name , self . regex )
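# Worked example (illustrative): parsing the declaration
#   Value Required interface (\S+)
# yields name='interface', regex='(\S+)', the 'Required' option, and
# template='(?P<interface>\S+)', which is later substituted into rule lines.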
|
def _getPWMFrequency ( self , device , message ) :
"""Get the PWM frequency stored on the hardware device .
: Parameters :
device : ` int `
The device is the integer number of the hardware devices ID and
is only used with the Pololu Protocol .
message : ` bool `
If set to ` True ` a text message will be returned , if set to ` False `
the integer stored in the Qik will be returned .
: Returns :
A text message or an int . See the ` message ` parameter above ."""
|
result = self . _getConfig ( self . PWM_PARAM , device )
freq , msg = self . _CONFIG_PWM . get ( result , ( result , 'Invalid Frequency' ) )
if message :
result = msg
else :
result = freq
return result
|
def as_dict ( self ) :
"""Json - serializable dict representation ."""
|
structure = self . final_structure
d = { "has_gaussian_completed" : self . properly_terminated , "nsites" : len ( structure ) }
comp = structure . composition
d [ "unit_cell_formula" ] = comp . as_dict ( )
d [ "reduced_cell_formula" ] = Composition ( comp . reduced_formula ) . as_dict ( )
d [ "pretty_formula" ] = comp . reduced_formula
d [ "is_pcm" ] = self . is_pcm
d [ "errors" ] = self . errors
d [ "Mulliken_charges" ] = self . Mulliken_charges
unique_symbols = sorted ( list ( d [ "unit_cell_formula" ] . keys ( ) ) )
d [ "elements" ] = unique_symbols
d [ "nelements" ] = len ( unique_symbols )
d [ "charge" ] = self . charge
d [ "spin_multiplicity" ] = self . spin_multiplicity
vin = { "route" : self . route_parameters , "functional" : self . functional , "basis_set" : self . basis_set , "nbasisfunctions" : self . num_basis_func , "pcm_parameters" : self . pcm }
d [ "input" ] = vin
nsites = len ( self . final_structure )
vout = { "energies" : self . energies , "final_energy" : self . final_energy , "final_energy_per_atom" : self . final_energy / nsites , "molecule" : structure . as_dict ( ) , "stationary_type" : self . stationary_type , "corrections" : self . corrections }
d [ 'output' ] = vout
d [ "@module" ] = self . __class__ . __module__
d [ "@class" ] = self . __class__ . __name__
return d
|
def ordered_uniqify ( sequence ) :
"""Uniqifies the given hashable sequence while preserving its order .
: param sequence : Sequence .
: type sequence : object
: return : Uniqified sequence .
: rtype : list"""
|
items = set ( )
return [ key for key in sequence if key not in items and not items . add ( key ) ]
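# Example: duplicates are dropped while first-seen order is preserved.
#   ordered_uniqify([3, 1, 3, 2, 1])  ->  [3, 1, 2]
#   ordered_uniqify("abracadabra")    ->  ['a', 'b', 'r', 'c', 'd']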
|