signature
stringlengths 29
44.1k
| implementation
stringlengths 0
85.2k
|
---|---|
def pickFilepath(self):
    """Prompt the user to select a filepath based on the current filepath mode.

    Depending on ``self.filepathMode()`` this opens a save-file, open-file,
    open-files, or existing-directory dialog, then stores the selection back
    on the widget via ``setFilepath``/``setFilepaths``.
    """
    mode = self.filepathMode()
    filepath = ''
    filepaths = []
    # Start browsing from the currently entered path, or the process cwd.
    curr_dir = nativestring(self._filepathEdit.text())
    if (not curr_dir):
        curr_dir = QDir.currentPath()
    if mode == XFilepathEdit.Mode.SaveFile:
        filepath = QFileDialog.getSaveFileName(self, self.windowTitle(), curr_dir, self.filepathTypes())
    elif mode == XFilepathEdit.Mode.OpenFile:
        filepath = QFileDialog.getOpenFileName(self, self.windowTitle(), curr_dir, self.filepathTypes())
    elif mode == XFilepathEdit.Mode.OpenFiles:
        filepaths = QFileDialog.getOpenFileNames(self, self.windowTitle(), curr_dir, self.filepathTypes())
    else:
        filepath = QFileDialog.getExistingDirectory(self, self.windowTitle(), curr_dir)
    if filepath:
        # NOTE(review): some Qt bindings (PyQt5/PySide2) return a
        # (filename, selected_filter) tuple here -- keep only the filename.
        if type(filepath) == tuple:
            filepath = filepath[0]
        self.setFilepath(nativestring(filepath))
    elif filepaths:
        # NOTE(review): under Python 3 ``map`` returns an iterator; this
        # assumes setFilepaths accepts any iterable -- confirm.
        self.setFilepaths(map(str, filepaths))
|
def job_stats_enhanced(job_id):
    """Get full job and step stats for *job_id* from LSF.

    Runs ``bjobs`` with a pipe-delimited custom output format and parses the
    single result line.  If that fails (e.g. the job has aged out of bjobs),
    falls back to scanning ``bhist -l`` output for a final status.

    :param job_id: LSF job id (anything convertible to str)
    :return: dict with keys such as job_id, wallclock, cpu, queue, status,
        exit_code, start, start_time, end and steps (always an empty list)
    """
    stats_dict = {}
    # Column order requested from bjobs:
    # 0 jobid, 1 run_time, 2 cpu_used, 3 queue, 4 slots (unused), 5 stat,
    # 6 exit_code, 7 start_time, 8 estimated_start_time, 9 finish_time
    with os.popen('bjobs -o "jobid run_time cpu_used queue slots stat exit_code start_time estimated_start_time finish_time delimiter=\'|\'" -noheader ' + str(job_id)) as f:
        try:
            line = f.readline()
            cols = line.split('|')
            stats_dict['job_id'] = cols[0]
            # run_time/cpu_used look like "123.0 second(s)"; '-' means unset.
            if cols[1] != '-':
                stats_dict['wallclock'] = timedelta(seconds=float(cols[1].split(' ')[0]))
            if cols[2] != '-':
                stats_dict['cpu'] = timedelta(seconds=float(cols[2].split(' ')[0]))
            stats_dict['queue'] = cols[3]
            stats_dict['status'] = cols[5]
            stats_dict['exit_code'] = cols[6]
            stats_dict['start'] = cols[7]
            stats_dict['start_time'] = cols[8]
            if stats_dict['status'] in ['DONE', 'EXIT']:
                stats_dict['end'] = cols[9]
            # LSF has no notion of job steps; keep the key for API symmetry.
            steps = []
            stats_dict['steps'] = steps
        except:
            # bjobs parsing failed -- fall back to bhist for a final status.
            # (Inner ``f`` deliberately shadows the outer popen handle.)
            with os.popen('bhist -l ' + str(job_id)) as f:
                try:
                    output = f.readlines()
                    for line in output:
                        if "Done successfully" in line:
                            stats_dict['status'] = 'DONE'
                            return stats_dict
                        elif "Completed <exit>" in line:
                            stats_dict['status'] = 'EXIT'
                            return stats_dict
                        else:
                            stats_dict['status'] = 'UNKNOWN'
                except Exception as e:
                    print(e)
                    print('LSF: Error reading job stats')
                    stats_dict['status'] = 'UNKNOWN'
    return stats_dict
|
def decompressBWT(inputDir, outputDir, numProcs, logger):
    """Decompress a compressed BWT back out to its original form.

    While unusual to do, it's included in this package for completion purposes.

    @param inputDir - the directory of the compressed BWT we plan on decompressing
    @param outputDir - the directory for the output decompressed BWT; it can be
        the same directory, we don't care
    @param numProcs - number of processes we're allowed to use
    @param logger - log all the things!
    """
    # load it, force it to be a compressed bwt also
    msbwt = MultiStringBWT.CompressedMSBWT()
    msbwt.loadMsbwt(inputDir, logger)
    # pre-create the output memmap so worker processes can open it in-place
    outputFile = np.lib.format.open_memmap(outputDir + '/msbwt.npy', 'w+', '<u1', (msbwt.getTotalSize(),))
    del outputFile
    # Split the BWT into fixed-size chunks, one (in, out, start, end) work
    # tuple per chunk.
    # NOTE(review): this is Python 2 code (xrange, integer '/' division);
    # under Python 3 the '/' divisions below would yield floats -- confirm
    # the targeted interpreter before reuse.
    worksize = 1000000
    tups = [None] * (msbwt.getTotalSize() / worksize + 1)
    x = 0
    if msbwt.getTotalSize() > worksize:
        for x in xrange(0, msbwt.getTotalSize() / worksize):
            tups[x] = (inputDir, outputDir, x * worksize, (x + 1) * worksize)
        # final (possibly short) chunk covers the remainder
        tups[-1] = (inputDir, outputDir, (x + 1) * worksize, msbwt.getTotalSize())
    else:
        tups[0] = (inputDir, outputDir, 0, msbwt.getTotalSize())
    if numProcs > 1:
        myPool = multiprocessing.Pool(numProcs)
        rets = myPool.map(decompressBWTPoolProcess, tups)
    else:
        # serial fallback; rets collected for parity with the pool branch
        rets = []
        for tup in tups:
            rets.append(decompressBWTPoolProcess(tup))
|
def print_tree(self, ast_obj=None):
    """Convert AST object to tree view of BEL AST.

    Returns:
        prints tree of BEL AST to STDOUT (and returns self for chaining)
    """
    if not ast_obj:
        ast_obj = self
    if hasattr(self, "bel_subject"):
        print("Subject:")
        self.bel_subject.print_tree(self.bel_subject, indent=0)
    if hasattr(self, "bel_relation"):
        print("Relation:", self.bel_relation)
    if hasattr(self, "bel_object"):
        if self.bel_object.type == "BELAst":
            # Nested statement: the object is itself a full BEL AST.
            # NOTE(review): these nested calls pass ``indent=0`` although this
            # signature has no ``indent`` parameter -- presumably the
            # subject/object nodes are a different class whose print_tree
            # accepts it; confirm against those classes.
            if hasattr(self, "bel_subject"):
                print("Nested Subject:")
                self.bel_object.bel_subject.print_tree(indent=0)
            if hasattr(self, "bel_relation"):
                print("Nested Relation:", self.bel_object.bel_relation)
            if hasattr(self, "bel_object"):
                print("Nested Object:")
                self.bel_object.bel_object.print_tree(indent=0)
        else:
            print("Object:")
            self.bel_object.print_tree(self.bel_object, indent=0)
    return self
|
def send_message(message: str, subject: str, recip: list, recip_email: list, html_message: str = None):
    """Send a message to the specified recipients via email or SMS.

    Source: Himanshu Shankar (https://github.com/iamhssingh)

    Parameters
    ----------
    message: str
        Message that is to be sent to user.
    subject: str
        Subject that is to be sent to user, in case prop is an email.
    recip: list
        Recipient(s) to whom the message is being sent.
    recip_email: list
        Recipient(s) to whom the fallback EMail is being sent.  This will be
        deprecated once the SMS feature is fully brought in.
    html_message: str
        HTML variant of message, if any.

    Returns
    -------
    sent: dict
        {'success': bool, 'message': str}
    """
    import smtplib

    from django.conf import settings
    from django.core.mail import send_mail
    from sendsms import api

    sent = {'success': False, 'message': None}
    if not getattr(settings, 'EMAIL_HOST', None):
        raise ValueError('EMAIL_HOST must be defined in django '
                         'setting for sending mail.')
    # (The original repeated this EMAIL_FROM check twice; once is enough.)
    if not getattr(settings, 'EMAIL_FROM', None):
        raise ValueError('EMAIL_FROM must be defined in django setting '
                         'for sending mail. Who is sending email?')
    # Normalize plain-string arguments to lists *before* validating them --
    # the original converted after validation, so a bare-string recipient
    # would have had its first character length-checked instead.
    if isinstance(recip, str):  # For backsupport
        recip = [recip]
    if isinstance(recip_email, str):  # For backsupport
        recip_email = [recip_email]
    # Check if there is any recipient
    if not len(recip) > 0:
        raise ValueError('No recipient to send message.')
    # Check if the value of recipient is valid (min length: a@b.c)
    elif len(recip[0]) < 5:
        raise ValueError('Invalid recipient.')
    # Check if all recipients in the list are of the same type
    is_email = validate_email(recip[0])
    for ind in range(len(recip)):
        if validate_email(recip[ind]) is not is_email:
            raise ValueError('All recipient should be of same type.')
        elif not is_email:
            recip[ind] = get_mobile_number(recip[ind])
    # Check if fallback email is indeed an email
    for rcp in recip_email:
        if not validate_email(rcp):
            raise ValueError('Invalid email provided: {}'.format(rcp))
    if is_email:
        try:
            send_mail(subject=subject, message=message, html_message=html_message, from_email=settings.EMAIL_FROM, recipient_list=recip)
        except smtplib.SMTPException as ex:
            sent['message'] = 'Message sending failed!' + str(ex.args)
            sent['success'] = False
        else:
            sent['message'] = 'Message sent successfully!'
            sent['success'] = True
    else:
        try:
            api.send_sms(body=message, to=recip, from_phone=None)
            # Django SendSMS doesn't provide an output of success/failure.
            # Send mail either way, just to ensure delivery.
            send_message(message=message, subject=subject, recip=recip_email, recip_email=recip_email, html_message=html_message)
        except Exception as ex:
            sent['message'] = 'Message sending Failed!' + str(ex.args)
            sent['success'] = False
            # Still attempt the email fallback even if the SMS step raised.
            send_message(message=message, subject=subject, recip=recip_email, recip_email=recip_email, html_message=html_message)
        else:
            sent['message'] = 'Message sent successfully!'
            sent['success'] = True
    return sent
|
def add_items_to_tree_iter(self, input_dict, treeiter, parent_dict_path=None):
    """Recursively append every entry of *input_dict* to ``self.tree_store``.

    :param input_dict: dictionary whose key/value pairs are added
    :param treeiter: tree-store position under which the entries are inserted
    :param parent_dict_path: list of keys leading to *input_dict* (root path
        when omitted)
    :return: None
    """
    path_so_far = [] if parent_dict_path is None else list(parent_dict_path)
    self.get_view_selection()
    # Keys are inserted in sorted order for a stable tree layout.
    for key in sorted(input_dict):
        value = input_dict[key]
        child_path = path_so_far + [key]
        if isinstance(value, dict):
            # Dict values become expandable nodes; recurse for their children.
            child_iter = self.tree_store.append(treeiter, [key, "", True, child_path])
            self.add_items_to_tree_iter(value, child_iter, child_path)
        else:
            self.tree_store.append(treeiter, [key, value, False, child_path])
|
def identifier_list_cmp(a, b):
    """Compare two identifier lists (pre-release/build components).

    The rule is:
    - Identifiers are paired between lists
    - They are compared from left to right
    - If all first identifiers match, the longest list is greater.

    >>> identifier_list_cmp(['1', '2'], ['1', '2'])
    >>> identifier_list_cmp(['1', '2a'], ['1', '2b'])
    >>> identifier_list_cmp(['1'], ['1', '2'])
    """
    for left, right in zip(a, b):
        outcome = identifier_cmp(left, right)
        if outcome != 0:
            return outcome
    # All paired identifiers matched; the longer list wins
    # (e.g. alpha1.3 < alpha1.3.1).
    return base_cmp(len(a), len(b))
|
def slice(self, start, stop=None, axis=0):
    """Restrict the histogram to bins whose data values (not bin numbers)
    along *axis* lie between *start* and *stop* (both inclusive).

    Returns a d-dimensional histogram.
    """
    if stop is None:
        # Single-bin slice: treat the range as [start, start].
        stop = start
    axis = self.get_axis_number(axis)
    first_bin = max(0, self.get_axis_bin_index(start, axis))
    last_bin = min(len(self.bin_centers(axis)) - 1,  # TODO: test off by one!
                   self.get_axis_bin_index(stop, axis))
    edges = self.bin_edges.copy()
    # Keep one more edge than bins (hence + 2). TODO: test off by one here!
    edges[axis] = edges[axis][first_bin:last_bin + 2]
    sliced_counts = np.take(self.histogram, np.arange(first_bin, last_bin + 1), axis=axis)
    return Histdd.from_histogram(sliced_counts, bin_edges=edges, axis_names=self.axis_names)
|
def get_packet_type(cls, type_):
    """Override method for the Length/Type field (self.ethertype).

    The Length/Type field means Length or Type interpretation, same as
    ethernet IEEE802.3.  If the value is less than or equal to 1500 decimal
    (05DC hexadecimal) it means Length interpretation and the frame is
    passed to the LLC sublayer.
    """
    # Collapse every "Length" value onto the single IEEE802.3 marker type.
    if type_ <= ether.ETH_TYPE_IEEE802_3:
        return cls._TYPES.get(ether.ETH_TYPE_IEEE802_3)
    return cls._TYPES.get(type_)
|
def render_pulp_tag(self):
    """Configure the pulp_tag plugin, or remove it when no pulp registry
    is configured."""
    if not self.dj.dock_json_has_plugin_conf('postbuild_plugins', 'pulp_tag'):
        return
    pulp_registry = self.spec.pulp_registry.value
    if not pulp_registry:
        # If no pulp registry is specified, don't run the pulp plugin.
        logger.info("removing pulp_tag from request, " "requires pulp_registry")
        self.dj.remove_plugin("postbuild_plugins", "pulp_tag")
        return
    self.dj.dock_json_set_arg('postbuild_plugins', 'pulp_tag', 'pulp_registry_name', pulp_registry)
    # Verify we have either a secret or username/password.
    if self.spec.pulp_secret.value is None:
        conf = self.dj.dock_json_get_plugin_conf('postbuild_plugins', 'pulp_tag')
        plugin_args = conf.get('args', {})
        if 'username' not in plugin_args:
            raise OsbsValidationException("Pulp registry specified " "but no auth config")
|
def get_config(config_spec):
    """Load configuration from a URL or local file and parse it as JSON.

    NOTE(review): the original docstring claimed the result is *not* parsed
    as JSON, but the code has always called ``json.load`` -- documenting the
    actual behavior here.

    :param config_spec: an ``http(s)`` URL or a local file path
    :return: the parsed JSON document
    """
    if config_spec.startswith("http"):  # URL: fetch it
        # NOTE(review): ``urllib.urlopen`` is the Python 2 API; Python 3
        # would need ``urllib.request.urlopen`` -- left unchanged on purpose.
        config_file = urllib.urlopen(config_spec)
    else:  # string: open file with that name
        config_file = open(config_spec)
    try:
        return json.load(config_file)
    finally:
        # Always release the handle, even when parsing fails (the original
        # only closed on success, leaking the descriptor on a JSON error).
        try:
            config_file.close()
        except Exception:
            pass
|
def p_const_vector_elem_list(p):
    """const_number_list : expr"""
    # PLY grammar action (the docstring above IS the grammar rule -- do not
    # edit it): reduce a single expression into a one-element constant list,
    # or flag a syntax error when the expression is not constant.
    if p[1] is None:
        # Upstream rule already failed; propagate silently.
        return
    if not is_static(p[1]):
        if isinstance(p[1], symbols.UNARY):
            # A unary expression may still fold to a constant later on.
            tmp = make_constexpr(p.lineno(1), p[1])
        else:
            api.errmsg.syntax_error_not_constant(p.lexer.lineno)
            p[0] = None
            return
    else:
        tmp = p[1]
    p[0] = [tmp]
|
def createSessionFile(self, file, verbose=None):
    """Save the current session to a file; on success the session file
    location is returned.

    :param file: Session file location as an absolute path
    :param verbose: print more
    :returns: 200: successful operation
    """
    params = set_param(['file'], [file])
    return api(url=self.___url + 'session', PARAMS=params, method="POST", verbose=verbose)
|
def _get_objects ( self , o_type ) :
"""Get an object list from the scheduler
Returns None if the required object type ( ` o _ type ` ) is not known or an exception is raised .
Else returns the objects list
: param o _ type : searched object type
: type o _ type : str
: return : objects list
: rtype : alignak . objects . item . Items"""
|
if o_type not in [ t for t in self . app . sched . pushed_conf . types_creations ] :
return None
try :
_ , _ , strclss , _ , _ = self . app . sched . pushed_conf . types_creations [ o_type ]
o_list = getattr ( self . app . sched , strclss )
except Exception : # pylint : disable = broad - except
return None
return o_list
|
def _vpc_config(self):
    """Build the Lambda VPC configuration dict.

    :return: dict with 'SubnetIds' and 'SecurityGroupIds'; both lists are
        empty when VPC support is disabled.
    """
    if not self.vpc_enabled:
        vpc_config = {'SubnetIds': [], 'SecurityGroupIds': []}
    else:
        internal_subnets = get_subnets(env=self.env, region=self.region, purpose='internal')['subnet_ids'][self.region]
        vpc_config = {
            'SubnetIds': internal_subnets,
            'SecurityGroupIds': self._get_sg_ids(),
        }
    LOG.debug("Lambda VPC config setup: %s", vpc_config)
    return vpc_config
|
def _get_dep_to_dot_name_mapping ( dependencies ) :
"""Creates mapping between Dependency classes and names used in DOT graph"""
|
dot_name_to_deps = { }
for dep in dependencies :
dot_name = dep . name
if dot_name not in dot_name_to_deps :
dot_name_to_deps [ dot_name ] = [ dep ]
else :
dot_name_to_deps [ dot_name ] . append ( dep )
dep_to_dot_name = { }
for dot_name , deps in dot_name_to_deps . items ( ) :
if len ( deps ) == 1 :
dep_to_dot_name [ deps [ 0 ] ] = dot_name
continue
for idx , dep in enumerate ( deps ) :
dep_to_dot_name [ dep ] = dot_name + str ( idx )
return dep_to_dot_name
|
def get_colormap(cls, names=[], N=10, *args, **kwargs):
    """Open a :class:`ColormapDialog` and get a colormap.

    Parameters
    ----------
    %(ColormapModel.parameters)s

    Other Parameters
    ----------------
    ``*args, **kwargs``
        Anything else that is passed to the ColormapDialog

    Returns
    -------
    str or matplotlib.colors.Colormap
        Either the name of a standard colormap available via
        :func:`psy_simple.colors.get_cmap` or a colormap, or None when the
        dialog is cancelled.
    """
    # NOTE(review): mutable default argument ``names=[]`` -- only read here,
    # but fragile; presumably safe_list copies it (confirm).
    names = safe_list(names)
    obj = cls(names, N, *args, **kwargs)
    vbox = obj.layout()
    # Standard Ok/Cancel buttons; Ok stays disabled until a row is selected.
    buttons = QDialogButtonBox(QDialogButtonBox.Ok | QDialogButtonBox.Cancel, parent=obj)
    buttons.button(QDialogButtonBox.Ok).setEnabled(False)
    vbox.addWidget(buttons)
    buttons.accepted.connect(obj.accept)
    buttons.rejected.connect(obj.reject)
    # Enable Ok as soon as the selection becomes non-empty.
    obj.table.selectionModel().selectionChanged.connect(lambda indices: buttons.button(QDialogButtonBox.Ok).setEnabled(bool(indices)))
    accepted = obj.exec_()
    if accepted:
        return obj.table.chosen_colormap
|
def transition(self, duration, brightness=None):
    """Transition wrapper; short-circuits the transition when possible.

    :param duration: Duration of transition (0 applies the change instantly).
    :param brightness: Transition to this brightness.
    """
    # Instant change: set the value directly (when one was given) and stop.
    if duration == 0:
        if brightness is None:
            return
        self.brightness = brightness
        return
    # Nothing to do when the target equals the current level.
    if brightness == self.brightness:
        return
    self._transition(duration, brightness)
|
def start_cluster_server(ctx, num_gpus=1, rdma=False):
    """Function that wraps the creation of TensorFlow ``tf.train.Server`` for a
    node in a distributed TensorFlow cluster.

    This is intended to be invoked from within the TF ``map_fun``, replacing
    explicit code to instantiate ``tf.train.ClusterSpec`` and
    ``tf.train.Server`` objects.

    Args:
      :ctx: TFNodeContext containing the metadata specific to this node in the cluster.
      :num_gpus: number of GPUs desired
      :rdma: boolean indicating if RDMA 'iverbs' should be used for cluster communications.

    Returns:
      A tuple of (cluster_spec, server)
    """
    import tensorflow as tf
    from . import gpu_info
    logging.info("{0}: ======== {1}:{2} ========".format(ctx.worker_num, ctx.job_name, ctx.task_index))
    cluster_spec = ctx.cluster_spec
    logging.info("{0}: Cluster spec: {1}".format(ctx.worker_num, cluster_spec))
    if tf.test.is_built_with_cuda() and num_gpus > 0:
        # compute my index relative to other nodes placed on the same host (for GPU allocation)
        my_addr = cluster_spec[ctx.job_name][ctx.task_index]
        my_host = my_addr.split(':')[0]
        flattened = [v for sublist in cluster_spec.values() for v in sublist]
        local_peers = [p for p in flattened if p.startswith(my_host)]
        my_index = local_peers.index(my_addr)
        # GPU allocation can race with other processes on the host, so retry
        # a few times before giving up.
        gpu_initialized = False
        retries = 3
        while not gpu_initialized and retries > 0:
            try:
                # override PS jobs to only reserve one GPU
                if ctx.job_name == 'ps':
                    num_gpus = 1
                # Find a free gpu(s) to use
                gpus_to_use = gpu_info.get_gpus(num_gpus, my_index)
                gpu_prompt = "GPU" if num_gpus == 1 else "GPUs"
                logging.info("{0}: Using {1}: {2}".format(ctx.worker_num, gpu_prompt, gpus_to_use))
                # Set GPU device to use for TensorFlow
                os.environ['CUDA_VISIBLE_DEVICES'] = gpus_to_use
                # Create a cluster from the parameter server and worker hosts.
                cluster = tf.train.ClusterSpec(cluster_spec)
                # Create and start a server for the local task.
                if rdma:
                    server = tf.train.Server(cluster, ctx.job_name, ctx.task_index, protocol="grpc+verbs")
                else:
                    server = tf.train.Server(cluster, ctx.job_name, ctx.task_index)
                gpu_initialized = True
            except Exception as e:
                print(e)
                logging.error("{0}: Failed to allocate GPU, trying again...".format(ctx.worker_num))
                retries -= 1
                time.sleep(10)
        if not gpu_initialized:
            raise Exception("Failed to allocate GPU")
    else:
        # CPU-only path: hide all GPUs from TensorFlow.
        os.environ['CUDA_VISIBLE_DEVICES'] = ''
        logging.info("{0}: Using CPU".format(ctx.worker_num))
        # Create a cluster from the parameter server and worker hosts.
        cluster = tf.train.ClusterSpec(cluster_spec)
        # Create and start a server for the local task.
        server = tf.train.Server(cluster, ctx.job_name, ctx.task_index)
    return (cluster, server)
|
def table_to_source_list(table, src_type=OutputSource):
    """Convert a table of data into a list of sources.

    A single table must have a consistent source type given by *src_type*:
    :class:`AegeanTools.models.OutputSource`, :class:`AegeanTools.models.SimpleSource`,
    or :class:`AegeanTools.models.IslandSource`.

    Parameters
    ----------
    table : Table
        Table of sources
    src_type : class
        The class instantiated for each row.

    Returns
    -------
    sources : list
        A list of objects of the given type.
    """
    if table is None:
        return []
    sources = []
    for row in table:
        # Initialise one source object per table row.
        src = src_type()
        # Copy every column our source type knows about from the row.
        for param in src_type.names:
            if param not in table.colnames:
                continue
            val = row[param]
            # hack around float32's broken-ness: widen to float64
            if isinstance(val, np.float32):
                val = np.float64(val)
            setattr(src, param, val)
        sources.append(src)
    return sources
|
def Chemistry(self):
    '''Get cells chemistry.

    Register 0x79 holds the byte count; the chemistry bytes follow from 0x7A.
    :return: list of raw bytes describing the cell chemistry
    '''
    count = self.bus.read_byte_data(self.address, 0x79)
    return [self.bus.read_byte_data(self.address, 0x7A + offset) for offset in range(count)]
|
def _traverse(element, condition=None):
    """Depth-first traversal API intended for debugging.

    Yields *element* itself (when *condition* accepts it) and then recurses
    into its children: dict values, list items, or attrs-declared fields.
    """
    if condition is None or condition(element):
        yield element
    if isinstance(element, DictElement):
        children = element.values()
    elif isinstance(element, ListElement):
        children = element
    elif attr.has(element.__class__):
        children = (getattr(element, field.name) for field in attr.fields(element.__class__))
    else:
        children = ()
    for child in children:
        yield from BaseElement._traverse(child, condition)
|
def main(args):
    """API with args object containing configuration parameters.

    Parses the arguments, combines all tweet files under ``args.path`` into
    one dataframe, saves it, extracts the geo-tagged subset, and returns
    both frames as ``(df, geo)``.
    """
    global logging, log
    args = parse_args(args)
    logging.basicConfig(format=LOG_FORMAT, level=logging.DEBUG if args.verbose else logging.INFO, stream=sys.stdout)
    df = cat_tweets(path=args.path, verbosity=args.verbose + 1, numtweets=args.numtweets, ignore_suspicious=False)
    log.info('Combined {} tweets'.format(len(df)))
    # Drop all-NaN columns before persisting.
    df = drop_nan_columns(df)
    save_tweets(df, path=args.path, filename=args.tweetfile)
    geo = get_geo(df, path=args.path, filename=args.geofile)
    log.info("Combined {} tweets into a single file {} and set asside {} geo tweets in {}".format(len(df), args.tweetfile, len(geo), args.geofile))
    return df, geo
|
def set_column_width(self, n=0, width=120):
    """Set the n'th column width in pixels.

    :param n: column index
    :param width: width in pixels
    :return: self, to allow call chaining
    """
    widget = self._widget
    widget.setColumnWidth(n, width)
    return self
|
def filter_headers(data):
    """Keep only the headers that take part in the request signature.

    Only ``Host``, ``Content-Type`` and headers whose name starts with
    ``x``/``X`` are signed.  (Docstring translated from the original
    Chinese.)

    :param data: dict of all header names to values
    :return: dict containing only the signable headers
    """
    # name[:1] (instead of name[0]) tolerates an empty header name, which
    # crashed the original with an IndexError.
    return {
        name: value
        for name, value in data.items()
        if name in ('Content-Type', 'Host') or name[:1] in ('x', 'X')
    }
|
def parse_connection_string(self, connection):
    """Parse a string as returned by the ``connected_users_info`` or
    ``user_sessions_info`` API calls.

    >>> EjabberdBackendBase().parse_connection_string('c2s_tls')
    (0, True, False)
    >>> EjabberdBackendBase().parse_connection_string('c2s_compressed_tls')
    (0, True, True)
    >>> EjabberdBackendBase().parse_connection_string('http_bind')
    (2, None, None)

    :param connection: The connection string as returned by the ejabberd APIs.
    :type connection: str
    :return: A tuple representing the connection type, whether it is
        encrypted, and whether it uses XMPP stream compression.
    :rtype: tuple
    """
    # TODO: Websockets, HTTP Polling
    known = {
        'c2s_tls': (CONNECTION_XMPP, True, False),
        'c2s_compressed_tls': (CONNECTION_XMPP, True, True),
        'http_bind': (CONNECTION_HTTP_BINDING, None, None),
        'c2s': (CONNECTION_XMPP, False, False),
    }
    try:
        return known[connection]
    except KeyError:
        log.warn('Could not parse connection string "%s"', connection)
        return CONNECTION_UNKNOWN, True, True
|
def setLineEdit(self, lineEdit):
    """Set the line edit instance for this label.

    :param lineEdit: | <XLineEdit> (or a falsy value to just store it)
    """
    self._lineEdit = lineEdit
    if not lineEdit:
        return
    # Adopt the label's font and geometry, watch its events, and stay
    # hidden until editing is activated.
    lineEdit.setFont(self.font())
    lineEdit.installEventFilter(self)
    lineEdit.resize(self.size())
    lineEdit.hide()
|
def del_node(self, name):
    '''API: del_node(self, name)
    Description:
        Removes node from Graph.
    Input:
        name: Name of the node.
    Pre:
        Graph should contain a node with this name.
    Post:
        self.neighbors, self.nodes and self.in_neighbors are updated.'''
    if name not in self.neighbors:
        raise Exception('Node %s does not exist!' % str(name))
    # Drop every outgoing edge and unlink this node from its neighbors.
    for n in self.neighbors[name]:
        del self.edge_attr[(name, n)]
        if self.graph_type == UNDIRECTED_GRAPH:
            self.neighbors[n].remove(name)
        else:
            self.in_neighbors[n].remove(name)
    # For directed graphs, also drop every incoming edge.  (Changed the
    # original ``is DIRECTED_GRAPH`` to ``==`` for consistency with the
    # UNDIRECTED_GRAPH check above; identity comparison on module-level
    # constants is fragile.)
    if self.graph_type == DIRECTED_GRAPH:
        for n in self.in_neighbors[name]:
            del self.edge_attr[(n, name)]
            self.neighbors[n].remove(name)
    del self.neighbors[name]
    del self.in_neighbors[name]
    del self.nodes[name]
|
def search_registered_query_deleted_entities(self, **kwargs):  # noqa: E501
    """Search over a customer's deleted derived metric definitions.  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.search_registered_query_deleted_entities(async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param SortableSearchRequest body:
    :return: ResponseContainerPagedDerivedMetricDefinition
             If the method is called asynchronously, returns the request
             thread.
    """
    # Always request only the HTTP payload, not the full response triple.
    kwargs['_return_http_data_only'] = True
    delegate = self.search_registered_query_deleted_entities_with_http_info
    if kwargs.get('async_req'):
        # Asynchronous mode: the delegate returns the request thread itself.
        return delegate(**kwargs)  # noqa: E501
    (data) = delegate(**kwargs)  # noqa: E501
    return data
|
def is_archived(self, experiment, ignore_missing=True):
    """Convenience function to determine whether the given experiment has
    been archived already.

    Parameters
    ----------
    experiment : str
        The experiment to check
    ignore_missing : bool
        If True (default), an unknown experiment simply yields None; if
        False, an unknown experiment raises a KeyError.

    Returns
    -------
    str or None
        The path to the archive if it has been archived, otherwise None
    """
    if ignore_missing:
        # Non-Archive default (True) so an unknown experiment falls through
        # to the None return below.  (The original looked the entry up twice.)
        entry = self.config.experiments.get(experiment, True)
    else:
        entry = self.config.experiments[experiment]  # may raise KeyError
    if isinstance(entry, Archive):
        return entry
    return None
|
def _read_console_output(self, ws, out):
    """Read the Docker console WebSocket and forward its frames to the
    telnet output stream until the socket closes or errors.

    :param ws: Websocket connection
    :param out: Output stream (asyncio feed_data/feed_eof interface)
    """
    while True:
        msg = yield from ws.receive()
        if msg.tp == aiohttp.WSMsgType.text:
            # Text frames must be re-encoded to bytes for the stream.
            out.feed_data(msg.data.encode())
        elif msg.tp == aiohttp.WSMsgType.BINARY:
            out.feed_data(msg.data)
        elif msg.tp == aiohttp.WSMsgType.ERROR:
            log.critical("Docker WebSocket Error: {}".format(msg.data))
        else:
            # Any other frame (e.g. close) ends the forwarding loop.
            out.feed_eof()
            ws.close()
            break
    # Once the console stream is gone, stop the whole handler.
    yield from self.stop()
|
def serialize(self, method="urlencoded", lev=0, **kwargs):
    """Convert this instance to another representation.

    The representation is chosen by *method*: presently 'urlencoded',
    'json', 'jwt' and 'dict' are supported (dispatched to ``to_<method>``).

    :param method: A serialization method.
    :param lev:
    :param kwargs: Extra keyword arguments
    :return: The content of this message serialized with the chosen method
    """
    serializer = getattr(self, "to_%s" % method)
    return serializer(lev=lev, **kwargs)
|
def send_reset_password_email(person):
    """Send an email to *person* allowing them to (re)set their password.

    Builds a one-time token URL under REGISTRATION_BASE_URL and mails the
    rendered 'reset_password' template to the person's address.
    """
    # NOTE(review): .decode("ascii") implies urlsafe_base64_encode returns
    # bytes (Django < 3); newer Django returns str -- confirm the Django
    # version in use.
    uid = urlsafe_base64_encode(force_bytes(person.pk)).decode("ascii")
    token = default_token_generator.make_token(person)
    url = '%s/persons/reset/%s/%s/' % (settings.REGISTRATION_BASE_URL, uid, token)
    context = CONTEXT.copy()
    context.update({'url': url, 'receiver': person, })
    to_email = person.email
    subject, body = render_email('reset_password', context)
    send_mail(subject, body, settings.ACCOUNTS_EMAIL, [to_email])
|
def value(self):
    """Value of a reference property.

    Resolves the referenced :class:`Part` instances from the stored id
    references, caching the result after the first lookup.  You can set the
    reference with a Part, Part id or None value; ensure that the model of
    the provided part matches the configured model.

    :return: a list of :class:`Part` objects, or None when nothing is set
    :raises APIError: When unable to find the associated :class:`Part`
    """
    if not self._value:
        return None
    needs_fetch = not self._cached_values and isinstance(self._value, (list, tuple))
    if needs_fetch:
        # Resolve all referenced parts in a single client call and cache them.
        part_ids = [ref.get('id') for ref in self._value]
        self._cached_values = list(self._client.parts(id__in=','.join(part_ids), category=None))
    return self._cached_values
|
def get_url_reports(self, resources):
    """Retrieve scan reports for the given URLs.

    :param resources: list of URLs to look up
    :return: dict mapping each URL to its VirusTotal report
    """
    api_name = 'virustotal-url-reports'
    # Serve what we can from the cache; only the misses are fetched remotely.
    all_responses, remaining = self._bulk_cache_lookup(api_name, resources)
    chunks = self._prepare_resource_chunks(remaining, '\n')
    responses = self._request_reports("resource", chunks, 'url/report')
    self._extract_response_chunks(all_responses, responses, api_name)
    return all_responses
|
def argument(*args, **kwargs):
    """Decorator that registers an argparse option or argument on a function.

    The arguments to this decorator are the same as the
    `ArgumentParser.add_argument
    <https://docs.python.org/3/library/argparse.html#the-add-argument-method>`_
    method.
    """
    def decorator(func):
        # Lazily create the per-function registries on first use.
        if not hasattr(func, '_arguments'):
            func._arguments = []
        if not hasattr(func, '_argnames'):
            func._argnames = []
        func._arguments.append((args, kwargs))
        func._argnames.append(_get_dest(*args, **kwargs))
        return func
    return decorator
|
def trim(self: 'Variable', lower=None, upper=None) -> None:
    """Trim the value(s) of a |Variable| instance to the interval
    defined by ``lower`` and ``upper``.

    When a bound is not passed explicitly, it is taken from the class
    attribute ``SPAN`` of the given |Variable| instance; ``None`` and
    ``numpy.nan`` entries mean "no bound on this side".

    Behaviour depends on the variable's ``TYPE``:

     * ``float``: outliers are set to the nearest bound.  Unless the
       violation is tiny (precision noise), a ``UserWarning`` reports
       the old and the new value(s); the warning can be silenced via
       option ``warntrim``.
     * ``int``: out-of-bound values raise a ``ValueError`` and the
       wrong value is left unchanged.
     * ``bool``: nothing happens (outliers make no sense for flags).
     * anything else: a ``NotImplementedError`` is raised.

    The whole mechanism can be disabled via option ``trimvariables``.
    Works for 0-dimensional and n-dimensional variables alike; the
    actual work is delegated to the ``_trim_*`` helper functions.

    Usually, users do not need to apply function |trim| directly.
    Instead, |Variable| subclasses implement their own ``trim``
    methods relying on this function.
    """
    if hydpy.pub.options.trimvariables:
        # Fall back to the variable's SPAN entries when no explicit
        # bounds are given.  NOTE(review): assumes SPAN is always
        # resolvable on the instance (presumably the Variable base
        # class provides a (None, None) default) -- TODO confirm.
        if lower is None:
            lower = self.SPAN[0]
        if upper is None:
            upper = self.SPAN[1]
        type_ = getattr(self, 'TYPE', float)
        # Dispatch on value type and dimensionality to the matching
        # helper; booleans are deliberately a no-op.
        if type_ is float:
            if self.NDIM == 0:
                _trim_float_0d(self, lower, upper)
            else:
                _trim_float_nd(self, lower, upper)
        elif type_ is int:
            if self.NDIM == 0:
                _trim_int_0d(self, lower, upper)
            else:
                _trim_int_nd(self, lower, upper)
        elif type_ is bool:
            pass
        else:
            raise NotImplementedError(
                f'Method `trim` can only be applied on parameters '
                f'handling floating point, integer, or boolean values, '
                f'but the "value type" of parameter `{self.name}` is '
                f'`{objecttools.classname(self.TYPE)}`.')
|
def clear_texts(self):
    """Reset the ``texts`` entry of the wrapped form's map to its
    metadata default.

    Raises:
        NoAccess: when the texts metadata is read-only.
    """
    metadata = self.get_texts_metadata()
    if metadata.is_read_only():
        raise NoAccess()
    default_value = self._texts_metadata['default_object_values'][0]
    self.my_osid_object_form._my_map['texts'] = default_value
|
def in_domain(self, points):
    """Return ``True`` if all of the given points are in the domain,
    ``False`` otherwise.

    :param np.ndarray points: An `np.ndarray` of type `self.dtype`.
    :rtype: `bool`
    """
    # Split the structured array into one plain array per sub-domain
    # and require every sub-domain to accept its slice.
    per_domain_arrays = separate_struct_array(points, self._dtypes)
    return all(
        sub_domain.in_domain(sub_array)
        for sub_domain, sub_array in zip(self._domains, per_domain_arrays)
    )
|
def enable_global_auto_override_decorator(flag=True, retrospective=True):
    """Enable or disable global auto_override mode via decorators.

    See flag ``global_auto_override_decorator``.  In contrast to setting
    the flag directly, this function provides a retrospective option:
    when ``retrospective`` is true, already imported modules are also
    affected, not only future imports.

    Returns the new value of the global flag.
    """
    global global_auto_override_decorator
    global_auto_override_decorator = flag
    if import_hook_enabled:
        _install_import_hook()
    # Retroactively process modules that were imported before the flag
    # was switched on.
    if retrospective and global_auto_override_decorator:
        _catch_up_global_auto_override_decorator()
    return global_auto_override_decorator
|
def ccmod_ystep():
    """Do the Y step of the ccmod stage.

    There are no parameters or return values because all inputs and
    outputs are from and to module-level shared (``mp_*``) variables.
    """
    # Average X + U over the first axis, then apply the proximal
    # operator in place on the shared Y array.
    mean_axu = np.mean(mp_D_X + mp_D_U, axis=0)
    mp_D_Y[:] = mp_dprox(mean_axu)
|
def _paragraph ( self , sentences ) :
"""Generate a paragraph"""
|
paragraph = [ ]
for i in range ( sentences ) :
sentence = self . _sentence ( random . randint ( 5 , 16 ) )
paragraph . append ( sentence )
return ' ' . join ( paragraph )
|
def get_core(self):
    """Return an unsatisfiable core if the formula was previously
    found unsatisfiable; ``None`` otherwise."""
    if not self.maplesat:
        return None
    # status may be None (undetermined) -- only an explicit False
    # (UNSAT) result has a core, hence the equality test.
    if self.status == False:
        return pysolvers.maplechrono_core(self.maplesat)
|
def build_select(query_obj):
    """Given a Query obj, return the corresponding sql."""
    q = query_obj
    return build_select_query(
        q.source,
        q.fields,
        q.filter,
        skip=q.skip,
        limit=q.limit,
        sort=q.sort,
        distinct=q.distinct,
    )
|
def show_domain(self, domain_id):
    """Return the specified domain.

    Required parameters:
        domain_id: Integer or Domain Name (e.g. domain.com), specifies
            the domain to display.

    Raises:
        DOPException: when the API response status is not 'OK'.
    """
    response = self.request('/domains/%s' % domain_id, method='GET')
    status = response.get('status')
    if status != 'OK':
        # Surface the API-provided failure message to the caller.
        raise DOPException('[%s]: %s' % (status, response.get('message')))
    return Domain.from_json(response.get('domain'))
|
def read_price_data(files, name_func=None):
    """Convenience function for reading in pricing data from csv files.

    Parameters
    ----------
    files : list
        List of strings referring to csv files to read data in from;
        the first column should be dates.
    name_func : func
        A function applied to each file string to infer the instrument
        name used in the second level of the MultiIndex.  Default is
        the file name excluding pathname and file ending,
        e.g. /path/to/file/name.csv -> name.

    Returns
    -------
    A pandas.DataFrame with a pandas.MultiIndex whose top level holds
    pandas.Timestamps and whose second level holds instrument names.
    Columns are given by the csv file columns.
    """
    if name_func is None:
        def name_func(fname):
            return os.path.split(fname)[1].split(".")[0]
    frames = []
    for fname in files:
        instrument = name_func(fname)
        frame = pd.read_csv(fname, index_col=0, parse_dates=True)
        frame.sort_index(inplace=True)
        # Tag every row of this file with its instrument name.
        frame.index = pd.MultiIndex.from_product(
            [frame.index, [instrument]], names=["date", "contract"])
        frames.append(frame)
    return pd.concat(frames, axis=0, sort=False).sort_index()
|
def get_prefix_dir(archive):
    """Often, all files are in a single directory. If so, they'll all have
    the same prefix. Determine any such prefix.

    archive is a ZipFile (any object with a ``namelist`` method works).

    Returns the longest common character prefix of all member names,
    or '' when there is none (including for an empty archive).
    """
    names = archive.namelist()
    if not names:
        # An empty archive trivially has no shared prefix.  (The old
        # candidate-loop implementation crashed here with IndexError.)
        return ''
    # os.path.commonprefix performs the same character-level longest
    # common prefix computation as the previous O(len^2 * n) candidate
    # loop, in a single linear pass.
    return os.path.commonprefix(names)
|
def mapper_from_prior_arguments(self, arguments):
    """Create a new model mapper from a dictionary mapping existing
    priors to new priors.

    Parameters
    ----------
    arguments : {Prior: Prior}
        A dictionary mapping priors to priors.

    Returns
    -------
    model_mapper : ModelMapper
        A new model mapper with updated priors.
    """
    new_mapper = copy.deepcopy(self)
    for name_and_model in self.prior_model_tuples:
        updated_model = name_and_model.prior_model.gaussian_prior_model_for_arguments(arguments)
        setattr(new_mapper, name_and_model.name, updated_model)
    return new_mapper
|
def from_conll(this_class, stream):
    """Construct a Sentence.  ``stream`` is an iterable over strings
    where each string is a line in CoNLL-X format.  If there are
    multiple sentences in this stream, only the first one is returned.
    """
    sentence = this_class()
    for raw_line in iter(stream):
        stripped = raw_line.strip()
        if stripped:
            sentence.append(Token.from_conll(stripped))
        elif sentence:
            # Blank line after at least one token ends the sentence.
            break
    return sentence
|
def lu_solve(LU, b):
    r"""Solve :math:`\mathrm A \mathbf x = \mathbf b` given the LU
    factorization of :math:`\mathrm A`.

    Args:
        LU (array_like): LU decomposition (as returned by
            ``scipy.linalg.lu_factor``).
        b (array_like): Right-hand side.

    Returns:
        :class:`numpy.ndarray`: The solution :math:`\mathbf x`.

    See Also:
        scipy.linalg.lu_factor, scipy.linalg.lu_solve.
    """
    from scipy.linalg import lu_solve as sp_lu_solve
    lu_matrix, pivots = LU
    factorization = (asarray(lu_matrix, float), asarray(pivots, float))
    rhs = asarray(b, float)
    return sp_lu_solve(factorization, rhs, check_finite=False)
|
def sort_by(self, *ids):
    """Update files order.

    :param ids: List of ids specifying the final status of the list.
        Each entry may be either a file id or a key.
    """
    # Map stringified file ids to keys so callers may pass either form.
    key_by_id = {str(f.file_id): f.key for f in self}
    ordered = OrderedDict()
    for id_ in ids:
        key = key_by_id.get(id_, id_)
        ordered[key] = self[key].dumps()
    self.filesmap = ordered
    self.flush()
|
def l_constraint(model, name, constraints, *args):
    """A replacement for pyomo's Constraint that quickly builds linear
    constraints.

    Instead of::

        model.name = Constraint(index1, index2, ..., rule=f)

    call instead::

        l_constraint(model, name, constraints, index1, index2, ...)

    where ``constraints`` is a dictionary of constraints of the form::

        constraints[i] = LConstraint object

    OR using the soon-to-be-deprecated list format::

        constraints[i] = [[(coeff1, var1), (coeff2, var2), ...], sense, constant_term]

    i.e. the first argument is a list of tuples with the variables and
    their coefficients, the second argument is the sense string (must be
    one of "==", "<=", ">=", "><") and the third argument is the constant
    term (a float).  The sense "><" allows lower and upper bounds and
    requires ``constant_term`` to be a 2-tuple.

    Variables may be repeated with different coefficients, which pyomo
    will sum up.

    Parameters
    ----------
    model : pyomo.environ.ConcreteModel
    name : string
        Name of constraints to be constructed
    constraints : dict
        A dictionary of constraints (see format above)
    *args :
        Indices of the constraints
    """
    # Create an empty Constraint container (noruleinit skips pyomo's
    # rule machinery) and fill in its private data directly -- this is
    # much faster than letting pyomo build the expressions itself.
    setattr(model, name, Constraint(*args, noruleinit=True))
    v = getattr(model, name)
    for i in v._index:
        c = constraints[i]
        if type(c) is LConstraint:
            # Normalize: move all variable terms to the LHS and the
            # constant terms to the RHS.
            variables = c.lhs.variables + [(-item[0], item[1]) for item in c.rhs.variables]
            sense = c.sense
            constant = c.rhs.constant - c.lhs.constant
        else:
            # Deprecated list format: [terms, sense, constant].
            variables = c[0]
            sense = c[1]
            constant = c[2]
        # Build the constraint data object and set body/bounds by hand.
        v._data[i] = pyomo.core.base.constraint._GeneralConstraintData(None, v)
        v._data[i]._body = _build_sum_expression(variables)
        if sense == "==":
            v._data[i]._equality = True
            v._data[i]._lower = pyomo.core.base.numvalue.NumericConstant(constant)
            v._data[i]._upper = pyomo.core.base.numvalue.NumericConstant(constant)
        elif sense == "<=":
            v._data[i]._equality = False
            v._data[i]._lower = None
            v._data[i]._upper = pyomo.core.base.numvalue.NumericConstant(constant)
        elif sense == ">=":
            v._data[i]._equality = False
            v._data[i]._lower = pyomo.core.base.numvalue.NumericConstant(constant)
            v._data[i]._upper = None
        elif sense == "><":
            # Two-sided bound: constant must be a (lower, upper) 2-tuple.
            v._data[i]._equality = False
            v._data[i]._lower = pyomo.core.base.numvalue.NumericConstant(constant[0])
            v._data[i]._upper = pyomo.core.base.numvalue.NumericConstant(constant[1])
        else:
            raise KeyError('`sense` must be one of "==","<=",">=","><"; got: {}'.format(sense))
|
def stream(self, device_sid=values.unset, limit=None, page_size=None):
    """Stream KeyInstance records from the API as a generator.

    Records are loaded lazily and as efficiently as possible until the
    limit is reached, so this operation is memory efficient.

    :param unicode device_sid: Find all Keys authenticating specified Device.
    :param int limit: Upper limit for the number of records to return;
        stream() guarantees to never return more than limit.
        Default is no limit.
    :param int page_size: Number of records to fetch per request; when
        unset the default of 50 is used.  If only a limit is defined,
        stream() reads the limit with the most efficient page size,
        i.e. min(limit, 1000).
    :returns: Generator that will yield up to limit results
    :rtype: list[twilio.rest.preview.deployed_devices.fleet.key.KeyInstance]
    """
    limits = self._version.read_limits(limit, page_size)
    first_page = self.page(device_sid=device_sid, page_size=limits['page_size'])
    return self._version.stream(first_page, limits['limit'], limits['page_limit'])
|
def dot_product_single_head(q, k, v, gates_q, gates_k, bi):
    """Perform a dot product attention on a single sequence on a single head.

    This function dispatches q, k, v into buckets and loops over the
    buckets to compute the attention dot product on each subsequence.

    Args:
        q (tf.Tensor): [length_q, depth_q]
        k (tf.Tensor): [length_k, depth_q]
        v (tf.Tensor): [length_k, depth_v]
        gates_q (tf.Tensor): One-hot vector of shape [length_q, nb_buckets]
        gates_k (tf.Tensor): One-hot vector of shape [length_k, nb_buckets]
        bi (BatchInfo): Contains the batch coordinates and sequence order

    Returns:
        tf.Tensor: [length_q, depth_v]
    """
    nb_buckets = gates_q.get_shape().as_list()[-1]
    # Separate dispatchers for queries vs. keys/values, since they may
    # be routed to buckets differently.
    q_dispatcher = expert_utils.SparseDispatcher(nb_buckets, gates_q)
    k_dispatcher = expert_utils.SparseDispatcher(nb_buckets, gates_k)

    def eventually_dispatch(dispatcher, value):
        # Dispatch only when the side information exists; otherwise
        # provide a per-bucket placeholder of Nones.
        if value is not None:
            return dispatcher.dispatch(value)
        return [None] * nb_buckets

    # Iterate over every dispatched group
    list_v_out = []
    for (q_i, k_i, v_i, qbc, qbo, kbc, kbo,) in zip(
            # Dispatch queries, keys and values
            q_dispatcher.dispatch(q),
            k_dispatcher.dispatch(k),
            k_dispatcher.dispatch(v),
            # Also dispatch the sequence positions and batch coordinates
            eventually_dispatch(q_dispatcher, bi.coordinates),
            eventually_dispatch(q_dispatcher, bi.order),
            eventually_dispatch(k_dispatcher, bi.coordinates),
            eventually_dispatch(k_dispatcher, bi.order),):
        list_v_out.append(expert_dot_product(
            q_i, k_i, v_i,
            info_q=BatchInfo(coordinates=qbc, order=qbo),
            info_k=BatchInfo(coordinates=kbc, order=kbo)))
    # Combine all buckets together to restore the original length
    return q_dispatcher.combine(list_v_out)
|
def compute_n_splits(cv, X, y=None, groups=None):
    """Return the number of splits.

    Parameters
    ----------
    cv : BaseCrossValidator
    X, y, groups : array_like, dask object, or None

    Returns
    -------
    n_splits : int
    """
    # Cheap path: nothing is lazy, so just ask the validator directly.
    if not any(map(is_dask_collection, (X, y, groups))):
        return cv.get_n_splits(X, y, groups)
    # Otherwise, answer without materializing dask collections
    # whenever the validator type allows it.
    if isinstance(cv, (_BaseKFold, BaseShuffleSplit)):
        return cv.n_splits
    if isinstance(cv, PredefinedSplit):
        return len(cv.unique_folds)
    if isinstance(cv, _CVIterableWrapper):
        return len(cv.cv)
    if isinstance(cv, (LeaveOneOut, LeavePOut)) and not is_dask_collection(X):
        # Only `X` is referenced for these classes
        return cv.get_n_splits(X, None, None)
    if isinstance(cv, (LeaveOneGroupOut, LeavePGroupsOut)) and not is_dask_collection(groups):
        # Only `groups` is referenced for these classes
        return cv.get_n_splits(None, None, groups)
    # Fall back to computing lazily on the cluster.
    return delayed(cv).get_n_splits(X, y, groups).compute()
|
def local_open(url):
    """Read a local path, with special support for directories."""
    scheme, server, path, param, query, frag = urllib.parse.urlparse(url)
    filename = urllib.request.url2pathname(path)
    if os.path.isfile(filename):
        return urllib.request.urlopen(url)
    if path.endswith('/') and os.path.isdir(filename):
        status, message = 200, "OK"
        body = None
        links = []
        for entry in os.listdir(filename):
            entry_path = os.path.join(filename, entry)
            if entry == 'index.html':
                # A real index page wins over the generated listing.
                with open(entry_path, 'r') as fp:
                    body = fp.read()
                break
            if os.path.isdir(entry_path):
                entry += '/'
            links.append('<a href="{name}">{name}</a>'.format(name=entry))
        if body is None:
            # No index.html found: synthesize a directory listing.
            tmpl = ("<html><head><title>{url}</title>"
                    "</head><body>{files}</body></html>")
            body = tmpl.format(url=url, files='\n'.join(links))
    else:
        status, message, body = 404, "Path not found", "Not found"
    headers = {'content-type': 'text/html'}
    return urllib.error.HTTPError(url, status, message, headers, six.StringIO(body))
|
def bresenham_circle_octant(radius):
    """Use Bresenham's algorithm to draw a single octant of a circle of
    thickness 1, centered on the origin with the given radius.

    :param radius: The radius of the circle to draw
    :return: A list of integer coordinates representing pixels.
        Starts at (radius, 0) and ends with a pixel (x, y) where x == y.
    """
    target = radius * radius
    pixels = []
    x, y = radius, 0
    while x >= y:
        pixels.append((x, y))
        y += 1
        # Step x inward when doing so keeps us closer to the ideal circle.
        error_if_stepped = abs((x - 1) * (x - 1) + y * y - target)
        error_if_kept = abs(x * x + y * y - target)
        if error_if_stepped < error_if_kept:
            x -= 1
    # Close the octant on the line x == y if we did not land on it.
    last_x, last_y = pixels[-1]
    if last_x != last_y:
        pixels.append((last_x, last_x))
    return pixels
|
def CopyToDateTimeString(self):
    """Copy the FILETIME timestamp to a date and time string.

    Returns:
        str: date and time value formatted as
        "YYYY-MM-DD hh:mm:ss.#######", or None if the timestamp is
        missing or invalid.
    """
    timestamp = self._timestamp
    if timestamp is None or timestamp < 0 or timestamp > self._UINT64_MAX:
        return None
    # Split into whole seconds and the remaining 100ns intervals.
    whole_seconds, remainder = divmod(timestamp, self._100NS_PER_SECOND)
    number_of_days, hours, minutes, seconds = self._GetTimeValues(whole_seconds)
    year, month, day_of_month = self._GetDateValuesWithEpoch(number_of_days, self._EPOCH)
    return '{0:04d}-{1:02d}-{2:02d} {3:02d}:{4:02d}:{5:02d}.{6:07d}'.format(
        year, month, day_of_month, hours, minutes, seconds, remainder)
|
def reverse(self, query, exactly_one=DEFAULT_SENTINEL, timeout=DEFAULT_SENTINEL, kind=None):
    """Return an address by location point.

    :param query: The coordinates for which you wish to obtain the
        closest human-readable addresses.
    :type query: :class:`geopy.point.Point`, list or tuple of
        ``(latitude, longitude)``, or string as
        ``"%(latitude)s, %(longitude)s"``.
    :param bool exactly_one: Return one result or a list of results, if
        available.  Currently defaults to ``False`` with a deprecation
        warning; pass it explicitly (it becomes ``True`` in geopy 2.0).
    :param int timeout: Time, in seconds, to wait for the geocoding
        service before raising :class:`geopy.exc.GeocoderTimedOut`.
        Overrides, for this call only, the value set at initialization.
    :param str kind: Type of toponym.  Allowed values: `house`,
        `street`, `metro`, `district`, `locality`.
    :rtype: ``None``, :class:`geopy.location.Location` or a list of
        them, if ``exactly_one=False``.
    """
    if exactly_one is DEFAULT_SENTINEL:
        warnings.warn(
            '%s.reverse: default value for `exactly_one` '
            'argument will become True in geopy 2.0. '
            'Specify `exactly_one=False` as the argument '
            'explicitly to get rid of this warning.' % type(self).__name__,
            DeprecationWarning,
            stacklevel=2,
        )
        exactly_one = False
    try:
        point = self._coerce_point_to_string(query, "%(lon)s,%(lat)s")
    except ValueError:
        raise ValueError("Must be a coordinate pair or Point")
    params = {'geocode': point, 'format': 'json'}
    if self.api_key:
        params['apikey'] = self.api_key
    if self.lang:
        params['lang'] = self.lang
    if kind:
        params['kind'] = kind
    url = "?".join((self.api, urlencode(params)))
    logger.debug("%s.reverse: %s", self.__class__.__name__, url)
    response = self._call_geocoder(url, timeout=timeout)
    return self._parse_json(response, exactly_one)
|
def get_locations_list(self, lower_bound=0, upper_bound=None):
    """Return a slice of the internal location list.

    Args:
        lower_bound: First index of the returned slice (default 0).
        upper_bound: End index (exclusive).  Defaults to
            ``self.nbr_of_sub_locations()``.

    Returns:
        The selected sub-list, or an empty list when the internal list
        is missing or the bounds are unusable.
    """
    real_upper_bound = upper_bound
    if upper_bound is None:
        real_upper_bound = self.nbr_of_sub_locations()
    try:
        return self._locations_list[lower_bound:real_upper_bound]
    except (AttributeError, TypeError):
        # Preserve the original best-effort contract (fall back to an
        # empty list) but no longer use a bare `except:`, which also
        # swallowed KeyboardInterrupt/SystemExit and hid real bugs.
        return list()
|
def want_host_notification(self, notifways, timeperiods, timestamp, state, n_type, business_impact, cmd=None):
    """Check if notification options match the state of the host.

    :param timestamp: time we want to notify the contact (usually now)
    :type timestamp: int
    :param state: host or service state ("UP", "DOWN"..)
    :type state: str
    :param n_type: type of notification ("PROBLEM", "RECOVERY"..)
    :type n_type: str
    :param business_impact: impact of this host
    :type business_impact: int
    :param cmd: command launched to notify the contact
    :type cmd: str
    :return: True if contact wants notification, otherwise False
    :rtype: bool
    """
    if not self.host_notifications_enabled:
        return False
    # An active downtime suppresses all notifications.
    for downtime in self.downtimes:
        if downtime.is_in_effect:
            self.in_scheduled_downtime = True
            return False
    self.in_scheduled_downtime = False
    # Ask each notification way; a single positive answer is enough.
    # (Further filtering happens in another phase.)
    return any(
        notifways[notifway_id].want_host_notification(
            timeperiods, timestamp, state, n_type, business_impact, cmd)
        for notifway_id in self.notificationways
    )
|
def pitch(self):
    """Calculate the pitch of the quaternion, in radians."""
    # atan2 of the rotation's sine-like and cosine-like components.
    numerator = 2 * self.x * self.w - 2 * self.y * self.z
    denominator = 1 - 2 * self.x * self.x - 2 * self.z * self.z
    return math.atan2(numerator, denominator)
|
def add_codes(err_cls):
    """Add error codes to string messages via class attribute names.

    Returns an object whose attribute access looks the message up on
    ``err_cls`` and prefixes it with the attribute name as the code.
    """
    class ErrorsWithCodes(object):
        def __getattribute__(self, code):
            return '[{code}] {msg}'.format(code=code, msg=getattr(err_cls, code))
    return ErrorsWithCodes()
|
def lines(self):
    """List of file lines, read lazily from ``self.path`` and cached."""
    if self._lines is not None:
        return self._lines
    with io.open(self.path, 'r', encoding='utf-8') as handle:
        content = handle.read()
    self._lines = content.split('\n')
    return self._lines
|
def tcache(parser, token):
    """Cache the contents of a template fragment for a given amount of
    time, with support for tags.

    Usage::

        {% tcache [expire_time] [fragment_name] [tags='tag1,tag2'] %}
        .. some expensive processing ..
        {% endtcache %}

    The tag also supports varying by a list of arguments::

        {% tcache [expire_time] [fragment_name] [var1] [var2] .. [tags=tags] %}
        .. some expensive processing ..
        {% endtcache %}

    Each unique set of arguments results in a unique cache entry.
    """
    nodelist = parser.parse(('endtcache',))
    parser.delete_first_token()
    bits = token.split_contents()
    if len(bits) < 3:
        raise template.TemplateSyntaxError("'%r' tag requires at least 2 arguments." % bits[0])
    tags = None
    # An optional trailing tags=... argument is peeled off first.
    if len(bits) > 3 and 'tags=' in bits[-1]:
        tags = parser.compile_filter(bits[-1][5:])
        bits = bits[:-1]
    vary_on = [parser.compile_filter(bit) for bit in bits[3:]]
    # bits[2] is the fragment name; it can't be a variable.
    return CacheNode(nodelist, parser.compile_filter(bits[1]), bits[2], vary_on, tags)
|
def _bounds ( component , glyph_set ) :
"""Return the ( xmin , ymin ) of the bounds of ` component ` ."""
|
if hasattr ( component , "bounds" ) : # e . g . defcon
return component . bounds [ : 2 ]
elif hasattr ( component , "draw" ) : # e . g . ufoLib2
pen = fontTools . pens . boundsPen . BoundsPen ( glyphSet = glyph_set )
component . draw ( pen )
return pen . bounds [ : 2 ]
else :
raise ValueError ( "Don't know to to compute the bounds of component '{}' " . format ( component ) )
|
def calc_outuh_quh_v1(self):
    """Calculate the unit hydrograph output (convolution).

    Required derived parameter: |UH|.  Required flux sequences: |Q0|,
    |Q1|, |InUH|.  Updated log sequence: |QUH|.  Calculated flux
    sequence: |OutUH|.

    The actual output is the first logged value plus the new input
    weighted by the first unit hydrograph ordinate.  The logging
    sequence is then shifted one step to the left, folding the new
    input in via the remaining ordinates.  A unit hydrograph with a
    single ordinate of 1.0 routes the input through directly.
    """
    der = self.parameters.derived.fastaccess
    flu = self.sequences.fluxes.fastaccess
    log = self.sequences.logs.fastaccess
    # Output: head of the log plus the input scaled by the first ordinate.
    flu.outuh = der.uh[0] * flu.inuh + log.quh[0]
    # Shift the log left while adding the input's delayed contributions.
    for idx in range(1, len(der.uh)):
        log.quh[idx - 1] = der.uh[idx] * flu.inuh + log.quh[idx]
|
def start_kex(self):
    """Kick off the GSS-API/SSPI authenticated Diffie-Hellman key exchange."""
    self._generate_x()
    if self.transport.server_mode:
        # Server side: compute f = g^x mod p now, but hold off on sending
        # it until the client's KEXGSS_INIT message arrives.
        self.f = pow(self.G, self.x, self.P)
        self.transport._expect_packet(MSG_KEXGSS_INIT)
        return
    # Client side: compute e = g^x mod p (where g = 2) and send it now.
    self.e = pow(self.G, self.x, self.P)
    # Initialize the GSS-API security context against the target host.
    self.gss_host = self.transport.gss_host
    msg = Message()
    msg.add_byte(c_MSG_KEXGSS_INIT)
    msg.add_string(self.kexgss.ssh_init_sec_context(target=self.gss_host))
    msg.add_mpint(self.e)
    self.transport._send_message(msg)
    self.transport._expect_packet(
        MSG_KEXGSS_HOSTKEY,
        MSG_KEXGSS_CONTINUE,
        MSG_KEXGSS_COMPLETE,
        MSG_KEXGSS_ERROR,
    )
|
def getWindowByTitle(self, wildcard, order=0):
    """Return a handle (HWND) for the first visible window whose title
    matches the provided "wildcard" regex (case-insensitive).

    Args:
        wildcard: Regular expression searched against each window title.
        order: Skip this many matching windows before recording one.

    Returns:
        The matching window handle, or None when no title matches.
    """
    EnumWindowsProc = ctypes.WINFUNCTYPE(
        ctypes.c_bool, ctypes.POINTER(ctypes.c_int), ctypes.py_object
    )

    def callback(hwnd, context):
        if ctypes.windll.user32.IsWindowVisible(hwnd):
            length = ctypes.windll.user32.GetWindowTextLengthW(hwnd)
            buff = ctypes.create_unicode_buffer(length + 1)
            ctypes.windll.user32.GetWindowTextW(hwnd, buff, length + 1)
            # Idiom fix: `is not None` instead of `!= None`; only record a
            # handle once (the `not context["handle"]` guard).
            if (re.search(context["wildcard"], buff.value, flags=re.I) is not None
                    and not context["handle"]):
                if context["order"] > 0:
                    context["order"] -= 1
                else:
                    context["handle"] = hwnd
        # Always return True so EnumWindows keeps enumerating.
        return True

    data = {"wildcard": wildcard, "handle": None, "order": order}
    ctypes.windll.user32.EnumWindows(EnumWindowsProc(callback), ctypes.py_object(data))
    return data["handle"]
|
def HuntIDToInt(hunt_id):
    """Convert hunt id string to an integer.

    Accepts plain hexadecimal ids as well as legacy ids prefixed with "H:";
    an empty id converts to 0.

    Raises:
        HuntIDIsNotAnIntegerError: if the id is not valid hexadecimal.
    """
    # TODO(user): This code is only needed for a brief period of time when we
    # allow running new rel-db flows with old aff4-based hunts. In this
    # scenario parent_hunt_id is effectively not used, but it has to be an
    # integer. Stripping "H:" from hunt ids then makes the rel-db happy.
    # Remove this code when hunts are rel-db only.
    if hunt_id.startswith("H:"):
        hunt_id = hunt_id[2:]
    try:
        return int(hunt_id or "0", 16)
    except ValueError as e:
        # Chain the original ValueError so the root cause stays visible.
        raise HuntIDIsNotAnIntegerError(e) from e
|
def _find_and_replace(self, date_string, captures):
    """Strip replacement tokens and captured timezones out of *date_string*.

    :warning: when multiple tz matches exist the last sorted capture will trump
    :param date_string:
    :return: date_string, tz_string
    """
    # Work on a copy so the module-level REPLACEMENTS table is not mutated;
    # every captured timezone string gets replaced by a single space.
    replacements = copy.copy(REPLACEMENTS)
    replacements.update({tz: " " for tz in captures.get("timezones", [])})
    date_string = date_string.lower()
    for key, substitute in replacements.items():
        # Match the key only when bounded by whitespace or the string edges,
        # so a key like 'to' can never rewrite the middle of 'october'.
        date_string = re.sub(
            r"(^|\s)" + key + r"(\s|$)",
            substitute,
            date_string,
            flags=re.IGNORECASE,
        )
    return date_string, self._pop_tz_string(sorted(captures.get("timezones", [])))
|
def stop_workers(self, clean):
    """Stop workers and deferred events.

    :param clean: when truthy, shut down gracefully (let queued work
        finish); otherwise abort outstanding work immediately.
        Presumably mirrors the multiprocessing ``close``/``terminate``
        split below — TODO confirm against callers.
    """
    # Shut the executor down first so no new work reaches the pool.
    with executor_lock:
        self.executor.shutdown(clean)
        del self.executor
    with self.worker_lock:
        if clean:
            # Graceful: stop accepting tasks, let running ones complete.
            self.pool.close()
        else:
            # Forceful: stop workers immediately.
            self.pool.terminate()
        self.pool.join()
        del self.pool
    # Cancel every deferred/timer event and forget them all.
    for x in self.events.values():
        x.event.cancel()
    self.events.clear()
|
def Minus(self, other):
    """Return a new Point: the pointwise subtraction of *other* from self."""
    dx = self.x - other.x
    dy = self.y - other.y
    dz = self.z - other.z
    return Point(dx, dy, dz)
|
def _handle_429(self, data):
    """Handle Lain being helpful (HTTP 429 throttling response)."""
    error = IOError("Too fast", data)
    self.conn.reraise(error)
|
def _assemble_and_send_request(self):
    """Fires off the Fedex request.

    @warning: NEVER CALL THIS METHOD DIRECTLY. CALL send_request(),
    WHICH RESIDES ON FedexBaseService AND IS INHERITED.
    """
    # Specifying an IntegratorId raises suds.TypeNotFound
    # ("Type not found: 'IntegratorId'"); setting it to None does not
    # appease it, so drop the attribute entirely.
    del self.ClientDetail.IntegratorId

    for detail in (
        self.WebAuthenticationDetail,
        self.ClientDetail,
        self.TransactionDetail,
        self.VersionId,
    ):
        self.logger.debug(detail)

    # Fire off the query.
    return self.client.service.serviceAvailability(
        WebAuthenticationDetail=self.WebAuthenticationDetail,
        ClientDetail=self.ClientDetail,
        TransactionDetail=self.TransactionDetail,
        Version=self.VersionId,
        Origin=self.Origin,
        Destination=self.Destination,
        ShipDate=self.ShipDate,
        CarrierCode=self.CarrierCode,
        Service=self.Service,
        Packaging=self.Packaging,
    )
|
def get_all_paths_from(self, start, seen=None):
    '''Return a tuple of (total_weight, path) pairs for every path that
    starts at *start*, walking ``self.edges`` depth-first.

    Each path is a tuple of nodes beginning with *start*; the trivial
    zero-weight path ``(start,)`` is always included.  The *seen*
    frozenset of already-visited nodes breaks cycles.

    BUGFIX: the original returned a list from the base case but a tuple
    from the recursive case; the return type is now consistently a tuple
    (the docstring also promised "a list" while mostly returning tuples).
    '''
    if seen is None:
        seen = frozenset()
    results = [(0, (start,))]
    # Stop at already-visited nodes and at nodes with no outgoing edges.
    if start in seen or start not in self.edges:
        return tuple(results)
    seen = seen | frozenset((start,))
    for node, edge_weight in self.edges[start].items():
        for subpath_weight, subpath in self.get_all_paths_from(node, seen):
            results.append((edge_weight + subpath_weight, (start,) + subpath))
    return tuple(results)
|
def create_cache(name):
    """Create a cache by name.

    Defaults to `NaiveCache` when *name* matches no registered subclass.
    """
    registry = {}
    for subclass in Cache.__subclasses__():
        registry[subclass.name()] = subclass
    cache_cls = registry.get(name, NaiveCache)
    return cache_cls()
|
def set_lim(min, max, name):
    """Set the domain bounds of the scale associated with the provided key.

    Parameters
    ----------
    name : hashable
        Any variable that can be used as a key for a dictionary

    Raises
    ------
    KeyError
        When no context figure is associated with the provided key.
    """
    # NOTE: `min`/`max` shadow the builtins, but renaming them would break
    # keyword callers, so the public signature is kept unchanged.
    dimension = _get_attribute_dimension(name)
    scale = _context['scales'][dimension]
    scale.min = min
    scale.max = max
    return scale
|
def feedback(self):
    """Access the feedback

    :returns: twilio.rest.api.v2010.account.message.feedback.FeedbackList
    :rtype: twilio.rest.api.v2010.account.message.feedback.FeedbackList
    """
    # Lazily construct and memoize the FeedbackList on first access.
    if self._feedback is None:
        self._feedback = FeedbackList(
            self._version,
            account_sid=self._solution['account_sid'],
            message_sid=self._solution['sid'],
        )
    return self._feedback
|
def stop_trial(self, trial_id):
    """Requests to stop trial by trial_id."""
    url = urljoin(self._path, "trials/{}".format(trial_id))
    return self._deserialize(requests.put(url))
|
def get_input(self):
    """Loads web input, initialise default values and check/sanitise some
    inputs from users."""
    user_input = web.input(
        user=[],
        task=[],
        aggregation=[],
        org_tags=[],
        grade_min='',
        grade_max='',
        sort_by="submitted_on",
        order='0',  # "0" for pymongo.DESCENDING, anything else for pymongo.ASCENDING
        limit='',
        filter_tags=[],
        filter_tags_presence=[],
        date_after='',
        date_before='',
        stat='with_stat',
    )
    # Sanitise inputs: every task/aggregation id must pass the id check.
    for item in itertools.chain(user_input.task, user_input.aggregation):
        if not id_checker(item):
            raise web.notfound()
    if user_input.sort_by not in self._allowed_sort:
        raise web.notfound()
    # Numeric-looking fields must be empty or consist solely of digits.
    for field in (user_input.grade_min, user_input.grade_max,
                  user_input.order, user_input.limit):
        if field != '' and not field.isdigit():
            raise web.notfound()
    return user_input
|
def start(**kwargs):
    '''Start KodeDrive daemon.'''
    message, is_error = cli_syncthing_adapter.start(**kwargs)
    click.echo("%s" % message, err=is_error)
|
def check_rules_dict(rules):
    """Verify the `rules` that classes may use for the `_rules` or
    `_binary_rules` class attribute.

    Specifically, `rules` must be a
    :class:`~collections.OrderedDict`-compatible object
    (list of key-value tuples, :class:`dict`,
    :class:`~collections.OrderedDict`)
    that maps a rule name (:class:`str`) to a rule. Each rule consists of a
    :class:`~qnet.algebra.pattern_matching.Pattern` and a replacement
    callable. The Pattern must be set up to match a
    :class:`~qnet.algebra.pattern_matching.ProtoExpr`. That is,
    the Pattern should be constructed through the
    :func:`~qnet.algebra.pattern_matching.pattern_head` routine.

    Raises:
        TypeError: If `rules` is not compatible with
            :class:`~collections.OrderedDict`, the keys in `rules` are not
            strings, or a rule is not a tuple of
            (:class:`~qnet.algebra.pattern_matching.Pattern`, `callable`)
        ValueError: If the `head`-attribute of each Pattern is not an
            instance of :class:`~qnet.algebra.pattern_matching.ProtoExpr`,
            or if there are duplicate keys in `rules`

    Returns:
        :class:`~collections.OrderedDict` of rules
    """
    from qnet.algebra.pattern_matching import Pattern, ProtoExpr

    if hasattr(rules, 'items'):
        # `rules` is already a dict / OrderedDict
        items = rules.items()
    else:
        # `rules` is a list of (key, value) tuples
        items = rules
    keys = set()
    for key_rule in items:
        try:
            key, rule = key_rule
        except (TypeError, ValueError):
            # A non-iterable entry raises TypeError, a wrong-length one
            # raises ValueError; both mean `rules` is malformed.
            raise TypeError("rules does not contain (key, rule) tuples")
        if not isinstance(key, str):
            raise TypeError("Key '%s' is not a string" % key)
        if key in keys:
            raise ValueError("Duplicate key '%s'" % key)
        else:
            keys.add(key)
        try:
            pat, replacement = rule
        except (TypeError, ValueError):
            # BUGFIX: unpacking a wrong-length tuple raises ValueError, not
            # TypeError, so the original `except TypeError` let malformed
            # rules escape with a raw ValueError instead of this message.
            raise TypeError(
                "Rule in '%s' is not a (pattern, replacement) tuple" % key)
        if not isinstance(pat, Pattern):
            raise TypeError("Pattern in '%s' is not a Pattern instance" % key)
        if pat.head is not ProtoExpr:
            raise ValueError("Pattern in '%s' does not match a ProtoExpr" % key)
        if not callable(replacement):
            raise ValueError("replacement in '%s' is not callable" % key)
        else:
            arg_names = inspect.signature(replacement).parameters.keys()
            if not arg_names == pat.wc_names:
                raise ValueError(
                    "arguments (%s) of replacement function differ from the "
                    "wildcard names (%s) in pattern" % (
                        ", ".join(sorted(arg_names)),
                        ", ".join(sorted(pat.wc_names))))
    return OrderedDict(rules)
|
def get_delivery_stats(api_key=None, secure=None, test=None, **request_args):
    '''Get delivery stats for your Postmark account.

    :param api_key: Your Postmark API key. Required, if `test` is not `True`.
    :param secure: Use the https scheme for the Postmark API.
        Defaults to `True`
    :param test: Use the Postmark Test API. Defaults to `False`.
    :param \\*\\*request_args: Keyword arguments to pass to
        :func:`requests.request`.
    :rtype: :class:`DeliveryStatsResponse`
    '''
    # Delegate to the module-level default delivery-stats endpoint object.
    return _default_delivery_stats.get(
        api_key=api_key, secure=secure, test=test, **request_args)
|
def read_stream(self, stream_id, since_epoch):
    '''get datafeed'''
    request = self.__agent__.Messages.get_v4_stream_sid_message(
        sessionToken=self.__session__,
        keyManagerToken=self.__keymngr__,
        sid=stream_id,
        since=since_epoch,
    )
    response, status_code = request.result()
    self.logger.debug('%s: %s' % (status_code, response))
    return status_code, response
|
def provision_product(AcceptLanguage=None, ProductId=None, ProvisioningArtifactId=None, PathId=None, ProvisionedProductName=None, ProvisioningParameters=None, Tags=None, NotificationArns=None, ProvisionToken=None):
    """Requests a Provision of a specified product. A ProvisionedProduct is a
    resourced instance for a product. For example, provisioning a
    CloudFormation-template-backed product results in launching a
    CloudFormation stack and all the underlying resources that come with it.

    You can check the status of this request using the DescribeRecord
    operation.

    See also: AWS API Documentation

    :example: response = client.provision_product(
        AcceptLanguage='string',
        ProductId='string',
        ProvisioningArtifactId='string',
        PathId='string',
        ProvisionedProductName='string',
        ProvisioningParameters=[{'Key': 'string', 'Value': 'string'}],
        Tags=[{'Key': 'string', 'Value': 'string'}],
        NotificationArns=['string'],
        ProvisionToken='string')

    :type AcceptLanguage: string
    :param AcceptLanguage: The language code to use for this operation.
        Supported language codes are as follows:
        'en' (English), 'jp' (Japanese), 'zh' (Chinese).
        If no code is specified, 'en' is used as the default.

    :type ProductId: string
    :param ProductId: [REQUIRED] The product identifier.

    :type ProvisioningArtifactId: string
    :param ProvisioningArtifactId: [REQUIRED] The provisioning artifact
        identifier for this product.

    :type PathId: string
    :param PathId: The identifier of the path for this product's
        provisioning. This value is optional if the product has a default
        path, and is required if there is more than one path for the
        specified product.

    :type ProvisionedProductName: string
    :param ProvisionedProductName: [REQUIRED] A user-friendly name to
        identify the ProvisionedProduct object. This value must be unique
        for the AWS account and cannot be updated after the product is
        provisioned.

    :type ProvisioningParameters: list
    :param ProvisioningParameters: Parameters specified by the administrator
        that are required for provisioning the product.
        (dict) -- The parameter key/value pairs used to provision a product.
        Key (string) -- The ProvisioningArtifactParameter.ParameterKey
        parameter from DescribeProvisioningParameters.
        Value (string) -- The value to use for provisioning. Any constraints
        on this value can be found in ProvisioningArtifactParameter for Key.

    :type Tags: list
    :param Tags: A list of tags to use as provisioning options.
        (dict) -- Key/value pairs to associate with this provisioning. These
        tags are entirely discretionary and are propagated to the resources
        created in the provisioning.
        Key (string) -- [REQUIRED] The ProvisioningArtifactParameter.TagKey
        parameter from DescribeProvisioningParameters.
        Value (string) -- [REQUIRED] The desired value for this key.

    :type NotificationArns: list
    :param NotificationArns: Passed to CloudFormation. The SNS topic ARNs to
        which to publish stack-related events. (string) --

    :type ProvisionToken: string
    :param ProvisionToken: [REQUIRED] An idempotency token that uniquely
        identifies the provisioning request.
        This field is autopopulated if not provided.

    :rtype: dict
    :return: {
        'RecordDetail': {
            'RecordId': 'string',
            'ProvisionedProductName': 'string',
            'Status': 'IN_PROGRESS'|'SUCCEEDED'|'ERROR',
            'CreatedTime': datetime(2015, 1, 1),
            'UpdatedTime': datetime(2015, 1, 1),
            'ProvisionedProductType': 'string',
            'RecordType': 'string',
            'ProvisionedProductId': 'string',
            'ProductId': 'string',
            'ProvisioningArtifactId': 'string',
            'PathId': 'string',
            'RecordErrors': [{'Code': 'string', 'Description': 'string'}],
            'RecordTags': [{'Key': 'string', 'Value': 'string'}]}}
    """
    # Documentation stub — presumably the real operation is bound at runtime
    # by the AWS SDK's service model (TODO confirm), so the empty body here
    # is intentional.
    pass
|
def _seconds_have_elapsed(token, num_seconds):
    """Tests if 'num_seconds' have passed since 'token' was requested.

    Not strictly thread-safe - may log with the wrong frequency if called
    concurrently from multiple threads. Accuracy depends on resolution of
    'timeit.default_timer()'.
    Always returns True on the first call for a given 'token'.

    Args:
        token: The token for which to look up the count.
        num_seconds: The number of seconds to test for.

    Returns:
        Whether it has been >= 'num_seconds' since 'token' was last requested.
    """
    now = timeit.default_timer()
    last = _log_timer_per_token.get(token)
    # Not enough time elapsed since the last request for this token.
    if last is not None and (now - last) < num_seconds:
        return False
    # First request for this token, or the interval has passed: reset timer.
    _log_timer_per_token[token] = now
    return True
|
def node_received_infos(node_id):
    """Get all the infos a node has been sent and has received.

    You must specify the node id in the url.
    You can also pass the info type.
    """
    exp = Experiment(session)
    # get the parameters
    info_type = request_parameter(
        parameter="info_type",
        parameter_type="known_class",
        default=models.Info,
    )
    # request_parameter returns a Response on failure; forward it as-is.
    # Idiom fix: isinstance() instead of `type(x) == Response`.
    if isinstance(info_type, Response):
        return info_type
    # check the node exists
    node = models.Node.query.get(node_id)
    if node is None:
        return error_response(
            error_type="/node/infos, node {} does not exist".format(node_id))
    # execute the request:
    infos = node.received_infos(type=info_type)
    try:
        # ping the experiment
        exp.info_get_request(node=node, infos=infos)
        session.commit()
    except Exception:
        return error_response(
            error_type="info_get_request error",
            status=403,
            participant=node.participant,
        )
    return success_response(infos=[i.__json__() for i in infos])
|
def averageSize(self):
    """Calculate the average size of a mesh.
    This is the mean of the vertex distances from the center of mass.
    """
    cm = self.centerOfMass()
    coords = self.coordinates(copy=False)
    if not len(coords):
        return 0
    n = len(coords)
    # Subsample roughly 10000 vertices at most to keep this cheap on
    # very large meshes.
    step = int(n / 10000.0) + 1
    total, count = 0.0, 0.0
    for i in np.arange(0, n, step):
        total += utils.mag(coords[i] - cm)
        count += 1
    return total / count
|
def _get_mapping(self, section):
    '''mapping will take the section name from a Singularity recipe
    and return a map function to add it to the appropriate place.
    Any lines that don't cleanly map are assumed to be comments.

    Parameters
    ==========
    section: the name of the Singularity recipe section

    Returns
    =======
    function: to map a line to its command group (e.g., install)
    '''
    # Ensure section is lowercase
    section = section.lower()
    mapping = {
        "environment": self._env,
        "comments": self._comments,
        "runscript": self._run,
        "labels": self._labels,
        "setup": self._setup,
        "files": self._files,
        "from": self._from,
        "post": self._post,
        "test": self._test,
        "help": self._comments,
    }
    # Idiom: dict.get with a default replaces the manual membership test;
    # unrecognized sections fall through to the comment handler.
    return mapping.get(section, self._comments)
|
def people(self):
    """Retrieve all people of the company

    :return: list of people objects
    :rtype: list
    """
    response = self._get_request(
        endpoint=self.ENDPOINT + '/' + str(self.id) + '/people',
    )
    element = self.element_from_string(response.text)
    decoder = fields.ListField(name=HightonConstants.PEOPLE, init_class=Person)
    return decoder.decode(element)
|
def create_eventhub ( self , ** kwargs ) :
"""todo make it so the client can be customised to publish / subscribe
Creates an instance of eventhub service"""
|
eventhub = predix . admin . eventhub . EventHub ( ** kwargs )
eventhub . create ( )
eventhub . add_to_manifest ( self )
eventhub . grant_client ( client_id = self . get_client_id ( ) , ** kwargs )
eventhub . add_to_manifest ( self )
return eventhub
|
def sign(ctx, filename):
    """Sign a json-formatted transaction

    Reads the transaction from *filename* when provided, otherwise from
    stdin; signs it and prints the signed transaction.
    """
    if filename:
        tx = filename.read()
    else:
        tx = sys.stdin.read()
    # SECURITY(review): eval() on externally supplied text executes arbitrary
    # Python. If the input is guaranteed JSON, json.loads() would be safe —
    # confirm no caller relies on Python-literal input before changing.
    tx = TransactionBuilder(eval(tx), bitshares_instance=ctx.bitshares)
    tx.appendMissingSignatures()
    tx.sign()
    print_tx(tx.json())
|
def get_dot_atom_text(value):
    """dot-text = 1*atext *("." 1*atext)

    Parse a leading dot-atom-text token (RFC 5322) off *value* and return
    ``(DotAtomText token, remaining unparsed string)``.  Raises
    HeaderParseError when *value* does not start with atext or ends with
    a dangling dot.
    """
    dot_atom_text = DotAtomText()
    # Must begin with at least one atext character.
    if not value or value[0] in ATOM_ENDS:
        raise errors.HeaderParseError("expected atom at a start of " "dot-atom-text but found '{}'".format(value))
    # Consume alternating runs of atext separated by single dots.
    while value and value[0] not in ATOM_ENDS:
        token, value = get_atext(value)
        dot_atom_text.append(token)
        if value and value[0] == '.':
            dot_atom_text.append(DOT)
            value = value[1:]
    # A trailing dot with no atext after it is a parse error.
    if dot_atom_text[-1] is DOT:
        raise errors.HeaderParseError("expected atom at end of dot-atom-text " "but found '{}'".format('.' + value))
    return dot_atom_text, value
|
def bundle(self, bundle_id, channel=None):
    '''Get the default data for a bundle.

    @param bundle_id The bundle's id.
    @param channel Optional channel name.
    '''
    return self.entity(bundle_id, channel=channel, get_files=True)
|
def delete_policy_set(self, policy_set_id):
    """Delete a specific policy set by id. Method is idempotent."""
    target_uri = self._get_policy_set_uri(guid=policy_set_id)
    return self.service._delete(target_uri)
|
def database_clone(targetcall, databasepath, complete=False):
    """Checks to see if the database has already been downloaded. If not,
    runs the system call to download the database, and writes stdout and
    stderr to the completeness marker file.

    :param targetcall: system call to download, and possibly set-up the
        database
    :param databasepath: absolute path of the database
    :param complete: boolean variable to determine whether the complete file
        should be created
    """
    # Marker file used to determine whether the database was already
    # downloaded and set up.
    completefile = os.path.join(databasepath, 'complete')
    # Run the system call only if the database is not already downloaded.
    if not os.path.isfile(completefile):
        out, err = run_subprocess(targetcall)
        if complete:
            # Populate the completeness file with the out and err streams.
            # BUGFIX: the file handle previously shadowed the `complete`
            # parameter; it now uses a distinct name.
            with open(completefile, 'w') as marker:
                marker.write(out)
                marker.write(err)
|
def height_map(lookup, height_stops, default_height=0.0):
    """Return a height value (in meters) interpolated from given
    height_stops; for use with vector-based visualizations using
    fill-extrusion layers.

    :param lookup: value (numeric) or category (any hashable) to map
    :param height_stops: list of (stop, height) pairs
    :param default_height: height returned when no stop applies
    """
    # if no height_stops, use default height
    if not height_stops:
        return default_height

    # dictionary to lookup height from match-type height_stops
    match_map = dict(height_stops)

    # if lookup matches a stop exactly, return its height (first priority);
    # this also covers non-numeric height_stop "keys" matched by equality
    if lookup in match_map:
        return match_map[lookup]

    # if lookup value is numeric, interpolate a height from the scale
    if isinstance(lookup, (int, float, complex)):
        # try ordering stops
        try:
            stops, heights = zip(*sorted(height_stops))
        except TypeError:
            # not all stops are numeric; fall back to categorical lookup
            return match_map.get(lookup, default_height)

        # for interpolation, all stops must be numeric
        if not all(isinstance(x, (int, float, complex)) for x in stops):
            return default_height

        # clamp lookup values that fall outside the stop bounds
        if float(lookup) <= stops[0]:
            return heights[0]
        elif float(lookup) >= stops[-1]:
            return heights[-1]
        # exact match against a stop value
        elif float(lookup) in stops:
            return heights[stops.index(lookup)]
        # interpolation required
        else:
            # identify bounding height stop values
            lower = max([stops[0]] + [x for x in stops if x < lookup])
            upper = min([stops[-1]] + [x for x in stops if x > lookup])
            # heights from bounding stops
            lower_height = heights[stops.index(lower)]
            upper_height = heights[stops.index(upper)]
            # linear "relative distance" from lower to upper bound
            distance = (lookup - lower) / (upper - lower)
            # linearly interpolated height in meters (the original comment
            # claiming an "rgb" string was copied from a color-map helper)
            return lower_height + distance * (upper_height - lower_height)

    # default height value catch-all
    return default_height
|
def docs(**kwargs):
    """Annotate the decorated view function with the specified Swagger
    attributes.

    Usage:

    .. code-block:: python

        from aiohttp import web

        @docs(tags=['my_tag'],
              summary='Test method summary',
              description='Test method description',
              parameters=[{
                  'in': 'header',
                  'name': 'X-Request-ID',
                  'schema': {'type': 'string', 'format': 'uuid'},
                  'required': 'true'
              }])
        async def index(request):
            return web.json_response({'msg': 'done', 'data': {}})
    """
    def wrapper(func):
        # Every documented view produces JSON.
        kwargs["produces"] = ["application/json"]
        if not hasattr(func, "__apispec__"):
            func.__apispec__ = {"parameters": [], "responses": {}}
        # Accumulate parameters and responses, then merge whatever remains.
        func.__apispec__["parameters"].extend(kwargs.pop("parameters", []))
        func.__apispec__["responses"].update(kwargs.pop("responses", {}))
        func.__apispec__.update(kwargs)
        return func
    return wrapper
|
def init_indexes(self):
    """Create indexes for schemas."""
    state = self.app_state
    for name, schema in self.schemas.items():
        if current_app.testing:
            # Tests run against an in-memory storage backend.
            storage = TestingStorage()
        else:
            index_path = (Path(state.whoosh_base) / name).absolute()
            if not index_path.exists():
                index_path.mkdir(parents=True)
            storage = FileStorage(str(index_path))
        # Open the existing index, or create it on first run.
        state.indexes[name] = (
            FileIndex(storage, schema, name)
            if storage.index_exists(name)
            else FileIndex.create(storage, schema, name)
        )
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.