signature
stringlengths 29
44.1k
| implementation
stringlengths 0
85.2k
|
---|---|
def get_erasure_profile(service, name):
    """Fetch a Ceph erasure-code profile as a dict.

    :param service: six.string_types. The Ceph user name to run the command under.
    :param name: name of the erasure-code profile to look up.
    :return: parsed profile dict, or None when the profile is missing,
        ceph is unavailable, or the output is not valid JSON.
    """
    cmd = [
        'ceph', '--id', service,
        'osd', 'erasure-code-profile', 'get', name,
        '--format=json',
    ]
    try:
        raw = check_output(cmd)
        if six.PY3:
            raw = raw.decode('UTF-8')
        return json.loads(raw)
    except (CalledProcessError, OSError, ValueError):
        # Missing profile, unreachable cluster, or malformed JSON.
        return None
|
def create_apppool(name):
    '''Create an IIS application pool.

    .. note::
        Only the application pool *name* is validated; if a pool with this
        name already exists (even with a different configuration) this
        returns True without modifying it.

    Args:
        name (str): The name of the IIS application pool.

    Returns:
        bool: True if successful, otherwise False

    CLI Example:

    .. code-block:: bash

        salt '*' win_iis.create_apppool name='MyTestPool'
    '''
    if name in list_apppools():
        log.debug("Application pool '%s' already present.", name)
        return True

    apppool_path = r'IIS:\AppPools\{0}'.format(name)
    cmd_ret = _srvmgr(['New-Item', '-Path', r"'{0}'".format(apppool_path)])

    if cmd_ret['retcode'] != 0:
        raise CommandExecutionError(
            'Unable to create application pool: {0}\nError: {1}'.format(
                name, cmd_ret['stderr']))

    log.debug('Application pool created successfully: %s', name)
    return True
|
def p_l_comma_l(self, p):
    # NOTE: the docstring below is the PLY grammar rule for this production;
    # the parser generator consumes it, so it must not be edited as prose.
    '''l : expression COMMA l'''
    _LOGGER.debug("l -> expresion , l")  # (sic: "expresion" typo is part of the runtime log string)
    # The right-hand side must already have been reduced to a typed list.
    if p[3].type != TypedClass.LIST:
        raise TypeError("list expected")
    # Prepend the head expression to the existing list value.
    l = TypedList([p[1]] + p[3].value)
    p[0] = l
|
def _add_new_init_method(cls):
    """Replace the existing cls.__init__() method with a new one
    which calls the original one and in addition performs the
    following actions:

    (1) Finds all instances of tohu.BaseGenerator in the namespace
        and collects them in the dictionary `self.field_gens`.
    (2) ..to do..
    """
    # Keep a reference to the original initialiser so the wrapper can delegate.
    orig_init = cls.__init__

    def new_init_method(self, *args, **kwargs):
        logger.debug(f"Initialising new {self} (type: {type(self)})")
        # Call original __init__ function to ensure we pick up
        # any tohu generators that are defined there.
        logger.debug(f" orig_init: {orig_init}")
        orig_init(self, *args, **kwargs)
        # Find field generator templates and spawn them to create
        # field generators for the new custom generator instance.
        field_gens_templates = find_field_generator_templates(self)
        logger.debug(f'Found {len(field_gens_templates)} field generator template(s):')
        debug_print_dict(field_gens_templates)
        logger.debug('Spawning field generator templates...')
        origs = {}
        spawned = {}
        # dependency_mapping is threaded through spawn() so that related
        # generators can be re-wired consistently — TODO confirm spawn()'s
        # contract against tohu.BaseGenerator.
        dependency_mapping = {}
        for (name, gen) in field_gens_templates.items():
            origs[name] = gen
            spawned[name] = gen.spawn(dependency_mapping)
            logger.debug(f'Adding dependency mapping: {gen} -> {spawned[name]}')
        self.field_gens = spawned
        # Also expose each spawned generator as an instance attribute.
        self.__dict__.update(self.field_gens)
        logger.debug(f'Spawned field generators attached to custom generator instance:')
        debug_print_dict(self.field_gens)
        # Add seed generator
        # self.seed_generator = SeedGenerator()
        # Create class for the items produced by this generator
        self.__class__.item_cls = make_item_class_for_custom_generator_class(self)

    cls.__init__ = new_init_method
|
def remap_label_indexers(data_obj, indexers, method=None, tolerance=None):
    """Translate label-based indexers into positional ones.

    Given an xarray data object and label based indexers, return a mapping
    of equivalent location based indexers. Also return a mapping of updated
    pandas index objects (in case of multi-index level drop).
    """
    if method is not None and not isinstance(method, str):
        raise TypeError('``method`` must be a string')

    pos_indexers = {}
    new_indexes = {}

    for dim, label in get_dim_indexers(data_obj, indexers).items():
        try:
            index = data_obj.indexes[dim]
        except KeyError:
            # No index for this dimension: reuse the provided labels as-is.
            if method is not None or tolerance is not None:
                raise ValueError('cannot supply ``method`` or ``tolerance`` '
                                 'when the indexed dimension does not have '
                                 'an associated coordinate.')
            pos_indexers[dim] = label
            continue
        idxr, new_idx = convert_label_indexer(index, label, dim, method, tolerance)
        pos_indexers[dim] = idxr
        if new_idx is not None:
            new_indexes[dim] = new_idx

    return pos_indexers, new_indexes
|
def get_validation_description(view, method):
    """Return the serializer validation description in the format:

    ### Validation:
    validate method docstring
    * field1 name
     * field1 validation docstring
    * field2 name
     * field2 validation docstring
    """
    # Only mutating methods on views exposing a serializer are documented.
    if method not in ('PUT', 'PATCH', 'POST') or not hasattr(view, 'get_serializer'):
        return ''

    serializer = view.get_serializer()
    description = ''

    # The serializer-level validate() docstring comes first.
    if hasattr(serializer, 'validate') and serializer.validate.__doc__ is not None:
        description += formatting.dedent(smart_text(serializer.validate.__doc__))

    # Then one bullet per field with a documented validate_<field> hook.
    for field in serializer.fields.values():
        if not hasattr(serializer, 'validate_' + field.field_name):
            continue
        field_validation = getattr(serializer, 'validate_' + field.field_name)
        if field_validation.__doc__ is None:
            continue
        docstring = formatting.dedent(smart_text(field_validation.__doc__)).replace('\n', '\n\t')
        entry = '* %s\n * %s' % (field.field_name, docstring)
        if description:
            description += '\n' + entry
        else:
            description = entry

    if not description:
        return ''
    return '### Validation:\n' + description
|
def xyz_to_cielab(xyz, refwhite):
    """Convert CIE XYZ color values to CIE L*a*b*.

    *xyz* should be of shape (*, 3). *refwhite* is the reference white
    value, of shape (3,).

    Return value has the same shape as *xyz*, but in CIE L*a*b*
    coordinates.
    """
    ratio = xyz / refwhite
    # Piecewise transfer function: cube root above the 0.008856 threshold,
    # linear approximation below it.
    cube_root = ratio ** 0.333333333333333
    linear = 7.787037 * ratio + 16. / 116
    mapped = np.where(ratio > 0.008856, cube_root, linear)

    # L/A/B and X/Y/Z are presumably module-level channel-index
    # constants — TODO confirm against the defining module.
    cielab = np.empty_like(xyz)
    cielab[..., L] = 116 * mapped[..., Y] - 16
    cielab[..., A] = 500 * (mapped[..., X] - mapped[..., Y])
    cielab[..., B] = 200 * (mapped[..., Y] - mapped[..., Z])
    return cielab
|
def get_xblock_settings(self, default=None):
    """Get the XBlock-specific settings for the current XBlock.

    Parameters:
        default - value used in two cases:
            * returned directly when no settings service is available
            * forwarded as the ``default`` argument to
              ``SettingsService.get_settings_bucket``
    """
    settings_service = self.runtime.service(self, "settings")
    if not settings_service:
        return default
    return settings_service.get_settings_bucket(self, default=default)
|
def get_redis(**options):
    """Create a redis client from keyword options merged with app settings.

    if no options defined, then it'll use settings options::

        # unix_socket_path = '/tmp/redis.sock'
        connection_pool = {'host':'localhost', 'port':6379}
        # if test after created redis client object
        test_first = False

    Returns the client, or None when ``test_first`` is set and the probe
    fails.
    """
    from uliweb import settings
    from uliweb.utils.common import log
    import redis

    # Settings always win: explicit keyword options are the base and are
    # overridden by whatever is configured in settings.REDIS.
    options = (options or {})
    options.update(settings.REDIS)
    if 'unix_socket_path' in options:
        # Unix-socket connections bypass the shared connection pool.
        client = redis.Redis(unix_socket_path=options['unix_socket_path'])
    else:
        global __connection_pool__
        # Re-create the module-level shared pool only when the pool
        # configuration has changed since it was last built.
        if not __connection_pool__ or __connection_pool__[0] != options['connection_pool']:
            d = {'host': 'localhost', 'port': 6379}
            d.update(options['connection_pool'])
            __connection_pool__ = (d, redis.ConnectionPool(**d))
        client = redis.Redis(connection_pool=__connection_pool__[1])
    if settings.REDIS.test_first:
        # Optionally probe the server; on failure log the error and
        # return None instead of a broken client.
        try:
            client.info()
        except Exception as e:
            log.exception(e)
            client = None
    return client
|
def addCodedValue(self, name, code):
    """Add a coded value to the domain.

    Inputs:
        name - name of the domain entry
        code - value
    """
    entry = {"name": name, "code": code}
    # Skip duplicates so the coded-value list stays unique.
    if entry in self._codedValues:
        return
    self._codedValues.append(entry)
|
def reduce_tashkeel(text):
    """Reduce the Tashkeel, by deleting evident cases.

    @param text: the input text fully vocalized.
    @type text: unicode.
    @return: partially vocalized text.
    @rtype: unicode.
    """
    # Each pattern matches a diacritic that can be dropped without losing
    # information; WAW, YEH, SUKUN, FATHA, DAMMA, KASRA, ALEF and
    # ALEF_HAMZA_BELOW are module-level character constants (presumably the
    # Arabic letters/harakat — TODO confirm against the defining module).
    patterns = [
        # delete all fathat, except on waw and yeh
        u"(?<!(%s|%s))(%s|%s)" % (WAW, YEH, SUKUN, FATHA),
        # delete damma if followed by waw.
        u"%s(?=%s)" % (DAMMA, WAW),
        # delete kasra if followed by yeh.
        u"%s(?=%s)" % (KASRA, YEH),
        # delete fatha if followed by alef to reduce yeh maftouha
        # and waw maftouha before alef.
        u"%s(?=%s)" % (FATHA, ALEF),
        # delete fatha from yeh and waw if they are in the word begining.
        u"(?<=\s(%s|%s))%s" % (WAW, YEH, FATHA),
        # delete kasra if preceded by Hamza below alef.
        u"(?<=%s)%s" % (ALEF_HAMZA_BELOW, KASRA),
    ]
    # Apply every deletion pattern in sequence.
    reduced = text
    for pat in patterns:
        reduced = re.sub(pat, '', reduced)
    return reduced
|
def status(self, name=''):
    """Return a list of the statuses of the `name` service, or
    if name is omitted, a list of the status of all services for this
    specific init system.

    There should be a standardization around the status fields.
    There currently isn't.

    `self.services` is set in `base.py`
    """
    super(SystemD, self).status(name=name)

    # Ask systemctl for all service units, without headers or paging.
    svc_list = sh.systemctl('--no-legend', '--no-pager', t='service')
    svcs_info = [self._parse_service_info(svc) for svc in svc_list]
    if name:
        # Accept both the bare name and the "<name>.service" spelling.
        names = (name, name + '.service')
        # return list of one item for specific service
        svcs_info = [s for s in svcs_info if s['name'] in names]
    self.services['services'] = svcs_info
    return self.services
|
def clear(self):
    """Restart with a clean config."""
    # Command-line overrides survive a clear so they keep winning even
    # after the config file is modified and live-reloaded.
    self._override_config = {}
    self._config = configparser.RawConfigParser()
    self.read_config()
|
def verify(self, type_):
    """Check whether a type implements ``self``.

    Parameters
    ----------
    type_ : type
        The type to check.

    Raises
    ------
    TypeError
        If ``type_`` doesn't conform to our interface.

    Returns
    -------
    None
    """
    raw_missing, mistyped, mismatched = self._diff_signatures(type_)

    # Split the missing names into those we can patch with registered
    # defaults and those that are genuinely absent.
    missing = []
    defaults_to_use = {}
    for name in raw_missing:
        try:
            default = self._defaults[name]
        except KeyError:
            missing.append(name)
        else:
            defaults_to_use[name] = default.implementation

    if missing or mistyped or mismatched:
        raise self._invalid_implementation(type_, missing, mistyped, mismatched)
    return defaults_to_use
|
def geodetic2geocentric(theta, alt):
    """Convert geodetic to geocentric coordinates using the WGS84 spheroid.

    :param theta: colatitude (float, rad)
    :param alt: altitude (float, km)
    :return gccolat: geocentric colatitude (float, rad)
        d: gccolat minus theta (float, rad)
        r: geocentric radius (float, km)
    """
    cos_t = np.cos(theta)
    sin_t = np.sin(theta)
    # Squared WGS84 semi-major / semi-minor axes, in km^2.
    a2 = 40680631.6
    b2 = 40408296.0
    one = a2 * sin_t * sin_t
    two = b2 * cos_t * cos_t
    three = one + two
    rho = np.sqrt(three)
    r = np.sqrt(alt * (alt + 2.0 * rho) + (a2 * one + b2 * two) / three)
    cd = (alt + rho) / r
    sd = (a2 - b2) / rho * cos_t * sin_t / r
    # Rotate (cos, sin) of the colatitude by the small correction angle d.
    prev_cos = cos_t
    cos_t = cos_t * cd - sin_t * sd
    sin_t = sin_t * cd + prev_cos * sd
    gccolat = np.arctan2(sin_t, cos_t)
    d = np.arctan2(sd, cd)
    return gccolat, d, r
|
def set_sleep(minutes):
    '''Sets the amount of idle time until the machine sleeps. Sets the same value
    for Computer, Display, and Hard Disk. Pass "Never" or "Off" for computers
    that should never sleep.

    :param minutes: Can be an integer between 1 and 180 or "Never" or "Off"
    :ptype: int, str

    :return: True if successful, False if not
    :rtype: bool

    CLI Example:

    .. code-block:: bash

        salt '*' power.set_sleep 120
        salt '*' power.set_sleep never
    '''
    # Normalise/validate the requested value (int minutes or never/off).
    value = _validate_sleep(minutes)
    cmd = 'systemsetup -setsleep {0}'.format(value)
    salt.utils.mac_utils.execute_return_success(cmd)

    # Confirm all three sleep settings actually picked up the new value.
    state = []
    for check in (get_computer_sleep, get_display_sleep, get_harddisk_sleep):
        state.append(salt.utils.mac_utils.confirm_updated(value, check,))
    return all(state)
|
def event_return(events):
    '''Return event to Pg server

    Requires that configuration be enabled via 'event_return'
    option in master config.
    '''
    # One transaction for the whole batch (commit=True).
    with _get_serv(events, commit=True) as cur:
        for event in events:
            tag = event.get('tag', '')
            data = event.get('data', '')
            # Payload is stored as JSON; alter_time records the insert time.
            sql = '''INSERT INTO salt_events (tag, data, master_id, alter_time)
                  VALUES (%s, %s, %s, to_timestamp(%s))'''
            cur.execute(sql, (tag, psycopg2.extras.Json(data), __opts__['id'], time.time()))
|
def dynamic_load(name):
    """Equivalent of a "from X import Y" statement using dot notation.

    For example, ``dynamic_load("foo.bar.thing")`` returns the item
    "thing" from the module "foo.bar".
    """
    mod_name, _, item = name.rpartition('.')
    module = __import__(mod_name, globals(), locals(), [item])
    return getattr(module, item)
|
def port_number_range(prange):
    """Port number range validation and expansion."""
    # A plain port number is accepted as-is.
    try:
        return port_number(prange)
    except ValueError:
        pass
    # Otherwise treat it as an "x-y" range and expand it inclusively.
    match = re.match(r'^(\d+)\-(\d+)$', prange)
    try:
        low, high = (int(b) for b in match.groups())
        if low > high:
            raise AttributeError()
    except (AttributeError, TypeError):
        # No regex match (match is None) or non-string input.
        raise ValueError("Bad port number range")
    return list(range(low, high + 1))
|
def all_floating_ips(self):
    """List all of the Floating IPs available on the account."""
    if self.api_version != 2:
        # Floating IPs are only available through the v2 API.
        raise DoError(v2_api_required_str)
    return self.request('/floating_ips')['floating_ips']
|
def _track(metric_type, metric_call, metric_kwargs, name, description, labels, registry, before=None):
    """Internal method decorator logic.

    :param metric_type: the type of the metric from the `prometheus_client` library
    :param metric_call: the invocation to execute as a callable with `(metric, time)`
    :param metric_kwargs: additional keyword arguments for creating the metric
    :param name: the name of the metric
    :param description: the description of the metric
    :param labels: a dictionary of `{labelname: callable_or_value}` for labels
    :param before: an optional callable to invoke before executing the
        request handler method accepting the single `metric` argument
    :param registry: the Prometheus Registry to use
    """
    if labels is not None and not isinstance(labels, dict):
        raise TypeError('labels needs to be a dictionary of {labelname: callable}')

    # The parent metric is created once; per-call children are derived
    # from it via .labels(...).
    label_names = labels.keys() if labels else tuple()
    parent_metric = metric_type(name, description, labelnames=label_names, registry=registry, **metric_kwargs)

    def argspec(func):
        # getfullargspec exists on Python 3; fall back for older runtimes.
        if hasattr(inspect, 'getfullargspec'):
            return inspect.getfullargspec(func)
        else:
            return inspect.getargspec(func)

    def label_value(f):
        # Normalise each label spec to a 1-arg callable of the response:
        # plain value -> constant; 0-arg callable -> called without the
        # response; callable with args -> called with the response.
        if not callable(f):
            return lambda x: f
        if argspec(f).args:
            return lambda x: f(x)
        else:
            return lambda x: f()

    label_generator = tuple((key, label_value(call)) for key, call in labels.items()) if labels else tuple()

    def get_metric(response):
        if label_names:
            return parent_metric.labels(**{key: call(response) for key, call in label_generator})
        else:
            return parent_metric

    def decorator(f):
        @functools.wraps(f)
        def func(*args, **kwargs):
            if before:
                # Labels cannot depend on the response yet, hence None.
                metric = get_metric(None)
                before(metric)
            else:
                metric = None

            start_time = default_timer()
            try:
                response = f(*args, **kwargs)
            except HTTPException as ex:
                # HTTP errors still act as the response to be measured.
                response = ex
            except Exception as ex:
                response = make_response('Exception: %s' % ex, 500)
            # Clamp to zero to guard against clock adjustments.
            total_time = max(default_timer() - start_time, 0)

            if not metric:
                response_for_metric = response
                if not isinstance(response, Response):
                    if request.endpoint == f.__name__:
                        # we are in a request handler method
                        response_for_metric = make_response(response)
                metric = get_metric(response_for_metric)

            metric_call(metric, time=total_time)
            return response
        return func
    return decorator
|
def update_service_profile(self, service_profile, body):
    """Update a Neutron service profile."""
    path = self.service_profile_path % (service_profile)
    return self.put(path, body=body)
|
def reconnect(self, logfile=None, max_timeout=360, force_discovery=False, tracefile=None, retry=True):
    """Reconnect to the device.

    It can be called after the device reloads or the session was
    disconnected either by device or jumphost. If multiple jumphosts are
    used then `reconnect` starts from the last valid connection.

    Args:
        logfile (file): Optional file descriptor for session logging. The file must be open for write.
            The session is logged only if ``log_session=True`` was passed to the constructor.
            If the parameter is None the default *session.log* file is created in `log_dir`.
        max_timeout (int): The maximum amount of time during which the session tries to reconnect.
            It may take longer depending on the TELNET or SSH default timeout.
        force_discovery (Bool): Optional. If True the device discover process starts after connecting.
        tracefile (file): Optional file descriptor for condoor logging. The file must be open for write.
            If the parameter is None the default *condoor.log* file is created in `log_dir`.
        retry (bool): Optional parameter causing the connection to retry until timeout.

    Raises:
        ConnectionError: If the discovery method was not called first or there was a problem with getting
            the connection.
        ConnectionAuthenticationError: If the authentication failed.
        ConnectionTimeoutError: If the connection timeout happened.
    """
    # NOTE(review): control flow below reconstructed from mangled source;
    # the trailing `else` is read as the while-loop's else clause (timeout
    # path) — confirm against upstream condoor.
    self._enable_logging(logfile, tracefile)
    self.log("-" * 20)
    self.log("Condoor Version {}".format(__version__))
    self.log("Cache filename: {}".format(_CACHE_FILE))

    # Rebuild the connection chains from the configured urls.
    self.connection_chains = [Chain(self, url_list) for url_list in normalize_urls(self._urls)]

    self.log("Connecting")
    self._clear_cache() if force_discovery else self._read_cache()

    excpt = ConnectionError("Could not (re)connect to the device")
    chains = len(self.connection_chains)
    chain_indices = self._chain_indices()

    for index, chain in enumerate(self.connection_chains, start=1):
        self.emit_message("Connection chain {}/{}: {}".format(index, chains, str(chain)), log_level=logging.INFO)

    self.emit_message("Trying to (re)connect within {} seconds".format(max_timeout), log_level=logging.INFO)
    sleep_time = 0
    begin = time.time()
    attempt = 1
    elapsed = 0
    while max_timeout - elapsed > 0:
        # there is no reason to wait for another chain.
        # Only pause once a full round over all chains has been attempted.
        if (attempt - 1) % len(self.connection_chains) != 0:
            sleep_time = 2
        if sleep_time > 0:
            self.emit_message("Waiting {:.0f}s before next connection attempt".format(sleep_time), log_level=logging.INFO)
            time.sleep(sleep_time)
        elapsed = time.time() - begin
        try:
            # Always try the chain currently at the head of the rotation.
            index = chain_indices[0]
            self.emit_message("Connection chain/attempt [{}/{}]".format(index + 1, attempt), log_level=logging.INFO)
            chain = self.connection_chains[index]
            self._last_chain_index = index
            if chain.connect():
                break
        except (ConnectionTimeoutError, ConnectionError) as e:  # pylint: disable=invalid-name
            chain.disconnect()
            self.emit_message("Connection error: {}".format(e), log_level=logging.INFO)
            # Rotate so the next attempt uses the next chain.
            chain_indices.rotate(-1)
            excpt = e
        except Exception as e:
            self.log("Exception hit: {}".format(str(e)))
            chain.disconnect()
            self.emit_message("Connection error: {}".format(e), log_level=logging.INFO)
            chain_indices.rotate(-1)
            excpt = e
        finally:
            # TODO: Make a configuration parameter
            elapsed = time.time() - begin
            sleep_time = min(30, max_timeout - elapsed)
            self.emit_message("Time elapsed {:.0f}s/{:.0f}s".format(elapsed, max_timeout), log_level=logging.INFO)
        attempt += 1
        if attempt > len(self.connection_chains) and not retry:
            # Without retry, give up after one full round over all chains.
            self.emit_message("Unable to (re)connect within {:.0f}s".format(elapsed), log_level=logging.ERROR)
            self._clear_cache()
            self._disable_logging()
            raise excpt
    else:
        # while-else: the timeout expired without a successful connect.
        self.emit_message("Unable to (re)connect within {:.0f}s".format(elapsed), log_level=logging.ERROR)
        self._clear_cache()
        self._disable_logging()
        raise excpt

    self._write_cache()
    self.emit_message("Target device connected in {:.0f}s.".format(elapsed), log_level=logging.INFO)
    self.log("-" * 20)
|
def write(self, file):
    """Write the image to the open file object.

    See `.save()` if you have a filename.

    In general, you can only call this method once; after the first call
    the source data will have been streamed and cannot be streamed again.
    """
    writer = Writer(**self.info)
    writer.write(file, self.rows)
|
def mad(y_true, y_pred):
    """Mean absolute deviation of predictions from targets.

    NOTE(review): despite the function's name and the original
    "Median absolute deviation" docstring, this computes the *mean*
    (np.mean) of the absolute errors, not the median. NaN positions
    are removed pairwise by _mask_nan first.
    """
    y_true, y_pred = _mask_nan(y_true, y_pred)
    return np.mean(np.abs(y_true - y_pred))
|
def _parse_caps_cpu ( node ) :
'''Parse the < cpu > element of the domain capabilities'''
|
result = { }
for mode in node . findall ( 'mode' ) :
if not mode . get ( 'supported' ) == 'yes' :
continue
name = mode . get ( 'name' )
if name == 'host-passthrough' :
result [ name ] = True
elif name == 'host-model' :
host_model = { }
model_node = mode . find ( 'model' )
if model_node is not None :
model = { 'name' : model_node . text }
vendor_id = model_node . get ( 'vendor_id' )
if vendor_id :
model [ 'vendor_id' ] = vendor_id
fallback = model_node . get ( 'fallback' )
if fallback :
model [ 'fallback' ] = fallback
host_model [ 'model' ] = model
vendor = mode . find ( 'vendor' ) . text if mode . find ( 'vendor' ) is not None else None
if vendor :
host_model [ 'vendor' ] = vendor
features = { feature . get ( 'name' ) : feature . get ( 'policy' ) for feature in mode . findall ( 'feature' ) }
if features :
host_model [ 'features' ] = features
result [ name ] = host_model
elif name == 'custom' :
custom_model = { }
models = { model . text : model . get ( 'usable' ) for model in mode . findall ( 'model' ) }
if models :
custom_model [ 'models' ] = models
result [ name ] = custom_model
return result
|
def mles(self):
    """Return the maximum likelihood estimates for each of the energy bins.

    Returns:
        numpy.ndarray of shape (self._nx,) holding
        ``self._loglikes[i].mle()`` for each bin ``i``.
    """
    # np.array over a comprehension replaces the previous use of the
    # low-level np.ndarray constructor, which allocates uninitialized
    # memory and silently relied on every slot being overwritten.
    return np.array([self._loglikes[i].mle() for i in range(self._nx)], dtype=float)
|
def getSegmentOnCell(self, c, i, segIdx):
    """Return the segment with index segIdx on cell (c, i).

    The segment is returned as a list of the form:
      [[segmentID, sequenceSegmentFlag, frequency],
       [col1, idx1, perm1],
       [col2, idx2, perm2], ...]
    """
    cell_segments = self.cells[c][i]
    return cell_segments[segIdx]
|
def send_message(self, opcode, message):
    """Send a message to the peer over the socket.

    :param int opcode: The opcode for the message to send.
    :param bytes message: The message data to send.
    """
    if not isinstance(message, bytes):
        message = message.encode('utf-8')
    length = len(message)

    # Bail out (and close) if the socket is not writable right now.
    if not select.select([], [self.handler.wfile], [], 0)[1]:
        self.logger.error('the socket is not ready for writing')
        self.close()
        return

    # Build a WebSocket-style frame (cf. RFC 6455): FIN bit + opcode,
    # then a 7-bit, 16-bit, or 64-bit payload length, then the payload.
    buffer = b''
    buffer += struct.pack('B', 0x80 + opcode)
    if length <= 125:
        buffer += struct.pack('B', length)
    elif 126 <= length <= 65535:
        buffer += struct.pack('>BH', 126, length)
    else:
        buffer += struct.pack('>BQ', 127, length)
    buffer += message
    self._last_sent_opcode = opcode

    # Serialise writers so concurrent frames are not interleaved.
    self.lock.acquire()
    try:
        self.handler.wfile.write(buffer)
        self.handler.wfile.flush()
    except Exception:
        self.logger.error('an error occurred while sending a message', exc_info=True)
        self.close()
    finally:
        self.lock.release()
|
def prox_lim(xy, step, boundary=None):
    """Proximal projection operator.

    Projects onto a circle or a line depending on *boundary*; any other
    value (including the default None) leaves *xy* unchanged.
    """
    if boundary == "circle":
        return prox_circle(xy, step)
    elif boundary == "line":
        return prox_line(xy, step)
    # Default: identity (no projection).
    return xy
|
def config_from_prefix(prefix):
    """Get config settings from a zmq prefix string."""
    canonical = prefix.lower()
    if canonical in ('default', 'auto', ''):
        # Autodetect: no prefix, no forced extension either way.
        zmq_prefix, bundled = '', False
    elif canonical in ('bundled', 'extension'):
        # Force building the bundled libzmq extension.
        zmq_prefix, bundled = '', True
    else:
        # Anything else is an explicit installation prefix.
        return {
            'zmq_prefix': prefix,
            'libzmq_extension': False,
            'no_libzmq_extension': True,
        }
    return {
        'zmq_prefix': zmq_prefix,
        'libzmq_extension': bundled,
        'no_libzmq_extension': False,
    }
|
def activate_firmware_and_wait(self, rollback_override=None, timeout=2, interval=1):
    """Activate the newly uploaded firmware, waiting out the
    long-running-command completion code when the controller reports it.
    """
    try:
        self.activate_firmware(rollback_override)
    except CompletionCodeError as e:
        if e.cc != CC_LONG_DURATION_CMD_IN_PROGRESS:
            raise HpmError('activate_firmware CC=0x%02x' % e.cc)
        self.wait_for_long_duration_command(
            constants.CMDID_HPM_ACTIVATE_FIRMWARE, timeout, interval)
    except IpmiTimeoutError:
        # The controller resets to boot the new firmware; the timeout is expected.
        pass
|
def request(self, target):
    """Delete a configuration datastore.

    *target* specifies the name or URL of the configuration datastore
    to delete.

    :seealso: :ref:`srctarget_params`
    """
    # Build the <delete-config> RPC payload and dispatch it.
    node = new_ele("delete-config")
    node.append(util.datastore_or_url("target", target, self._assert))
    return self._request(node)
|
def getProvince(self, default=None):
    """Return the Province from the Physical or Postal Address."""
    # Both getters are always consulted; the physical address wins,
    # falling back to the postal one (and ultimately to `default`).
    physical = self.getPhysicalAddress().get("state", default)
    postal = self.getPostalAddress().get("state", default)
    return physical or postal
|
def search_uris(self, uri, threat_types, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None,):
    """This method is used to check whether a URI is on a given threatList.

    Example:
        >>> from google.cloud import webrisk_v1beta1
        >>> from google.cloud.webrisk_v1beta1 import enums
        >>>
        >>> client = webrisk_v1beta1.WebRiskServiceV1Beta1Client()
        >>>
        >>> # TODO: Initialize `uri`:
        >>> uri = ''
        >>>
        >>> # TODO: Initialize `threat_types`:
        >>> threat_types = []
        >>>
        >>> response = client.search_uris(uri, threat_types)

    Args:
        uri (str): The URI to be checked for matches.
        threat_types (list[~google.cloud.webrisk_v1beta1.types.ThreatType]): Required.
            The ThreatLists to search in.
        retry (Optional[google.api_core.retry.Retry]): A retry object used
            to retry requests. If ``None`` is specified, requests will not
            be retried.
        timeout (Optional[float]): The amount of time, in seconds, to wait
            for the request to complete. Note that if ``retry`` is
            specified, the timeout applies to each individual attempt.
        metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
            that is provided to the method.

    Returns:
        A :class:`~google.cloud.webrisk_v1beta1.types.SearchUrisResponse` instance.

    Raises:
        google.api_core.exceptions.GoogleAPICallError: If the request
            failed for any reason.
        google.api_core.exceptions.RetryError: If the request failed due
            to a retryable error and retry attempts failed.
        ValueError: If the parameters are invalid.
    """
    # Wrap the transport method to add retry and timeout logic.
    # The wrapped call is cached so it is only built once per client.
    if "search_uris" not in self._inner_api_calls:
        self._inner_api_calls["search_uris"] = google.api_core.gapic_v1.method.wrap_method(
            self.transport.search_uris,
            default_retry=self._method_configs["SearchUris"].retry,
            default_timeout=self._method_configs["SearchUris"].timeout,
            client_info=self._client_info,
        )
    request = webrisk_pb2.SearchUrisRequest(uri=uri, threat_types=threat_types)
    return self._inner_api_calls["search_uris"](request, retry=retry, timeout=timeout, metadata=metadata)
|
def set_config(config):
    """Set bigchaindb.config equal to the default config dict,
    then update that with whatever is in the provided config dict,
    and then set bigchaindb.config['CONFIGURED'] = True

    Args:
        config (dict): the config dict to read for changes
            to the default config

    Note:
        Any previous changes made to ``bigchaindb.config`` will be lost.
    """
    # Deep copy the default config into bigchaindb.config
    bigchaindb.config = copy.deepcopy(bigchaindb._config)
    # Update the default config with whatever is in the passed config
    update(bigchaindb.config, update_types(config, bigchaindb.config))
    bigchaindb.config['CONFIGURED'] = True
|
def Browser(driver_name="firefox", *args, **kwargs):
    """Return a driver instance for the given name.

    When working with ``firefox``, it's possible to provide a profile name
    and a list of extensions.

    If you don't provide any driver_name, then ``firefox`` will be used.

    If there is no driver registered with the provided ``driver_name``, this
    function will raise a :class:`splinter.exceptions.DriverNotFoundError`
    exception.
    """
    try:
        driver = _DRIVERS[driver_name]
    except KeyError:
        raise DriverNotFoundError("No driver for %s" % driver_name)
    else:
        return driver(*args, **kwargs)
|
def write(file, system):
    """Write the data in ``system`` to a DOME-format file.

    :param file: output file name (opened for writing, truncated).
    :param system: system object whose ``devman.devices`` models are dumped.
    :return: True on success.
    """
    out = ['# DOME format version 1.0']
    ppl = 7  # parameters per line
    retval = True

    for dev in sorted(system.devman.devices):
        model = system.__dict__[dev]
        if not model.n:
            # Skip device models that have no elements.
            continue
        out.append('')
        header = dev + ', '
        # Continuation lines are padded to align under the header.
        space = ' ' * (len(dev) + 2)

        keys = list(model._data.keys())
        keys.extend(['name', 'idx'])
        # BUGFIX: the original iterated over `keys` while calling
        # `keys.pop(key)` with a *string* argument -- list.pop() takes an
        # integer index, so any key absent from model.__dict__ raised
        # TypeError (and mutating while iterating skips entries).
        # Filter non-existent keys with a comprehension instead.
        keys = sorted(key for key in keys if key in model.__dict__)

        nline = int(ceil(len(keys) / ppl))
        vals = [''] * len(keys)

        # For each element, read and format its parameter values.
        for elem in range(model.n):
            for idx, key in enumerate(keys):
                if model._flags['sysbase'] and key in model._store.keys():
                    val = model._store[key][elem]
                else:
                    val = model.__dict__[key][elem]
                if isinstance(val, float):
                    val = round(val, 5)
                elif isinstance(val, str):
                    val = '"{}"'.format(val)
                elif isinstance(val, list):
                    val = '[{}]'.format('; '.join(str(i) for i in val))
                elif val is None:
                    val = 0
                vals[idx] = val

            pair = ['{} = {}'.format(key, val) for key, val in zip(keys, vals)]
            for line in range(nline):
                string = ', '.join(pair[ppl * line:ppl * (line + 1)])
                # First line carries the device header.
                string = (header if line == 0 else space) + string
                if line != nline - 1:
                    # A trailing comma continues the record on the next line.
                    string += ','
                out.append(string)

    # Context manager ensures the file is closed even if a write fails.
    with open(file, 'w') as fid:
        for line in out:
            fid.write(line + '\n')
    return retval
|
def TableDescriptionParser(table_description, depth=0):
    """Parses the table_description object for internal use.

    Parses the user-submitted table description into the flat internal
    format used by the Python DataTable class and returns the flat list of
    parsed columns.

    Args:
      table_description: A description of the table. A single column may be
        given as 'id', ('id',), ('id', 'type'), ('id', 'type', 'label') or
        ('id', 'type', 'label', {'custom_prop1': 'custom_val1'}); or as a
        dictionary entry mapping 'id' to 'type' or to one of the tuple
        forms above. If the type is not specified it is treated as string;
        if no label is given the label is the id; missing custom properties
        default to an empty dictionary. Nested dictionaries describe
        multi-level tables, e.g.
        {'a': [('b', 'number'), ('c', 'string', 'column c')]} or
        {('w', 'string', 'word'): ('c', 'number', 'count')}.
      depth: Optional. The depth of the first level in the current
        description. Used by recursive calls to this function.

    Returns:
      List of columns, where each column is represented by a dictionary
      with the keys: id, label, type, depth, container ('dict', 'iter' or
      'scalar'), custom_properties. The returned description is flattened
      regardless of how it was given.

    Raises:
      DataTableException: Error in a column description or in the
        description structure.

    NOTE: there might be ambiguity in the case of a dictionary
    representation of a single column. {'a': ('b', 'c')} is parsed as a
    single column with id 'a', type 'b' and label 'c'. To force the
    two-column reading, make the key a tuple (i.e. {('a',): ('b', 'c')})
    or add more info into the value tuple ({'a': ('b', 'c', 'b', {})} --
    second 'b' is the label, {} is the custom properties field).
    """
    # For the recursion step, we check for a scalar object (string or tuple):
    # a scalar description is a single column.
    if isinstance(table_description, (six.string_types, tuple)):
        parsed_col = DataTable.ColumnTypeParser(table_description)
        parsed_col["depth"] = depth
        parsed_col["container"] = "scalar"
        return [parsed_col]
    # Since it is not scalar, table_description must be iterable.
    if not hasattr(table_description, "__iter__"):
        raise DataTableException("Expected an iterable object, got %s" % type(table_description))
    if not isinstance(table_description, dict):
        # We expects a non-dictionary iterable item.
        columns = []
        for desc in table_description:
            parsed_col = DataTable.ColumnTypeParser(desc)
            parsed_col["depth"] = depth
            parsed_col["container"] = "iter"
            columns.append(parsed_col)
        if not columns:
            raise DataTableException("Description iterable objects should not" " be empty.")
        return columns
    # The other case is a dictionary
    if not table_description:
        raise DataTableException("Empty dictionaries are not allowed inside" " description")
    # To differentiate between the two cases of more levels below or this is
    # the most inner dictionary, we consider the number of keys (more then one
    # key is indication for most inner dictionary) and the type of the key and
    # value in case of only 1 key (if the type of key is string and the type of
    # the value is a tuple of 0-3 items, we assume this is the most inner
    # dictionary).
    # NOTE: this way of differentiating might create ambiguity. See docs.
    if (len(table_description) != 1 or (isinstance(next(six.iterkeys(table_description)), six.string_types) and isinstance(next(six.itervalues(table_description)), tuple) and len(next(six.itervalues(table_description))) < 4)):
        # This is the most inner dictionary. Parsing types.
        columns = []
        # We sort the items, equivalent to sort the keys since they are unique
        for key, value in sorted(table_description.items()):
            # We parse the column type as (key, type) or (key, type, label)
            # using ColumnTypeParser.
            if isinstance(value, tuple):
                parsed_col = DataTable.ColumnTypeParser((key,) + value)
            else:
                parsed_col = DataTable.ColumnTypeParser((key, value))
            parsed_col["depth"] = depth
            parsed_col["container"] = "dict"
            columns.append(parsed_col)
        return columns
    # This is an outer dictionary, must have at most one key.
    # Recurse into the value with an incremented depth.
    parsed_col = DataTable.ColumnTypeParser(sorted(table_description.keys())[0])
    parsed_col["depth"] = depth
    parsed_col["container"] = "dict"
    return ([parsed_col] + DataTable.TableDescriptionParser(sorted(table_description.values())[0], depth=depth + 1))
|
def set_ratio(self, new_ratio):
    """Immediately apply *new_ratio* as the conversion ratio."""
    from samplerate.lowlevel import src_set_ratio
    result = src_set_ratio(self._state, new_ratio)
    return result
|
def _deserialize ( s , proto ) : # type : ( bytes , _ Proto ) - > _ Proto
'''Parse bytes into a in - memory proto
@ params
s is bytes containing serialized proto
proto is a in - memory proto object
@ return
The proto instance filled in by s'''
|
if not isinstance ( s , bytes ) :
raise ValueError ( 'Parameter s must be bytes, but got type: {}' . format ( type ( s ) ) )
if not ( hasattr ( proto , 'ParseFromString' ) and callable ( proto . ParseFromString ) ) :
raise ValueError ( 'No ParseFromString method is detected. ' '\ntype is {}' . format ( type ( proto ) ) )
decoded = cast ( Optional [ int ] , proto . ParseFromString ( s ) )
if decoded is not None and decoded != len ( s ) :
raise google . protobuf . message . DecodeError ( "Protobuf decoding consumed too few bytes: {} out of {}" . format ( decoded , len ( s ) ) )
return proto
|
def umount(self, all=False, force=True):
    """unmount container filesystem

    :param all: bool, option to unmount all mounted containers
    :param force: bool, force the unmounting of specified containers' root file system
    :return: str, the output from cmd
    """
    # FIXME: handle error if unmount didn't work
    # NOTE: ``all`` shadows the builtin but cannot be renamed without
    # breaking callers that pass it by keyword.
    options = []
    if force:
        options.append('--force')
    if all:
        options.append('--all')
    cmd = ["podman", "umount"] + options
    # Fix: previously an empty string was appended as a positional argument
    # when all=True, which podman would interpret as a container name.
    if not all:
        cmd.append(self.get_id())
    return run_cmd(cmd, return_output=True)
|
def _finalize_block_blob(self, ud, metadata):
    # type: (Uploader, blobxfer.models.upload.Descriptor, dict) -> None
    """Finalize Block blob
    :param Uploader self: this
    :param blobxfer.models.upload.Descriptor ud: upload descriptor
    :param dict metadata: metadata dict"""
    # An MD5 digest is only attached for unencrypted entities that need one.
    digest = None
    if ud.must_compute_md5 and not ud.entity.is_encrypted:
        digest = blobxfer.util.base64_encode_as_string(ud.md5.digest())
    # Commit the block list on the primary entity, then on every replica.
    targets = [ud.entity]
    if blobxfer.util.is_not_empty(ud.entity.replica_targets):
        targets.extend(ud.entity.replica_targets)
    for ase in targets:
        blobxfer.operations.azure.blob.block.put_block_list(ase, ud.last_block_num, digest, metadata)
|
def run_cmds_on_all_switches(self, cmds):
    """Runs all cmds on all configured switches.

    This helper is used for ACL and rule creation/deletion as ACLs
    and rules must exist on all switches.
    """
    switches = self._switches.values()
    for current_switch in switches:
        self.run_openstack_sg_cmds(cmds, current_switch)
|
def _read ( self , directory , filename , session , path , name , extension , spatial , spatialReferenceID , replaceParamFile ) :
"""Generic Output Location Read from File Method"""
|
# Assign file extension attribute to file object
self . fileExtension = extension
# Open file and parse into a data structure
with open ( path , 'r' ) as f :
for line in f :
sline = line . strip ( ) . split ( )
if len ( sline ) == 1 :
self . numLocations = sline [ 0 ]
else : # Create GSSHAPY OutputLocation object
location = OutputLocation ( linkOrCellI = sline [ 0 ] , nodeOrCellJ = sline [ 1 ] )
# Associate OutputLocation with OutputLocationFile
location . outputLocationFile = self
|
def _handle_exception(self, sender, exception=None):
    """Report the exception to honeybadger.

    :param T sender: the object sending the exception event.
    :param Exception exception: the exception to handle.
    """
    honeybadger.notify(exception)
    # Optionally clear the per-request context once the report is sent.
    if self.reset_context_after_request:
        self._reset_context()
|
def is_all_field_none(self):
    """:rtype: bool"""
    # True only when every tracked field is unset.
    tracked_fields = (
        self._id_,
        self._created,
        self._updated,
        self._label_user_creator,
        self._content,
    )
    return all(field is None for field in tracked_fields)
|
def _parseElfHeader(self, data):
    """Returns the elf header"""
    # Overlay the EHDR ctypes structure on the raw buffer, then wrap it.
    header_struct = self.__classes.EHDR.from_buffer(data)
    return EhdrData(header=header_struct)
|
def chunks(items, chunksize):
    """Turn generator sequence into sequence of chunks.

    Each yielded chunk is a lazy iterator over up to ``chunksize`` items;
    after the consumer returns control, any unconsumed items of the chunk
    are drained so the next chunk starts at the right position.
    """
    iterator = iter(items)
    for head in iterator:
        block = chain((head,), islice(iterator, chunksize - 1))
        yield block
        # drain whatever the consumer left unread (maxlen=0 deque discards)
        deque(block, 0)
|
def make_movie(structures, output_filename="movie.mp4", zoom=1.0, fps=20, bitrate="10000k", quality=1, **kwargs):
    """Generate a movie from a sequence of structures using vtk and ffmpeg.

    Args:
        structures ([Structure]): sequence of structures
        output_filename (str): filename for structure output. defaults to
            movie.mp4
        zoom (float): A zoom to be applied to the visualizer. Defaults to 1.0.
        fps (int): Frames per second for the movie. Defaults to 20.
        bitrate (str): Video bitate. Defaults to "10000k" (fairly high
            quality).
        quality (int): A quality scale. Defaults to 1.
        \\*\\*kwargs: Any kwargs supported by StructureVis to modify the images
            generated.
    """
    vis = StructureVis(**kwargs)
    vis.show_help = False
    vis.redraw()
    vis.zoom(zoom)
    # Number of digits needed to zero-pad the frame index.
    # NOTE(review): raises for an empty `structures` sequence (log10(0)).
    sigfig = int(math.floor(math.log10(len(structures))) + 1)
    filename = "image{0:0" + str(sigfig) + "d}.png"
    for i, s in enumerate(structures):
        vis.set_structure(s)
        vis.write_image(filename.format(i), 3)
    # ffmpeg expects a C-style %0Nd pattern rather than str.format syntax.
    filename = "image%0" + str(sigfig) + "d.png"
    args = ["ffmpeg", "-y", "-i", filename, "-q:v", str(quality), "-r", str(fps), "-b:v", str(bitrate), output_filename]
    # NOTE(review): Popen does not wait for ffmpeg; encoding may still be in
    # progress when this function returns.
    subprocess.Popen(args)
|
def _construct_axes_from_arguments(self, args, kwargs, require_all=False, sentinel=None):
    """Construct and returns axes if supplied in args/kwargs.

    If require_all, raise if all axis arguments are not supplied.
    Return a tuple of (axes, kwargs).

    sentinel specifies the default parameter when an axis is not
    supplied; useful to distinguish when a user explicitly passes None
    in scenarios where None has special meaning.
    """
    # construct the args
    args = list(args)
    for a in self._AXIS_ORDERS:
        # if we have an alias for this axis
        alias = self._AXIS_IALIASES.get(a)
        if alias is not None:
            if a in kwargs:
                # both the canonical name and its alias were given -> error
                if alias in kwargs:
                    raise TypeError("arguments are mutually exclusive " "for [%s,%s]" % (a, alias))
                continue
            if alias in kwargs:
                # normalize the alias onto the canonical axis name
                kwargs[a] = kwargs.pop(alias)
                continue
        # look for a argument by position: positional args are consumed
        # in _AXIS_ORDERS order
        if a not in kwargs:
            try:
                kwargs[a] = args.pop(0)
            except IndexError:
                if require_all:
                    raise TypeError("not enough/duplicate arguments " "specified!")
    # any axis not supplied gets the sentinel value
    axes = {a: kwargs.pop(a, sentinel) for a in self._AXIS_ORDERS}
    return axes, kwargs
|
def handle(self, *args, **opt):
    """Entry point for the management command.

    Args:
        *args:
        **opt:
    """
    d1_gmn.app.management.commands.util.util.log_setup(opt["debug"])
    logging.info("Running management command: {}".format(__name__))
    d1_gmn.app.management.commands.util.util.exit_if_other_instance_is_running(__name__)
    self._opt = opt
    try:
        self._handle()
    except d1_common.types.exceptions.DataONEException as e:
        # surface DataONE errors as command errors so Django reports them
        logging.error(str(e))
        raise django.core.management.base.CommandError(str(e))
    self._events.dump_to_log()
|
def _read_geneset ( self , study_fn , pop_fn ) :
"""Open files containing genes . Return study genes and population genes ."""
|
pop = set ( _ . strip ( ) for _ in open ( pop_fn ) if _ . strip ( ) )
study = frozenset ( _ . strip ( ) for _ in open ( study_fn ) if _ . strip ( ) )
if next ( iter ( pop ) ) . isdigit ( ) :
pop = set ( int ( g ) for g in pop )
study = frozenset ( int ( g ) for g in study )
# some times the pop is a second group to compare , rather than the
# population in that case , we need to make sure the overlapping terms
# are removed first
if self . args . compare :
common = pop & study
pop |= study
pop -= common
study -= common
sys . stderr . write ( "removed %d overlapping items\n" % ( len ( common ) ) )
sys . stderr . write ( "Set 1: {0}, Set 2: {1}\n" . format ( len ( study ) , len ( pop ) ) )
return study , pop
|
def create_insert_dict_string(self, tblname, d, PKfields=[], fields=None, check_existing=False):
    '''The main function of the insert_dict functions.
    This creates and returns the SQL query and parameters used by the other functions but does not insert any data into the database.
    Simple function for inserting a dictionary whose keys match the fieldnames of tblname. The function returns two values, the
    second of which is a dict containing the primary keys of the record. If a record already exists then no insertion is performed and
    (False, the dictionary of existing primary keys) is returned. Otherwise, the record is inserted into the database and (True, d)
    is returned.
    '''
    # Fixes: py2-only `except Exception, e` syntax, py2 string.join(list, sep)
    # calls, and `== None` comparisons. PKfields default is never mutated
    # (only rebound), so the mutable default is harmless but kept for
    # interface compatibility.
    if isinstance(PKfields, str):
        PKfields = [PKfields]
    if fields is None:
        fields = sorted(d.keys())
    values = None
    SQL = None
    try:
        # Search for existing records
        wherestr = []
        PKvalues = []
        for PKfield in PKfields:
            if d[PKfield] is None:
                wherestr.append("%s IS NULL" % PKfield)
            else:
                wherestr.append("%s=%%s" % PKfield)
                PKvalues.append(d[PKfield])
        PKfields = ",".join(PKfields)
        wherestr = " AND ".join(wherestr)
        record_exists = None
        if check_existing:
            record_exists = not (not (self.execute_select("SELECT %s FROM %s" % (PKfields, tblname) + " WHERE %s" % wherestr, parameters=tuple(PKvalues), locked=False)))
        SQL = 'INSERT INTO %s (%s) VALUES (%s)' % (tblname, ", ".join(fields), ",".join(['%s' for x in range(len(fields))]))
        values = tuple([d[k] for k in fields])
        return SQL, values, record_exists
    except Exception as e:
        raise Exception("Error occurred during database insertion: '%s'. %s" % (str(e), traceback.format_exc()))
|
def as_list(self):
    """Returns a list of integers or `None` for each dimension.

    Returns:
      A list of integers or `None` for each dimension.

    Raises:
      ValueError: If `self` is an unknown shape with an unknown rank.
    """
    if self._dims is None:
        raise ValueError("as_list() is not defined on an unknown TensorShape.")
    result = []
    for dim in self._dims:
        result.append(dim.value)
    return result
|
def image ( self , url , title = "" , width = 800 ) :
"""* create MMD image link *
* * Key Arguments : * *
- ` ` title ` ` - - the title for the image
- ` ` url ` ` - - the image URL
- ` ` width ` ` - - the width in pixels of the image . Default * 800*
* * Return : * *
- ` ` imageLink ` ` - - the MMD image link
* * Usage : * *
To create a MMD image link :
. . code - block : : python
imageLink = md . image (
" http : / / www . thespacedoctor . co . uk / images / thespacedoctor _ icon _ white _ circle . png " , " thespacedoctor icon " , 400)
print imageLink
# OUTPUT :
# ! [ thespacedoctor icon ] [ thespacedoctor icon 20170228t130146.472262]
# [ thespacedoctor icon 20170228t130146.472262 ] : http : / / www . thespacedoctor . co . uk / images / thespacedoctor _ icon _ white _ circle . png " thespacedoctor icon " width = 400px"""
|
title = title . strip ( )
caption = title
now = datetime . now ( )
figId = now . strftime ( "%Y%m%dt%H%M%S.%f" )
if len ( title ) :
figId = "%(title)s %(figId)s" % locals ( )
imageLink = """\n\n![%(caption)s][%(figId)s]
[%(figId)s]: %(url)s "%(title)s" width=%(width)spx\n\n""" % locals ( )
return imageLink
|
def email(value, whitelist=None):
    """Validate an email address.

    This validator is based on `Django's email validator`_. Returns
    ``True`` on success and :class:`~validators.utils.ValidationFailure`
    when validation fails.

    Examples::

        >>> email('someone@example.com')
        True

        >>> email('bogus@@')
        ValidationFailure(func=email, ...)

    .. _Django's email validator:
       https://github.com/django/django/blob/master/django/core/validators.py

    .. versionadded:: 0.1

    :param value: value to validate
    :param whitelist: domain names to whitelist

    :copyright: (c) Django Software Foundation and individual contributors.
    :license: BSD
    """
    if whitelist is None:
        whitelist = domain_whitelist
    if not value or '@' not in value:
        return False
    user_part, domain_part = value.rsplit('@', 1)
    if not user_regex.match(user_part):
        return False
    if domain_part in whitelist:
        return True
    if domain_regex.match(domain_part):
        return True
    # Possibly an internationalized domain: retry with the punycode form.
    try:
        encoded_domain = domain_part.encode('idna').decode('ascii')
    except UnicodeError:
        return False
    return domain_regex.match(encoded_domain)
|
def _can_change_or_view ( model , user ) :
"""Return True iff ` user ` has either change or view permission
for ` model ` ."""
|
model_name = model . _meta . model_name
app_label = model . _meta . app_label
can_change = user . has_perm ( app_label + '.change_' + model_name )
can_view = user . has_perm ( app_label + '.view_' + model_name )
return can_change or can_view
|
def SSL_CTX_set_info_callback(ctx, app_info_cb):
    """Set the info callback

    :param callback: The Python callback to use
    :return: None"""
    def py_info_callback(ssl, where, ret):
        # Never let an exception escape into the C caller.
        try:
            app_info_cb(SSL(ssl), where, ret)
        except:
            pass
        return

    global _info_callback
    # Keep a reference keyed by context so the ctypes thunk isn't collected
    # while the C library may still invoke it.
    wrapped = _rvoid_voidp_int_int(py_info_callback)
    _info_callback[ctx] = wrapped
    _SSL_CTX_set_info_callback(ctx, wrapped)
|
def mod_repo(repo, **kwargs):
    '''
    Modify one or more values for a repo. If the repo does not exist, it will
    be created, so long as the following values are specified:

    repo or alias
        alias by which Zypper refers to the repo

    url, mirrorlist or baseurl
        the URL for Zypper to reference

    enabled
        Enable or disable (True or False) repository,
        but do not remove if disabled.

    refresh
        Enable or disable (True or False) auto-refresh of the repository.

    cache
        Enable or disable (True or False) RPM files caching.

    gpgcheck
        Enable or disable (True or False) GPG check for this repository.

    gpgautoimport : False
        If set to True, automatically trust and import public GPG key for
        the repository.

    root
        operate on a different root directory.

    Key/Value pairs may also be removed from a repo's configuration by setting
    a key to a blank value. Bear in mind that a name cannot be deleted, and a
    URL can only be deleted if a ``mirrorlist`` is specified (or vice versa).

    CLI Examples:

    .. code-block:: bash

        salt '*' pkg.mod_repo alias alias=new_alias
        salt '*' pkg.mod_repo alias url= mirrorlist=http://host.com/
    '''
    root = kwargs.get('root') or None
    repos_cfg = _get_configured_repos(root=root)
    added = False

    # An attempt to add new one?
    if repo not in repos_cfg.sections():
        url = kwargs.get('url', kwargs.get('mirrorlist', kwargs.get('baseurl')))
        if not url:
            raise CommandExecutionError(
                'Repository \'{0}\' not found, and neither \'baseurl\' nor '
                '\'mirrorlist\' was specified'.format(repo))

        if not _urlparse(url).scheme:
            raise CommandExecutionError(
                'Repository \'{0}\' not found and URL for baseurl/mirrorlist '
                'is malformed'.format(repo))

        # Is there already such repo under different alias?
        for alias in repos_cfg.sections():
            repo_meta = _get_repo_info(alias, repos_cfg=repos_cfg, root=root)

            # Complete user URL, in case it is not
            new_url = _urlparse(url)
            if not new_url.path:
                # NOTE(review): ParseResult is looked up as an attribute of
                # _urlparse -- confirm _urlparse exposes it in this module.
                new_url = _urlparse.ParseResult(scheme=new_url.scheme,  # pylint: disable=E1123
                                                netloc=new_url.netloc,
                                                path='/',
                                                params=new_url.params,
                                                query=new_url.query,
                                                fragment=new_url.fragment)
            base_url = _urlparse(repo_meta['baseurl'])

            if new_url == base_url:
                raise CommandExecutionError(
                    'Repository \'{0}\' already exists as \'{1}\'.'.format(repo, alias))

        # Add new repo
        __zypper__(root=root).xml.call('ar', url, repo)

        # Verify the repository has been added
        repos_cfg = _get_configured_repos(root=root)
        if repo not in repos_cfg.sections():
            raise CommandExecutionError(
                'Failed add new repository \'{0}\' for unspecified reason. '
                'Please check zypper logs.'.format(repo))
        added = True

    repo_info = _get_repo_info(repo, root=root)
    if (not added and 'baseurl' in kwargs and
            not (kwargs['baseurl'] == repo_info['baseurl'])):
        # Note: zypper does not support changing the baseurl
        # we need to remove the repository and add it again with the new baseurl
        repo_info.update(kwargs)
        repo_info.setdefault('cache', False)
        del_repo(repo, root=root)
        return mod_repo(repo, root=root, **repo_info)

    # Modify added or existing repo according to the options
    cmd_opt = []
    global_cmd_opt = []
    call_refresh = False

    if 'enabled' in kwargs:
        cmd_opt.append(kwargs['enabled'] and '--enable' or '--disable')

    if 'refresh' in kwargs:
        cmd_opt.append(kwargs['refresh'] and '--refresh' or '--no-refresh')

    if 'cache' in kwargs:
        cmd_opt.append(kwargs['cache'] and '--keep-packages' or '--no-keep-packages')

    if 'gpgcheck' in kwargs:
        cmd_opt.append(kwargs['gpgcheck'] and '--gpgcheck' or '--no-gpgcheck')

    if 'priority' in kwargs:
        cmd_opt.append("--priority={0}".format(kwargs.get('priority', DEFAULT_PRIORITY)))

    if 'humanname' in kwargs:
        cmd_opt.append("--name='{0}'".format(kwargs.get('humanname')))

    if kwargs.get('gpgautoimport') is True:
        global_cmd_opt.append('--gpg-auto-import-keys')
        call_refresh = True

    if cmd_opt:
        cmd_opt = global_cmd_opt + ['mr'] + cmd_opt + [repo]
        __zypper__(root=root).refreshable.xml.call(*cmd_opt)

    comment = None
    if call_refresh:
        # when used with "zypper ar --refresh" or "zypper mr --refresh"
        # --gpg-auto-import-keys is not doing anything
        # so we need to specifically refresh here with --gpg-auto-import-keys
        refresh_opts = global_cmd_opt + ['refresh'] + [repo]
        __zypper__(root=root).xml.call(*refresh_opts)
    elif not added and not cmd_opt:
        comment = 'Specified arguments did not result in modification of repo'

    repo = get_repo(repo, root=root)
    if comment:
        repo['comment'] = comment

    return repo
|
def reduce_after(method):
    '''reduce() the result of this method call (unless you already reduced it).

    Decorator: wraps ``method`` so its return value has ``.reduce()`` applied,
    except when the method returns something equal to ``self`` (already
    reduced), which is passed through unchanged.
    '''
    import functools  # local import keeps the block self-contained

    # Fix: without functools.wraps the decorated method lost its __name__
    # and docstring, breaking introspection and documentation tools.
    @functools.wraps(method)
    def new_method(self, *args, **kwargs):
        result = method(self, *args, **kwargs)
        if result == self:
            return result
        return result.reduce()
    return new_method
|
def process_job(self, job_request):
    """Validate, execute, and run the job request, wrapping it with any applicable job middleware.

    :param job_request: The job request
    :type job_request: dict
    :return: A `JobResponse` object
    :rtype: JobResponse
    :raise: JobError
    """
    try:
        # Validate JobRequest message; schema errors become JobError
        validation_errors = [Error(code=error.code, message=error.message, field=error.pointer,) for error in (JobRequestSchema.errors(job_request) or [])]
        if validation_errors:
            raise JobError(errors=validation_errors)
        # Add the client object in case a middleware wishes to use it
        job_request['client'] = self.make_client(job_request['context'])
        # Add the async event loop in case a middleware wishes to use it
        job_request['async_event_loop'] = self._async_event_loop
        if hasattr(self, '_async_event_loop_thread'):
            job_request['run_coroutine'] = self._async_event_loop_thread.run_coroutine
        else:
            job_request['run_coroutine'] = None
        # Build set of middleware + job handler, then run job
        wrapper = self.make_middleware_stack([m.job for m in self.middleware], self.execute_job,)
        job_response = wrapper(job_request)
        # propagate the request correlation id onto the response, if present
        if 'correlation_id' in job_request['context']:
            job_response.context['correlation_id'] = job_request['context']['correlation_id']
    except JobError as e:
        self.metrics.counter('server.error.job_error').increment()
        job_response = JobResponse(errors=e.errors,)
    except Exception as e:
        # Send an error response if no middleware caught this.
        # Formatting the error might itself error, so try to catch that
        self.metrics.counter('server.error.unhandled_error').increment()
        return self.handle_job_exception(e)
    return job_response
|
def to_dict(self):
    """Return a dict representation of KnwKBRVAL."""
    # FIXME remove 'id' dependency from invenio modules
    knowledge_base = self.kb
    return {
        'id': self.m_key + "_" + str(self.id_knwKB),
        'key': self.m_key,
        'value': self.m_value,
        'kbid': knowledge_base.id if knowledge_base else None,
        'kbname': knowledge_base.name if knowledge_base else None,
    }
|
def _ssh_cmd ( self , * args ) :
"""Execute a gerrit command over SSH ."""
|
command = "gerrit {0}" . format ( " " . join ( args ) )
_ , stdout , stderr = self . _client . exec_command ( command )
return ( stdout . readlines ( ) , stderr . readlines ( ) )
|
def _get_spark_app_ids(self, running_apps, requests_config, tags):
    """Traverses the Spark application master in YARN to get a Spark application ID.

    Return a dictionary of {app_id: (app_name, tracking_url)} for Spark applications
    """
    spark_apps = {}
    # Loop targets renamed so the YARN-level ids no longer shadow the
    # Spark-level ones discovered inside the inner loop.
    for yarn_id, (yarn_name, tracking_url) in iteritems(running_apps):
        response = self._rest_request_to_json(tracking_url, SPARK_APPS_PATH, SPARK_SERVICE_CHECK, requests_config, tags)
        for app in response:
            spark_id = app.get('id')
            spark_name = app.get('name')
            if spark_id and spark_name:
                spark_apps[spark_id] = (spark_name, tracking_url)
    return spark_apps
|
def bundle_attacks_with_goal(sess, model, x, y, adv_x, attack_configs, run_counts, goal, report, report_path, attack_batch_size=BATCH_SIZE, eval_batch_size=BATCH_SIZE):
    """Runs attack bundling, working on one specific AttackGoal.

    This function is mostly intended to be called by `bundle_attacks`.
    Reference: https://openreview.net/forum?id=H1g0piA9tQ

    :param sess: tf.session.Session
    :param model: cleverhans.model.Model
    :param x: numpy array containing clean example inputs to attack
    :param y: numpy array containing true labels
    :param adv_x: numpy array containing the adversarial examples made so far
      by earlier work in the bundling process
    :param attack_configs: list of AttackConfigs to run
    :param run_counts: dict mapping AttackConfigs to numpy arrays specifying
      how many times they have been run on each example
    :param goal: AttackGoal to run
    :param report: ConfidenceReport
    :param report_path: str, the path the report will be saved to
    :param attack_batch_size: int, batch size for generating adversarial examples
    :param eval_batch_size: int, batch size for evaluating the model on adversarial examples
    """
    goal.start(run_counts)
    _logger.info("Running criteria for new goal...")
    criteria = goal.get_criteria(sess, model, adv_x, y, batch_size=eval_batch_size)
    assert 'correctness' in criteria
    _logger.info("Accuracy: " + str(criteria['correctness'].mean()))
    assert 'confidence' in criteria
    # Keep attacking until the goal declares itself satisfied; each batch
    # updates adv_x / criteria / run_counts in place.
    while not goal.is_satisfied(criteria, run_counts):
        run_batch_with_goal(sess, model, x, y, adv_x, criteria, attack_configs, run_counts, goal, report, report_path, attack_batch_size=attack_batch_size)
    # Save after finishing all goals.
    # The incremental saves run on a timer. This save is needed so that the last
    # few attacks after the timer don't get discarded
    report.completed = True
    save(criteria, report, report_path, adv_x)
|
def read_setup_py_source(self):  # type: () -> None
    """Read setup.py into ``self.setup_source`` (cached).

    When no setup file name is configured, the source is set to "" and no
    read is attempted. Otherwise the file is read only if the source has
    not been loaded yet.
    :return:
    """
    if not self.setup_file_name:
        self.setup_source = ""
        # Fix: previously control fell through to the read branch, calling
        # _read_file with a falsy file name.
        return
    if not self.setup_source:
        self.setup_source = self._read_file(self.setup_file_name)
|
def indexlist(self):
    """Returns the list of files in alphabetical order for index lookups.

    :return [(<str> name, <str> url), ..]
    """
    entries = []
    for child in self.children(recursive=True):
        entries.append((child.text(0), child.url()))
    return sorted(entries)
|
def init_logger(self):
    """Init logger."""
    if self.result_logger:
        # already initialized -- nothing to do
        return
    if not os.path.exists(self.local_dir):
        os.makedirs(self.local_dir)
    if not self.logdir:
        # create a fresh trial-specific log directory under local_dir
        prefix = "{}_{}".format(str(self)[:MAX_LEN_IDENTIFIER], date_str())
        self.logdir = tempfile.mkdtemp(prefix=prefix, dir=self.local_dir)
    elif not os.path.exists(self.logdir):
        os.makedirs(self.logdir)
    self.result_logger = UnifiedLogger(self.config, self.logdir,
                                       upload_uri=self.upload_dir,
                                       loggers=self.loggers,
                                       sync_function=self.sync_function)
|
def del_property(self, t_property_name, sync=True):
    """Delete a property from this transport.

    If this transport has no id yet, behaves as if ``sync=False``.

    :param t_property_name: name of the property to remove
    :param sync: if True (default), synchronize with the Ariane server
        immediately; if False, queue the property name for removal on the
        next save().
    :return: None
    :raises ArianeMappingOverloadError: when the server reports rc 500
        with the overload error message.
    """
    LOGGER.debug("Transport.del_property")
    if not sync or self.id is None:
        # Defer: the property will actually be removed on the next save().
        self.properties_2_rm.append(t_property_name)
    else:
        params = SessionService.complete_transactional_req({'ID': self.id, 'propertyName': t_property_name})
        # Request shape differs between the REST driver and the messaging drivers.
        if MappingService.driver_type != DriverFactory.DRIVER_REST:
            params['OPERATION'] = 'removeTransportProperty'
            args = {'properties': params}
        else:
            args = {'http_operation': 'GET', 'operation_path': 'update/properties/delete', 'parameters': params}
        response = TransportService.requester.call(args)
        # Non-REST drivers return a future-like object; resolve it.
        if MappingService.driver_type != DriverFactory.DRIVER_REST:
            response = response.get()
        if response.rc != 0:
            LOGGER.warning('Transport.del_property - Problem while updating transport ' + self.name + ' properties. ' + 'Reason: ' + str(response.response_content) + ' - ' + str(response.error_message) + " (" + str(response.rc) + ")")
            # Overload errors are escalated so callers can back off and retry.
            if response.rc == 500 and ArianeMappingOverloadError.ERROR_MSG in response.error_message:
                raise ArianeMappingOverloadError("Transport.del_property", ArianeMappingOverloadError.ERROR_MSG)
            # traceback.print_stack()
        else:
            # Refresh local state from the server after a successful removal.
            self.sync()
|
def submissions(self):
    """Return the JSON list of job submissions in this workspace."""
    response = fapi.get_submissions(self.namespace, self.name, self.api_url)
    fapi._check_response_code(response, 200)
    return response.json()
|
def description(self, description):
    """Sets the description of this AdditionalRecipient.

    The description of the additional recipient. Must be a non-empty
    string of at most 100 characters.

    :param description: The description of this AdditionalRecipient.
    :type: str
    """
    if description is None:
        raise ValueError("Invalid value for `description`, must not be `None`")
    length = len(description)
    if length > 100:
        raise ValueError("Invalid value for `description`, length must be less than `100`")
    if length < 1:
        raise ValueError("Invalid value for `description`, length must be greater than or equal to `1`")
    self._description = description
|
def get_entities(seq, suffix=False):
    """Extract entity chunks from a sequence of IOB-style labels.

    Args:
        seq (list): sequence of labels (may also be a list of lists).
        suffix (bool): if True, the chunk tag is the last character of each
            label and the type is the leading part, instead of the reverse.

    Returns:
        list: list of (chunk_type, chunk_start, chunk_end) tuples.

    Example:
        >>> seq = ['B-PER', 'I-PER', 'O', 'B-LOC']
        >>> get_entities(seq)
        [('PER', 0, 1), ('LOC', 3, 3)]
    """
    # Flatten nested lists, inserting an 'O' sentinel after each sublist so
    # chunks never span sentence boundaries.
    if any(isinstance(entry, list) for entry in seq):
        seq = [label for sentence in seq for label in sentence + ['O']]
    chunks = []
    prev_tag, prev_type = 'O', ''
    begin_offset = 0
    # The trailing 'O' sentinel forces the final open chunk to close.
    for idx, label in enumerate(seq + ['O']):
        if suffix:
            tag, type_ = label[-1], label.split('-')[0]
        else:
            tag, type_ = label[0], label.split('-')[-1]
        if end_of_chunk(prev_tag, tag, prev_type, type_):
            chunks.append((prev_type, begin_offset, idx - 1))
        if start_of_chunk(prev_tag, tag, prev_type, type_):
            begin_offset = idx
        prev_tag, prev_type = tag, type_
    return chunks
|
def get_cdn_metadata(self, container):
    """Return a dict of the CDN metadata for the given container.

    If the container does not exist, a NotFound exception is raised. If it
    exists but is not CDN-enabled, a NotCDNEnabled exception is raised.
    """
    uri = "%s/%s" % (self.uri_base, utils.get_name(container))
    response, _body = self.api.cdn_request(uri, "HEAD")
    metadata = dict(response.headers)
    # Strip generic HTTP headers that are not CDN metadata.
    for non_cdn_key in ("content-length", "content-type", "date"):
        metadata.pop(non_cdn_key, None)
    return metadata
|
def set_var_log_arr(self, value):
    '''Setter: store *value* as the variance log array.

    Rejects anything that is not a numpy ndarray.
    '''
    if not isinstance(value, np.ndarray):
        raise TypeError()
    self.__var_log_arr = value
|
def get(self, block=True, timeout=None):
    """Get a Task from the queue.

    :param block: block until a Task is received (up to ``timeout``)
    :param timeout: give up after ``timeout`` seconds when blocking
    :return: a :class:`~redisqueue.AbstractTask` instance, or None when
        the queue is empty
    :exception: QueueNotConnectedError if the queue is not connected
    """
    if not self.connected:
        raise QueueNotConnectedError("Queue is not Connected")
    raw = (self.__db.brpop(self._key, timeout=timeout)
           if block else self.__db.rpop(self._key))
    if not raw:
        return None
    job = self.task_class(raw[1])
    # If the task was marked unique, release its unique_hash from the
    # lock table now that it has been consumed.
    if job.unique:
        self.__db.srem(self._lock_key, job.unique_hash())
    return job
|
def _read(self, fp, fpname):
    """A direct copy of the py2.4 version of the super class's _read method
    to assure it uses ordered dicts. Had to change one line to make it work.
    Future versions have this fixed, but in fact its quite embarrassing for the
    guys not to have done it right in the first place!
    Removed big comments to make it more compact.
    Made sure it ignores initial whitespace as git uses tabs

    :param fp: binary file-like object to read from
    :param fpname: name used in error messages
    :raises cp.MissingSectionHeaderError: on an option before any section
    :raises cp.ParsingError: collected at the end if any line failed to parse
    """
    cursect = None  # None, or the dict of the section being filled
    optname = None  # option currently being parsed (for continuation lines)
    lineno = 0
    is_multi_line = False  # True while inside a quoted multi-line value
    e = None  # None, or the accumulated cp.ParsingError

    def string_decode(v):
        # Resolve escape sequences in a raw value string.
        if v[-1] == '\\':
            v = v[:-1]
        # end cut trailing escapes to prevent decode error
        if PY3:
            return v.encode(defenc).decode('unicode_escape')
        else:
            return v.decode('string_escape')
    # end
    # end
    while True:
        # we assume to read binary!
        line = fp.readline().decode(defenc)
        if not line:
            break
        lineno = lineno + 1
        # comment or blank line?
        if line.strip() == '' or self.re_comment.match(line):
            continue
        # 'rem' comment lines (no leading whitespace allowed)
        if line.split(None, 1)[0].lower() == 'rem' and line[0] in "rR":
            continue
        # is it a section header? (stripped: git config indents with tabs)
        mo = self.SECTCRE.match(line.strip())
        if not is_multi_line and mo:
            sectname = mo.group('header').strip()
            if sectname in self._sections:
                cursect = self._sections[sectname]
            elif sectname == cp.DEFAULTSECT:
                cursect = self._defaults
            else:
                cursect = self._dict((('__name__', sectname),))
                self._sections[sectname] = cursect
                self._proxies[sectname] = None
            # So sections can't start with a continuation line
            optname = None
        # no section header in the file?
        elif cursect is None:
            raise cp.MissingSectionHeaderError(fpname, lineno, line)
        # an option line?
        elif not is_multi_line:
            mo = self.OPTCRE.match(line)
            if mo:
                # We might just have handled the last line, which could contain a quotation we want to remove
                optname, vi, optval = mo.group('option', 'vi', 'value')
                # strip inline ';' comments (only when preceded by whitespace
                # and the value is not quoted)
                if vi in ('=', ':') and ';' in optval and not optval.strip().startswith('"'):
                    pos = optval.find(';')
                    if pos != -1 and optval[pos - 1].isspace():
                        optval = optval[:pos]
                optval = optval.strip()
                if optval == '""':
                    optval = ''
                # end handle empty string
                optname = self.optionxform(optname.rstrip())
                # an opening quote with no closing quote starts a multi-line value
                if len(optval) > 1 and optval[0] == '"' and optval[-1] != '"':
                    is_multi_line = True
                    optval = string_decode(optval[1:])
                # end handle multi-line
                cursect[optname] = optval
            else:
                # check if it's an option with no value - it's just ignored by git
                if not self.OPTVALUEONLY.match(line):
                    if not e:
                        e = cp.ParsingError(fpname)
                    e.append(lineno, repr(line))
                continue
        else:
            # continuation of a multi-line value; a trailing '"' ends it
            line = line.rstrip()
            if line.endswith('"'):
                is_multi_line = False
                line = line[:-1]
            # end handle quotations
            cursect[optname] += string_decode(line)
    # END parse section or option
    # END while reading
    # if any parsing errors occurred, raise an exception
    if e:
        raise e
|
def resize(self, capacity):
    """Re-size the `Array` by appending new *array elements* or removing
    *array elements* from the end.

    :param int capacity: new capacity of the `Array` in number of
        *array elements*. Negative values are treated as zero.
    """
    delta = max(int(capacity), 0) - len(self)
    if delta > 0:
        # Grow: append default-constructed elements.
        for _ in range(delta):
            self.append()
    elif delta < 0:
        if -delta == len(self):
            # Shrinking to empty: drop everything in one call.
            self.clear()
        else:
            for _ in range(-delta):
                self.pop()
|
def add_bookmark(self, url, favorite=False, archive=False, allow_duplicates=True):
    """Add the given bookmark for the authenticated user.

    :param url: URL of the article to bookmark
    :param favorite: whether the bookmark should be favorited
    :param archive: whether the bookmark should be archived
    :param allow_duplicates: whether duplicate bookmarks may be created
        for a given url
    """
    endpoint = self._generate_url('bookmarks')
    # The API expects the boolean flags as 0/1 integers.
    payload = {
        "url": url,
        "favorite": int(favorite),
        "archive": int(archive),
        "allow_duplicates": int(allow_duplicates),
    }
    return self.post(endpoint, payload)
|
def create_choice_model(data, alt_id_col, obs_id_col, choice_col, specification, model_type, intercept_ref_pos=None, shape_ref_pos=None, names=None, intercept_names=None, shape_names=None, nest_spec=None, mixing_id_col=None, mixing_vars=None):
    """Instantiate the choice model class matching `model_type`.

    Parameters
    ----------
    data : str or pandas.DataFrame
        Long-format data (one row per available alternative per
        observation), or an absolute/relative path to a CSV file that
        contains such data.
    alt_id_col : str
        Column in `data` holding each row's alternative identifier.
    obs_id_col : str
        Column in `data` holding each row's observation identifier.
    choice_col : str
        Column of ones and zeros marking whether the row corresponds to
        the chosen alternative for the given individual.
    specification : OrderedDict
        Keys are a proper subset of the columns of the data. Values are
        'all_same', 'all_diff', or a list whose elements are alternative
        ids (a unique coefficient per id) or lists of alternative ids
        (one shared coefficient for the group).
    model_type : str
        One of "MNL", "Asym", "Cloglog", "Scobit", "Uneven",
        "Nested Logit", "Mixed Logit".
    intercept_ref_pos : int, optional
        Position (in the sorted array of unique alternative ids) of the
        alternative whose intercept is not estimated, for identifiability.
    shape_ref_pos : int, optional
        Position of the alternative whose shape parameter is not
        estimated, for identifiability.
    names : OrderedDict, optional
        Same keys as `specification`; values are display names for the
        corresponding coefficients.
    intercept_names : list of str, optional
        One name per alternative (minus the reference), in sorted id order.
    shape_names : list of str, optional
        One name per alternative, in sorted id order.
    nest_spec : OrderedDict, optional
        Maps nest names to lists of alternative ids; each alternative may
        belong to only one nest ("Nested Logit" only).
    mixing_id_col : str, optional
        Column of identifiers of the units over which coefficients are
        randomly distributed. Required when `model_type == "Mixed Logit"`.
    mixing_vars : list of str, optional
        Names (present in `names.values()`) of the index variables treated
        as random. Required when `model_type == "Mixed Logit"`.

    Returns
    -------
    model_obj : instance of the choice model class corresponding to
        `model_type`, instantiated with the arguments passed to this
        function.
    """
    # Fail fast on unsupported model types.
    ensure_valid_model_type(model_type, valid_model_types)
    # Dispatch to the concrete model class for the requested type.
    model_class = model_type_to_class[model_type]
    return model_class(data,
                       alt_id_col,
                       obs_id_col,
                       choice_col,
                       specification,
                       intercept_ref_pos=intercept_ref_pos,
                       shape_ref_pos=shape_ref_pos,
                       names=names,
                       intercept_names=intercept_names,
                       shape_names=shape_names,
                       nest_spec=nest_spec,
                       mixing_id_col=mixing_id_col,
                       mixing_vars=mixing_vars)
|
def emit(self, span_datas):
    """Convert the given SpanData tuples to envelopes and transmit them.

    :type span_datas: list of
        :class:`~opencensus.trace.span_data.SpanData`
    :param span_datas: SpanData tuples to emit.
    """
    envelopes = list(map(self.span_data_to_envelope, span_datas))
    outcome = self._transmit(envelopes)
    if outcome > 0:
        # NOTE(review): a positive _transmit() result appears to signal a
        # transient failure; the envelopes are persisted for a later retry.
        self.storage.put(envelopes, outcome)
|
def pop(self, key, default=None):
    """Remove *key* and return its value, or *default* when absent
    (standard pop semantics for all mapping types). Scalar keys are
    normalized to 1-tuples before lookup."""
    lookup = key if isinstance(key, tuple) else (key,)
    return self.data.pop(lookup, default)
|
def open(filename, mode="r", iline=189, xline=193, strict=True, ignore_geometry=False, endian='big'):
    """Open a segy file.

    Opens a segy file and tries to figure out its sorting, inline numbers,
    crossline numbers, and offsets, and enables reading and writing to this
    file in a simple manner.

    For reading, the access mode `r` is preferred. All write operations will
    raise an exception. For writing, the mode `r+` is preferred (as `rw`
    would truncate the file). Any mode with `w` will raise an error. The
    modes used are standard C file modes; please refer to that documentation
    for a complete reference.

    Open should be used together with python's ``with`` statement, so the
    file is automatically closed when the routine completes or an exception
    is raised.

    By default, segyio tries to open in ``strict`` mode: the file is assumed
    to represent a geometry with consistent inlines, crosslines and offsets.
    If strict is False, segyio will still try to establish a geometry, but
    won't abort if it fails; geometry-dependent modes such as iline will
    then raise an error.

    If ``ignore_geometry=True``, segyio will *not* try to build iline/xline
    or other geometry related structures, which leads to faster opens. This
    is essentially the same as using ``strict=False`` on a file that has no
    geometry.

    Parameters
    ----------
    filename : str
        Path to file to open
    mode : {'r', 'r+'}
        File access mode, read-only ('r', default) or read-write ('r+')
    iline : int or segyio.TraceField
        Inline number field in the trace headers. Defaults to 189 as per
        the SEG-Y rev1 specification
    xline : int or segyio.TraceField
        Crossline number field in the trace headers. Defaults to 193 as per
        the SEG-Y rev1 specification
    strict : bool, optional
        Abort if a geometry cannot be inferred. Defaults to True.
    ignore_geometry : bool, optional
        Opt out on building geometry information, useful for e.g. shot
        organised files. Defaults to False.
    endian : {'big', 'msb', 'little', 'lsb'}
        File endianness, big/msb (default) or little/lsb

    Returns
    -------
    file : segyio.SegyFile
        An open segyio file handle

    Raises
    ------
    ValueError
        If the mode string contains 'w', as it would truncate the file

    Notes
    -----
    .. versionadded:: 1.1
    .. versionchanged:: 1.8
        endian argument

    When a file is opened non-strict, only raw trace access is allowed, and
    using modes such as ``iline`` raise an error.

    Examples
    --------
    Open a file in read-only mode:

    >>> with segyio.open(path, "r") as f:
    ...     print(f.ilines)
    [1, 2, 3, 4, 5]

    Open a file in read-write mode:

    >>> with segyio.open(path, "r+") as f:
    ...     f.trace = np.arange(100)

    Open two files at once:

    >>> with segyio.open(path) as src, segyio.open(path, "r+") as dst:
    ...     dst.trace = src.trace  # copy all traces from src to dst

    Open a little-endian file:

    >>> with segyio.open(path, endian='little') as f:
    ...     f.trace[0]
    """
    # Refuse any 'w' mode outright: the C-level fopen would truncate the file.
    if 'w' in mode:
        problem = 'w in mode would truncate the file'
        solution = 'use r+ to open in read-write'
        raise ValueError(', '.join((problem, solution)))
    # Map endianness names onto the flag value expected by the C extension.
    endians = {
        'little': 256,  # (1 << 8)
        'lsb': 256,
        'big': 0,
        'msb': 0,
    }
    if endian not in endians:
        problem = 'unknown endianness {}, expected one of: '
        opts = ' '.join(endians.keys())
        raise ValueError(problem.format(endian) + opts)
    from . import _segyio
    fd = _segyio.segyiofd(str(filename), mode, endians[endian])
    fd.segyopen()
    metrics = fd.metrics()
    f = segyio.SegyFile(fd, filename=str(filename), mode=mode, iline=iline, xline=xline, endian=endian,)
    try:
        # Build the sample axis from the sample interval (converted to ms)
        # and the recording delay of the first trace header.
        dt = segyio.tools.dt(f, fallback_dt=4000.0) / 1000.0
        t0 = f.header[0][segyio.TraceField.DelayRecordingTime]
        samples = metrics['samplecount']
        f._samples = (numpy.arange(samples) * dt) + t0
    except:
        # Bare except is deliberate: close the native handle on *any*
        # failure (including KeyboardInterrupt) before re-raising.
        f.close()
        raise
    if ignore_geometry:
        return f
    return infer_geometry(f, metrics, iline, xline, strict)
|
def create_ecdsa_public_and_private_from_pem(pem, password=None):
    """<Purpose>
      Create public and private ECDSA keys from a private 'pem'.  The public
      and private keys are returned as strings in PEM format:

      public:  '-----BEGIN PUBLIC KEY----- ... -----END PUBLIC KEY-----',
      private: '-----BEGIN EC PRIVATE KEY----- ... -----END EC PRIVATE KEY-----'

      >>> junk, private = generate_public_and_private()
      >>> public, private = create_ecdsa_public_and_private_from_pem(private)
      >>> securesystemslib.formats.PEMECDSA_SCHEMA.matches(public)
      True
      >>> securesystemslib.formats.PEMECDSA_SCHEMA.matches(private)
      True
      >>> passphrase = 'secret'
      >>> encrypted_pem = create_ecdsa_encrypted_pem(private, passphrase)
      >>> public, private = create_ecdsa_public_and_private_from_pem(encrypted_pem, passphrase)
      >>> securesystemslib.formats.PEMECDSA_SCHEMA.matches(public)
      True
      >>> securesystemslib.formats.PEMECDSA_SCHEMA.matches(private)
      True

    <Arguments>
      pem:
        A string in PEM format.  The private key is extracted from it.

      password: (optional)
        The password, or passphrase, to decrypt the private part of the
        ECDSA key if it is encrypted.  'password' is not used directly as
        the encryption key; a stronger encryption key is derived from it.

    <Exceptions>
      securesystemslib.exceptions.FormatError, if the arguments are
      improperly formatted.

      securesystemslib.exceptions.CryptoError, if the private PEM could not
      be imported (e.g. wrong password or unsupported algorithm).

    <Side Effects>
      None.

    <Returns>
      A (public, private) tuple of PEM strings ('utf-8' decoded)."""
    # Does 'pem' have the correct format?  This check ensures 'pem' conforms
    # to 'securesystemslib.formats.PEMECDSA_SCHEMA'.
    securesystemslib.formats.PEMECDSA_SCHEMA.check_match(pem)
    if password is not None:
        securesystemslib.formats.PASSWORD_SCHEMA.check_match(password)
        # The cryptography backend expects the password as bytes.
        password = password.encode('utf-8')
    else:
        logger.debug('The password/passphrase is unset. The PEM is expected' ' to be unencrypted.')
    public = None
    private = None
    # Generate the public and private ECDSA keys. The pyca/cryptography library
    # performs the actual import operation.
    try:
        private = load_pem_private_key(pem.encode('utf-8'), password=password, backend=default_backend())
    except (ValueError, cryptography.exceptions.UnsupportedAlgorithm) as e:
        raise securesystemslib.exceptions.CryptoError('Could not import private' ' PEM.\n' + str(e))
    public = private.public_key()
    # Serialize public and private keys back to (unencrypted) PEM strings.
    private = private.private_bytes(encoding=serialization.Encoding.PEM, format=serialization.PrivateFormat.TraditionalOpenSSL, encryption_algorithm=serialization.NoEncryption())
    public = public.public_bytes(encoding=serialization.Encoding.PEM, format=serialization.PublicFormat.SubjectPublicKeyInfo)
    return public.decode('utf-8'), private.decode('utf-8')
|
async def generate_image(self, imgtype, face=None, hair=None):
    """Generate a basic image using the auto-image endpoint of weeb.sh.

    This function is a coroutine.

    Parameters:
        imgtype: str - type of the generation to create; possible types
            are awooo, eyes, or won.
        face: str - only used with the awooo type, defines color of face.
        hair: str - only used with the awooo type, defines color of hair/fur.

    Return Type: image data"""
    if not isinstance(imgtype, str):
        raise TypeError("type of 'imgtype' must be str.")
    if face and not isinstance(face, str):
        raise TypeError("type of 'face' must be str.")
    if hair and not isinstance(hair, str):
        raise TypeError("type of 'hair' must be str.")
    if (face or hair) and imgtype != 'awooo':
        # BUG FIX: the message previously referred to a non-existent
        # 'awoo' type; the actual type name is 'awooo'.
        raise InvalidArguments('\'face\' and \'hair\' are arguments only available on the \'awooo\' image type')
    # NOTE(review): face/hair are interpolated without URL-encoding —
    # confirm callers only pass URL-safe values.
    url = f'https://api.weeb.sh/auto-image/generate?type={imgtype}' + ("&face=" + face if face else "") + ("&hair=" + hair if hair else "")
    async with aiohttp.ClientSession() as session:
        async with session.get(url, headers=self.__headers) as resp:
            if resp.status == 200:
                return await resp.read()
            else:
                raise Exception((await resp.json())['message'])
|
def run_get_clusters_from_file(self, clusters_infile, all_ref_seqs, rename_dict=None):
    '''Instead of running cdhit, gets the clusters info from the input file.'''
    rename_dict = {} if rename_dict is None else rename_dict
    # Collect every sequence name present in the input fasta, so we can
    # verify each one is accounted for in the clusters file.
    reader = pyfastaq.sequences.file_reader(self.infile)
    fasta_names = [sequence.id for sequence in reader]
    unique_fasta_names = set(fasta_names)
    clusters = self._load_user_clusters_file(clusters_infile, all_ref_seqs, rename_dict=rename_dict)
    if len(unique_fasta_names) != len(fasta_names):
        raise Error('At least one duplicate name in fasta file ' + self.infile + '. Cannot continue')
    clustered_names = set()
    for member_names in clusters.values():
        clustered_names.update(member_names)
    if not unique_fasta_names.issubset(clustered_names):
        raise Error('Some names in fasta file "' + self.infile + '" not given in cluster file. Cannot continue')
    return clusters
|
def read(self, sensors):
    """Read a set of keys (generator-style coroutine, driven via
    ``yield from``).

    :param sensors: iterable of sensor objects; each contributes its
        ``key`` to the request and gets values extracted from the reply.
    :return: True when the fetch succeeded; False when no session could be
        established or the server answered with a 401 error.
    """
    # De-duplicate the requested keys before building the payload.
    payload = {'destDev': [], 'keys': list(set([s.key for s in sensors]))}
    if self.sma_sid is None:
        yield from self.new_session()
        # Still no session id after logging in: give up for this call.
        if self.sma_sid is None:
            return False
    body = yield from self._fetch_json(URL_VALUES, payload=payload)
    # On the first 401 error we close the session which will re-login
    if body.get('err') == 401:
        _LOGGER.warning("401 error detected, closing session to force " "another login attempt")
        self.close_session()
        return False
    _LOGGER.debug(json.dumps(body))
    for sen in sensors:
        # extract_value() returns truthy when the sensor found its value
        # in the response body.
        if sen.extract_value(body):
            _LOGGER.debug("%s\t= %s %s", sen.name, sen.value, sen.unit)
    return True
|
def create_pool(module, max_conns, *args, **kwargs):
    """Create a connection pool appropriate to the driver module's
    capabilities (based on its DB-API 2.0 ``threadsafety`` level)."""
    if not hasattr(module, 'threadsafety'):
        raise NotSupported("Cannot determine driver threadsafety.")
    if max_conns < 1:
        raise ValueError("Minimum number of connections is 1.")
    level = module.threadsafety
    if level >= 2:
        # Connections may be shared between threads: a real pool works.
        return Pool(module, max_conns, *args, **kwargs)
    if level >= 1:
        # Threads may share the module but not connections.
        return DummyPool(module, *args, **kwargs)
    raise ValueError("Bad threadsafety level: %d" % level)
|
def unload(self, keepables=None):
    """Unload all unneeded datasets.

    Datasets are considered unneeded if they weren't directly requested
    or added to the Scene by the user (i.e. are not in ``self.wishlist``)
    or they are no longer needed to generate composites that have yet to
    be generated.

    Args:
        keepables (iterable): DatasetIDs to keep whether they are needed
            or not.
    """
    # Iterate over keys only (the values were never used); snapshot into a
    # list because entries are deleted from self.datasets below.
    to_del = [ds_id for ds_id in self.datasets
              if ds_id not in self.wishlist
              and (not keepables or ds_id not in keepables)]
    for ds_id in to_del:
        LOG.debug("Unloading dataset: %r", ds_id)
        del self.datasets[ds_id]
|
def factorial(N):
    """Compute the factorial of N.

    If N is small enough, use the fast FACTORIALS lookup table; otherwise
    use scipy.special.factorial with exact integer arithmetic.

    Raises:
        ValueError: if N is negative. (Previously a negative N silently
            indexed the lookup table from the end and returned garbage.)
    """
    if N < 0:
        raise ValueError("factorial() not defined for negative values")
    if N < len(FACTORIALS):
        return FACTORIALS[N]
    else:
        from scipy import special
        # exact=True keeps full integer precision; int(float) loses
        # precision once N! exceeds 2**53.
        return int(special.factorial(N, exact=True))
|
def _sim_atoi_inner(self, str_addr, region, base=10, read_length=None):
    """Return the result of invoking the atoi simprocedure on `str_addr`.

    Delegates to the strtol simprocedure's ``strtol_inner`` with the
    current state and the given memory region, numeric base, and optional
    maximum read length.
    """
    from .. import SIM_PROCEDURES
    strtol = SIM_PROCEDURES['libc']['strtol']
    # The True positional flag mirrors how strtol_inner is invoked here;
    # NOTE(review): presumably it selects atoi-style behavior — confirm
    # against strtol_inner's signature.
    return strtol.strtol_inner(str_addr, self.state, region, base, True, read_length=read_length)
|
def GetRunlevelsNonLSB(states):
    """Accepts a string of runlevel tokens and returns the set of numeric
    LSB runlevel strings. Falsy input yields an empty set."""
    if not states:
        return set()
    convert_table = {
        "0": "0", "1": "1", "2": "2", "3": "3", "4": "4", "5": "5", "6": "6",
        # SysV, Gentoo, Solaris, HP-UX all allow an alpha variant
        # for single user. https://en.wikipedia.org/wiki/Runlevel
        "S": "1",
        "s": "1",
    }
    _LogInvalidRunLevels(states, convert_table)
    # Set comprehension instead of set([...]) — same result, no throwaway list.
    return {convert_table[s] for s in states.split() if s in convert_table}
|
def findall_operations_with_gate_type(self, gate_type: Type[T_DESIRED_GATE_TYPE]) -> Iterable[Tuple[int, ops.GateOperation, T_DESIRED_GATE_TYPE]]:
    """Find the locations of all gate operations of a given type.

    Args:
        gate_type: The type of gate to find, e.g. XPowGate or
            MeasurementGate.

    Yields:
        (index, operation, gate) tuples for operations with the given
        gate type.
    """
    matches = self.findall_operations(
        lambda operation: bool(ops.op_gate_of_type(operation, gate_type)))
    for moment_index, operation in matches:
        gate_operation = cast(ops.GateOperation, operation)
        yield moment_index, gate_operation, cast(T_DESIRED_GATE_TYPE, gate_operation.gate)
|
def adjust_text(texts, x=None, y=None, add_objects=None, ax=None,
                expand_text=(1.05, 1.2), expand_points=(1.05, 1.2),
                expand_objects=(1.05, 1.2), expand_align=(1.05, 1.2),
                autoalign='xy', va='center', ha='center',
                force_text=(0.1, 0.25), force_points=(0.2, 0.5),
                force_objects=(0.1, 0.25), lim=500, precision=0.01,
                only_move={'points': 'xy', 'text': 'xy', 'objects': 'xy'},
                avoid_text=True, avoid_points=True, avoid_self=True,
                save_steps=False, save_prefix='', save_format='png',
                add_step_numbers=True, on_basemap=False, *args, **kwargs):
    """Iteratively adjusts the locations of texts.

    Call adjust_text the very last, after all plotting (especially
    anything that can change the axes limits) has been done. This is
    because to move texts the function needs to use the dimensions of
    the axes, and without knowing the final size of the plots the
    results will be completely nonsensical, or suboptimal.

    First moves all texts that are outside the axes limits
    inside. Then in each iteration moves all texts away from each
    other and from points. In the end hides texts and substitutes them
    with annotations to link them to the respective points.

    Parameters
    ----------
    texts : list
        A list of :obj:`matplotlib.text.Text` objects to adjust.

    Other Parameters
    ----------------
    x : array_like
        x-coordinates of points to repel from; if not provided only uses text
        coordinates.
    y : array_like
        y-coordinates of points to repel from; if not provided only uses text
        coordinates.
    add_objects : list or PathCollection
        a list of additional matplotlib objects to avoid; they must have a
        `.get_window_extent()` method; alternatively, a PathCollection or a
        list of Bbox objects.
    ax : matplotlib axes, default is current axes (plt.gca())
        axes object with the plot
    expand_text : array_like, default (1.05, 1.2)
        a tuple/list/... with 2 multipliers (x, y) by which to expand the
        bounding box of texts when repelling them from each other.
    expand_points : array_like, default (1.05, 1.2)
        a tuple/list/... with 2 multipliers (x, y) by which to expand the
        bounding box of texts when repelling them from points.
    expand_objects : array_like, default (1.05, 1.2)
        a tuple/list/... with 2 multipliers (x, y) by which to expand the
        bounding box of texts when repelling them from other objects.
    expand_align : array_like, default (1.05, 1.2)
        a tuple/list/... with 2 multipliers (x, y) by which to expand the
        bounding box of texts when autoaligning texts.
    autoalign : str or boolean {'xy', 'x', 'y', True, False}, default 'xy'
        Direction in which the best alignment will be determined
        - 'xy' or True, best alignment of all texts determined in all
          directions automatically before running the iterative adjustment
          (overriding va and ha),
        - 'x', will only align horizontally,
        - 'y', will only align vertically,
        - False, do nothing (i.e. preserve va and ha)
    va : str, default 'center'
        vertical alignment of texts
    ha : str, default 'center'
        horizontal alignment of texts
    force_text : tuple, default (0.1, 0.25)
        the repel force from texts is multiplied by this value
    force_points : tuple, default (0.2, 0.5)
        the repel force from points is multiplied by this value
    force_objects : float, default (0.1, 0.25)
        same as other forces, but for repelling additional objects
    lim : int, default 500
        limit of number of iterations
    precision : float, default 0.01
        iterate until the sum of all overlaps along both x and y are less than
        this amount, as a fraction of the total widths and heights,
        respectively. May need to increase for complicated situations.
    only_move : dict, default {'points': 'xy', 'text': 'xy', 'objects': 'xy'}
        a dict to restrict movement of texts to only certain axes for certain
        types of overlaps.
        Valid keys are 'points', 'text', and 'objects'.
        Valid values are '', 'x', 'y', and 'xy'.
        For example, only_move={'points': 'y', 'text': 'xy', 'objects': 'xy'}
        forbids moving texts along the x axis due to overlaps with points.
        NOTE: mutable default; it is only read here, never modified.
    avoid_text : bool, default True
        whether to repel texts from each other.
    avoid_points : bool, default True
        whether to repel texts from points. Can be helpful to switch off in
        extremely crowded plots.
    avoid_self : bool, default True
        whether to repel texts from its original positions.
    save_steps : bool, default False
        whether to save intermediate steps as images.
    save_prefix : str, default ''
        if `save_steps` is True, a path and/or prefix to the saved steps.
    save_format : str, default 'png'
        if `save_steps` is True, a format to save the steps into.
    add_step_numbers : bool, default True
        if `save_steps` is True, whether to add step numbers as titles to the
        images of saving steps.
    on_basemap : bool, default False
        whether your plot uses the basemap library, stops labels going over
        the edge of the map.
    args and kwargs :
        any arguments will be fed into obj:`ax.annotate` after all the
        optimization is done just for plotting the connecting arrows if
        required.

    Returns
    -------
    int
        Number of iterations performed.
    """
    plt.draw()
    if ax is None:
        ax = plt.gca()
    r = get_renderer(ax.get_figure())
    # Remember the original positions so arrows can point back to them.
    orig_xy = [get_text_position(text, ax=ax) for text in texts]
    orig_x = [xy[0] for xy in orig_xy]
    orig_y = [xy[1] for xy in orig_xy]
    force_objects = float_to_tuple(force_objects)
    force_text = float_to_tuple(force_text)
    force_points = float_to_tuple(force_points)
    bboxes = get_bboxes(texts, r, (1.0, 1.0), ax)
    sum_width = np.sum(list(map(lambda bbox: bbox.width, bboxes)))
    sum_height = np.sum(list(map(lambda bbox: bbox.height, bboxes)))
    # If no overlap category may move along an axis, never stop on that
    # axis' precision criterion.
    if not any(list(map(lambda val: 'x' in val, only_move.values()))):
        precision_x = np.inf
    else:
        precision_x = precision * sum_width
    if not any(list(map(lambda val: 'y' in val, only_move.values()))):
        precision_y = np.inf
    else:
        precision_y = precision * sum_height
    if x is None:
        if y is None:
            if avoid_self:
                x, y = orig_x, orig_y
            else:
                x, y = [], []
        else:
            raise ValueError('Please specify both x and y, or neither')
    if y is None:
        raise ValueError('Please specify both x and y, or neither')
    if add_objects is None:
        text_from_objects = False
        add_bboxes = []
    else:
        # Narrowed from a bare `except:` (which also swallowed
        # KeyboardInterrupt) and removed the unreachable `return` that
        # followed the raise; the cause is now chained for debugging.
        try:
            add_bboxes = get_bboxes(add_objects, r, (1, 1), ax)
        except Exception as err:
            raise ValueError("Can't get bounding boxes from add_objects - is "
                             "it a flat list of matplotlib objects?") from err
        text_from_objects = True
    for text in texts:
        text.set_va(va)
        text.set_ha(ha)
    if save_steps:
        if add_step_numbers:
            plt.title('Before')
        plt.savefig('%s%s.%s' % (save_prefix, '000a', save_format),
                    format=save_format, dpi=150)
    elif on_basemap:
        ax.draw(r)
    if autoalign:
        if autoalign is True:
            autoalign = 'xy'
        for i in range(2):
            texts = optimally_align_text(x, y, texts, expand=expand_align,
                                         add_bboxes=add_bboxes,
                                         direction=autoalign,
                                         renderer=r, ax=ax)
    if save_steps:
        if add_step_numbers:
            plt.title('Autoaligned')
        plt.savefig('%s%s.%s' % (save_prefix, '000b', save_format),
                    format=save_format, dpi=150)
    elif on_basemap:
        ax.draw(r)
    texts = repel_text_from_axes(texts, ax, renderer=r, expand=expand_points)
    # Rolling window of the last 10 (qx, qy) values, used to detect a
    # failure to converge.
    history = [(np.inf, np.inf)] * 10
    for i in range(lim):  # was xrange (Python 2); range on Python 3
        if avoid_text:
            d_x_text, d_y_text, q1 = repel_text(texts, renderer=r, ax=ax,
                                                expand=expand_text)
        else:
            d_x_text, d_y_text, q1 = [0] * len(texts), [0] * len(texts), (0, 0)
        if avoid_points:
            d_x_points, d_y_points, q2 = repel_text_from_points(
                x, y, texts, ax=ax, renderer=r, expand=expand_points)
        else:
            d_x_points, d_y_points, q2 = [0] * len(texts), [0] * len(texts), (0, 0)
        if text_from_objects:
            d_x_objects, d_y_objects, q3 = repel_text_from_bboxes(
                add_bboxes, texts, ax=ax, renderer=r, expand=expand_objects)
        else:
            d_x_objects, d_y_objects, q3 = [0] * len(texts), [0] * len(texts), (0, 0)
        if only_move:
            # Zero out displacement components that only_move forbids.
            if 'text' in only_move:
                if 'x' not in only_move['text']:
                    d_x_text = np.zeros_like(d_x_text)
                if 'y' not in only_move['text']:
                    d_y_text = np.zeros_like(d_y_text)
            if 'points' in only_move:
                if 'x' not in only_move['points']:
                    d_x_points = np.zeros_like(d_x_points)
                if 'y' not in only_move['points']:
                    d_y_points = np.zeros_like(d_y_points)
            if 'objects' in only_move:
                if 'x' not in only_move['objects']:
                    d_x_objects = np.zeros_like(d_x_objects)
                if 'y' not in only_move['objects']:
                    d_y_objects = np.zeros_like(d_y_objects)
        dx = (np.array(d_x_text) * force_text[0] +
              np.array(d_x_points) * force_points[0] +
              np.array(d_x_objects) * force_objects[0])
        dy = (np.array(d_y_text) * force_text[1] +
              np.array(d_y_points) * force_points[1] +
              np.array(d_y_objects) * force_objects[1])
        qx = np.sum([q[0] for q in [q1, q2, q3]])
        qy = np.sum([q[1] for q in [q1, q2, q3]])
        # Max over the *previous* 10 iterations, taken before updating.
        histm = np.max(np.array(history), axis=0)
        history.pop(0)
        history.append((qx, qy))
        move_texts(texts, dx, dy, bboxes=get_bboxes(texts, r, (1, 1), ax),
                   ax=ax)
        if save_steps:
            if add_step_numbers:
                plt.title(i + 1)
            plt.savefig('%s%s.%s' % (save_prefix, '{0:03}'.format(i + 1),
                                     save_format),
                        format=save_format, dpi=150)
        elif on_basemap:
            ax.draw(r)
        # Stop if we've reached the precision threshold, or if the x and y
        # displacement are both greater than the max over the last 10
        # iterations (suggesting a failure to converge).
        if (qx < precision_x and qy < precision_y) or np.all([qx, qy] >= histm):
            break
    # Now adding arrows from texts to their original locations if required.
    if 'arrowprops' in kwargs:
        bboxes = get_bboxes(texts, r, (1, 1), ax)
        kwap = kwargs.pop('arrowprops')
        for j, (bbox, text) in enumerate(zip(bboxes, texts)):
            ap = {'patchA': text}  # ensure arrow is clipped by the text
            ap.update(kwap)  # add arrowprops from kwargs
            ax.annotate("",  # add an arrow from the text to the point
                        xy=(orig_xy[j]), xytext=get_midpoint(bbox),
                        arrowprops=ap, *args, **kwargs)
    if save_steps:
        if add_step_numbers:
            plt.title(i + 1)
        plt.savefig('%s%s.%s' % (save_prefix, '{0:03}'.format(i + 1),
                                 save_format),
                    format=save_format, dpi=150)
    elif on_basemap:
        ax.draw(r)
    return i + 1
|
def get_date_yyyymmdd(yyyymmdd):
    """Return the datetime.date encoded in a 'YYYYMMDD' string."""
    year = int(yyyymmdd[:4])
    month = int(yyyymmdd[4:6])
    day = int(yyyymmdd[6:])
    return date(year, month, day)
|
def pr_num(self):
    """Return the PR number, or None if not on a PR.

    Looks up the repo first; falls back to the Travis environment when
    the repo lookup yields nothing.
    """
    local = get_pr_num(repo=self.repo)
    return local if local is not None else get_travis_pr_num()
|
def _parseline(self, line):
    """Dispatch a raw line to the handler for its segment type.

    :param line: a line to parse
    :returns: the number of rows to jump to reach the next data line,
        or the error code -1; unknown segments yield 0.
    """
    fields = line.split(SEPARATOR)
    dispatch = {
        SEGMENT_HEADER: self._handle_header,
        SEGMENT_EOF: self._handle_eof,
        SEGMENT_RESULT: self._handle_result_line,
        SEGMENT_OBSERVATION_ORDER: self._handle_new_record,
    }
    handler = dispatch.get(fields[0])
    return handler(fields) if handler else 0
|
def adapters(self, adapters):
    """Sets the number of Ethernet adapters for this VMware VM instance.

    :param adapters: number of adapters
    :raises VMwareError: if more than 10 adapters are requested
    """
    if adapters > 10:
        # VMware VMs are limited to 10 adapters
        raise VMwareError("Number of adapters above the maximum supported of 10")
    self._ethernet_adapters.clear()
    for slot in range(adapters):
        self._ethernet_adapters[slot] = EthernetAdapter()
    self._adapters = len(self._ethernet_adapters)
    log.info("VMware VM '{name}' [{id}] has changed the number of Ethernet adapters to {adapters}".format(name=self.name, id=self.id, adapters=adapters))
|
def msjd(theta):
    """Mean squared jumping distance.

    Sums, over every field of the structured array ``theta``, the squared
    first differences along axis 0.
    """
    return sum(
        (np.sum(np.diff(theta[name], axis=0) ** 2)
         for name in theta.dtype.names),
        0.)
|
def StreamFile(self, filedesc, offset=0, amount=None):
    """Streams chunks of a given file starting at given offset.

    Args:
      filedesc: A `file` object to stream.
      offset: An integer offset at which the file stream should start on.
      amount: An upper bound on number of bytes to read.

    Returns:
      Generator over `Chunk` instances.
    """
    return self.Stream(FileReader(filedesc, offset=offset), amount=amount)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.