signature
stringlengths 29
44.1k
| implementation
stringlengths 0
85.2k
|
---|---|
def _resolve_view_params ( self , request , defaults , * args , ** kwargs ) :
"""Resolves view params with least ammount of resistance .
Firstly check for params on urls passed args , then on class init args or members ,
and lastly on class get methods ."""
|
params = copy . copy ( defaults )
params . update ( self . params )
params . update ( kwargs )
resolved_params = { }
extra_context = { }
for key in params : # grab from provided params .
value = params [ key ]
# otherwise grab from existing params
if value == None :
value = self . params [ key ] if self . params . has_key ( key ) else None
# otherwise grab from class method
if value == None :
value = getattr ( self , 'get_%s' % key ) ( request , * args , ** kwargs ) if getattr ( self , 'get_%s' % key , None ) else None
if key in defaults :
resolved_params [ key ] = value
else :
extra_context [ key ] = value
if extra_context :
try :
resolved_params [ 'extra_context' ] . update ( extra_context )
except AttributeError :
resolved_params [ 'extra_context' ] = extra_context
return resolved_params
|
def format_filename(filename, shorten=False):
    """Format a filename for user display.

    The main purpose is to guarantee the name can be displayed at all:
    it is decoded to unicode in a way that cannot fail.

    :param filename: the filename to format for UI display.
    :param shorten: optionally strip the path leading up to the file.
    """
    display_name = os.path.basename(filename) if shorten else filename
    return filename_to_ui(display_name)
|
def readCovarianceMatrixFile(cfile, readCov=True, readEig=True):
    """Read a covariance (similarity) matrix and/or its eigen-decomposition.

    :param cfile: basename of the covariance files; expects ``<cfile>.cov``
        and, for the eigen-decomposition, ``<cfile>.cov.eval`` and
        ``<cfile>.cov.evec``.  (The corresponding ID file is ``cfile.id``.)
    :param readCov: load the covariance matrix into ``RV['K']``.
    :param readEig: load eigenvalues/eigenvectors into ``RV['eval']``/``RV['evec']``.
    :return: dict with the requested arrays.
    :raises IOError: if a required file is missing.  (Previously a bare
        ``assert``, which is silently stripped under ``python -O``.)
    """
    covFile = cfile + '.cov'
    evalFile = cfile + '.cov.eval'
    evecFile = cfile + '.cov.evec'
    RV = {}
    if readCov:
        if not os.path.exists(covFile):
            raise IOError('%s is missing.' % covFile)
        RV['K'] = SP.loadtxt(covFile)
    if readEig:
        if not os.path.exists(evalFile):
            raise IOError('%s is missing.' % evalFile)
        if not os.path.exists(evecFile):
            raise IOError('%s is missing.' % evecFile)
        RV['eval'] = SP.loadtxt(evalFile)
        RV['evec'] = SP.loadtxt(evecFile)
    return RV
|
def _cmp_by_origin(path1, path2):
    """Select the best path based on the origin attribute.

    IGP is preferred over EGP; EGP is preferred over Incomplete.
    Returns None when both paths have the same origin.
    """
    preference_map = {
        BGP_ATTR_ORIGIN_IGP: 3,
        BGP_ATTR_ORIGIN_EGP: 2,
        BGP_ATTR_ORIGIN_INCOMPLETE: 1,
    }

    def get_origin_pref(origin):
        pref = preference_map.get(origin.value)
        if pref is None:
            LOG.error('Invalid origin value encountered %s.', origin)
            return 0
        return pref

    origin1 = path1.get_pattr(BGP_ATTR_TYPE_ORIGIN)
    origin2 = path2.get_pattr(BGP_ATTR_TYPE_ORIGIN)
    assert origin1 is not None and origin2 is not None
    # Identical raw origin values: no winner.
    if origin1.value == origin2.value:
        return None
    # Translate origin values to preference and pick the higher one.
    pref1 = get_origin_pref(origin1)
    pref2 = get_origin_pref(origin2)
    if pref1 == pref2:
        return None
    return path1 if pref1 > pref2 else path2
|
def getShocks(self):
    '''Compute this period's effective permanent and transitory shocks by
    combining each agent's idiosyncratic draws with the aggregate shocks.

    Parameters
    ----------
    None

    Returns
    -------
    None
    '''
    IndShockConsumerType.getShocks(self)
    # Effective shocks are the product of idiosyncratic and aggregate
    # components; transitory shocks are additionally scaled by the wage rate.
    self.PermShkNow = self.PermShkNow * self.PermShkAggNow
    self.TranShkNow = self.TranShkNow * self.TranShkAggNow * self.wRteNow
|
async def unicode_type(self, elem):
    """Serialize or deserialize a unicode value on ``self.iobj``.

    :param elem: value to dump when writing; ignored when loading.
    :return: the result of the dump/load coroutine.
    """
    if not self.writing:
        return await x.load_unicode(self.iobj)
    return await x.dump_unicode(self.iobj, elem)
|
def _client_data(self, client):
    """Return a dict that represents the specified client.

    Keys: obj, id, url, name.  Returns an empty dict for a falsy client.
    """
    if not client:
        return {}
    return {
        'obj': client,
        'id': client.id,
        'url': client.absolute_url(),
        'name': to_utf8(client.getName()),
    }
|
def GetEntries(self, parser_mediator, match=None, **unused_kwargs):
    """Extract device information from the iPod plist.

    Args:
      parser_mediator (ParserMediator): mediates interactions between parsers
          and other components, such as storage and dfvfs.
      match (Optional[dict[str: object]]): keys extracted from PLIST_KEYS.
    """
    devices = match.get('Devices', {})
    for device_identifier, device_information in devices.items():
        connected_time = device_information.get('Connected', None)
        if not connected_time:
            continue
        event_data = IPodPlistEventData()
        event_data.device_id = device_identifier
        # TODO: refactor.
        for key, value in device_information.items():
            if key == 'Connected':
                continue
            # Normalize plist keys like "Firmware Version" to attribute names.
            attribute_name = key.lower().replace(' ', '_')
            setattr(event_data, attribute_name, value)
        event = time_events.PythonDatetimeEvent(connected_time, definitions.TIME_DESCRIPTION_LAST_CONNECTED)
        parser_mediator.ProduceEventWithEventData(event, event_data)
|
def _iter ( self ) :
"""Generate ( name , est , weight ) tuples excluding None transformers"""
|
get_weight = ( self . transformer_weights or { } ) . get
return ( ( name , trans , get_weight ( name ) ) for name , trans in self . transformer_list if trans is not None )
|
def MaxPool(a, k, strides, padding, data_format):
    """Maximum pooling op.

    Pools *a* with window *k*; returns a 1-tuple, matching the op-output
    convention of the surrounding code.
    """
    if data_format.decode("ascii") == "NCHW":
        # Move the channel axis last so pooling operates on channel-last data.
        # BUG FIX: the original line ended with a stray comma, which wrapped
        # the rolled array in a tuple and broke the NCHW path.
        a = np.rollaxis(a, 1, -1)
    patches = _pool_patches(a, k, strides, padding.decode("ascii"))
    pool = np.amax(patches, axis=tuple(range(-len(k), 0)))
    if data_format.decode("ascii") == "NCHW":
        # Restore the original channel-first layout.
        pool = np.rollaxis(pool, -1, 1)
    return pool,
|
def snapshot_status(repository=None, snapshot=None, ignore_unavailable=False, hosts=None, profile=None):
    '''
    .. versionadded:: 2017.7.0

    Obtain status of all currently running snapshots.

    repository
        Particular repository to look for snapshots
    snapshot
        Snapshot name
    ignore_unavailable
        Ignore unavailable snapshots

    CLI example::

        salt myminion elasticsearch.snapshot_status ignore_unavailable=True
    '''
    es = _get_instance(hosts, profile)
    try:
        return es.snapshot.status(repository=repository, snapshot=snapshot, ignore_unavailable=ignore_unavailable)
    except elasticsearch.TransportError as e:
        message = "Cannot obtain snapshot status, server returned code {0} with message {1}".format(e.status_code, e.error)
        raise CommandExecutionError(message)
|
def authenticateRequest(self, service_request, username, password, **kwargs):
    """Processes an authentication request.

    If no authenticator is attached to the service, authentication
    succeeds.  When the authenticator carries the
    ``_pyamf_expose_request`` marker, the HTTP request is prepended to
    the credential arguments.

    @return: C{True} if authorized.  C{False} stops processing the
        request and returns an error to the client.
    @rtype: C{bool}
    """
    authenticator = self.getAuthenticator(service_request)
    if authenticator is None:
        # Nothing to check against: allow the request.
        return True
    call_args = (username, password)
    if hasattr(authenticator, '_pyamf_expose_request'):
        call_args = (kwargs.get('http_request', None),) + call_args
    # Strict comparison: only an explicit True result authenticates.
    return authenticator(*call_args) == True
|
async def push_transaction_async(self):
    """Increment async transaction depth.

    Ensures the async connection is established, then — on the outermost
    transaction only — acquires a connection from the pool and pins it in
    task-local storage.  Nested calls merely bump the stored depth counter.
    """
    await self.connect_async(loop=self.loop)
    depth = self.transaction_depth_async()
    if not depth:
        # Outermost transaction: dedicate a pool connection to this task.
        conn = await self._async_conn.acquire()
        self._task_data.set('conn', conn)
    self._task_data.set('depth', depth + 1)
|
def get_output_jsonpath(self, sub_output=None):
    """Attempt to build a JSONPath filter for this ExtractorProcessor
    that captures how to get at the outputs of the wrapped Extractor.

    :param sub_output: optional sub-output name, forwarded to
        get_output_jsonpath_field.
    :return: JSONPath string of the form
        ``<field>[?k1="v1" & k2=[...]].result.value``.

    NOTE(review): Python 2 only (``iteritems``, ``basestring``,
    ``types.ListType``).
    """
    output_jsonpath_field = self.get_output_jsonpath_field(sub_output)
    metadata = self.extractor.get_metadata()
    metadata['source'] = str(self.input_fields)
    extractor_filter = ""
    is_first = True
    for key, value in metadata.iteritems():
        # Join successive filter conditions with ' & '.
        if is_first:
            is_first = False
        else:
            extractor_filter = extractor_filter + " & "
        if isinstance(value, basestring):
            # Replace unescaped double quotes with single quotes so the
            # value can sit inside a double-quoted JSONPath literal.
            extractor_filter = extractor_filter + "{}=\"{}\"".format(key, re.sub('(?<=[^\\\])\"', "'", value))
        elif isinstance(value, types.ListType):
            # Lists are embedded using their Python repr.
            extractor_filter = extractor_filter + "{}={}".format(key, str(value))
    output_jsonpath = "{}[?{}].result.value".format(output_jsonpath_field, extractor_filter)
    return output_jsonpath
|
def to_abivars(self):
    """Return a dictionary with the abinit variables for the spectral
    function and the polarizability frequency mesh."""
    abivars = {}
    # Spectral function
    abivars["nomegasf"] = self.nomegasf
    abivars["domegasf"] = self.domegasf
    abivars["spmeth"] = self.spmeth
    # Frequency mesh for the polarizability
    abivars["nfreqre"] = self.nfreqre
    abivars["freqremax"] = self.freqremax
    abivars["nfreqim"] = self.nfreqim
    abivars["freqremin"] = self.freqremin
    return abivars
|
def open_external_editor(filename=None, sql=None):
    """Open an external editor, wait for the user to type in their query,
    and return it.

    :return: tuple (query, message); message is an error string or None.
    """
    message = None
    filename = filename.strip().split(' ', 1)[0] if filename else None
    sql = sql or ''
    MARKER = '# Type your query above this line.\n'
    # Seed the editor buffer with the partial sql (if any) and the marker.
    query = click.edit(u'{sql}\n\n{marker}'.format(sql=sql, marker=MARKER), filename=filename, extension='.sql')
    if filename:
        try:
            with open(filename, encoding='utf-8') as f:
                query = f.read()
        except IOError:
            message = 'Error reading file: %s.' % filename
    if query is None:
        # The editor was aborted; hand back the original sql instead of
        # None so the caller never has to deal with it.  Empty string is ok.
        query = sql
    else:
        query = query.split(MARKER, 1)[0].rstrip('\n')
    return (query, message)
|
def add_method(obj, func, name=None):
    """Bind *func* to *obj* as an instance method.

    :param obj: instance to attach the method to.
    :param func: plain function taking the instance as first argument.
    :param name: attribute name; defaults to ``func.__name__``.
    """
    attr_name = func.__name__ if name is None else name
    if sys.version_info >= (3,):
        bound = types.MethodType(func, obj)
    else:
        # Python 2 MethodType also wants the class.
        bound = types.MethodType(func, obj, obj.__class__)
    setattr(obj, attr_name, bound)
|
def make_setup_state(self, app: 'Quart', first_registration: bool, *, url_prefix: Optional[str] = None) -> 'BlueprintSetupState':
    """Build the setup-state instance used to register this blueprint.

    Arguments:
        app: The app this blueprint is being registered on.
        first_registration: True if this is the first registration
            of this blueprint on the app.
        url_prefix: An optional prefix to all rules.
    """
    state = BlueprintSetupState(self, app, first_registration, url_prefix=url_prefix)
    return state
|
def Reinit(self, pid, auto_symfile_loading=True):
    """Reinitializes the object with a new pid.

    Since all modes might need access to this object at any time, this object
    needs to be long-lived.  To make this clear in the API, this shorthand is
    supplied.

    Args:
      pid: the pid of the target process
      auto_symfile_loading: whether the symbol file should automatically be
        loaded by gdb.
    """
    self.ShutDownGdb()
    # Deliberately re-run __init__ in place so existing references to this
    # long-lived object observe the new target process; the current
    # architecture is preserved.
    self.__init__(pid, auto_symfile_loading, architecture=self.arch)
|
def create_userena_profile(self, user):
    """Create (or return the existing) :class:`UserenaSignup` for *user*.

    :param user: Django :class:`User` instance.
    :return: the :class:`UserenaSignup` instance, newly created if needed.
    """
    if isinstance(user.username, text_type):
        user.username = smart_text(user.username)
    # Only the activation key is used; the salt is discarded.
    _salt, activation_key = generate_sha1(user.username)
    try:
        return self.get(user=user)
    except self.model.DoesNotExist:
        return self.create(user=user, activation_key=activation_key)
|
def hdr(data, filename):
    """Write an ENVI header (HDR) file.

    Parameters
    ----------
    data : str or dict
        the file or dictionary to get the info from
    filename : str
        the HDR file to write
    """
    if isinstance(data, HDRobject):
        header = data
    else:
        header = HDRobject(data)
    header.write(filename)
|
def get_bucket(self, hash_name, bucket_key):
    """Returns bucket content as list of tuples (vector, data).

    Each stored row is a pickled dict.  Sparse vectors are stored as
    nonzero (row_index, value) pairs plus a dimension and rebuilt as a
    (dim, 1) COO matrix; dense vectors as raw bytes plus a dtype.
    """
    results = []
    for row in self._get_bucket_rows(hash_name, bucket_key):
        # SECURITY NOTE: pickle.loads can execute arbitrary code; this is
        # only safe while the backing store is fully trusted.
        val_dict = pickle.loads(row)
        # Depending on type (sparse or not) reconstruct vector
        if 'sparse' in val_dict:
            row_indices = []
            col_indices = []
            values = []
            # For each non-zero element, append row index, value and a
            # constant column index of 0 (single-column vector).
            for e in val_dict['nonzeros']:
                row_indices.append(e[0])
                values.append(e[1])
                col_indices.append(0)
            coo_row = numpy.array(row_indices, dtype=numpy.int32)
            coo_col = numpy.array(col_indices, dtype=numpy.int32)
            coo_data = numpy.array(values)
            # Create COO sparse vector of shape (dim, 1)
            vector = scipy.sparse.coo_matrix((coo_data, (coo_row, coo_col)), shape=(val_dict['dim'], 1))
        else:
            # numpy.fromstring on binary data is deprecated; frombuffer is
            # the supported equivalent (note: the result is read-only).
            vector = numpy.frombuffer(val_dict['vector'], dtype=val_dict['dtype'])
        # Add data to result tuple, if present
        results.append((vector, val_dict.get('data')))
    return results
|
def get_contingency_tables(self):
    """Create an array of ContingencyTable objects, one per probability
    threshold.

    Returns:
        Array of ContingencyTable objects
    """
    tables = []
    for ct in self.contingency_tables.values:
        tables.append(ContingencyTable(*ct))
    return np.array(tables)
|
def lookup(id=None, artist_amg_id=None, upc=None, country='US', media='all', entity=None, attribute=None, limit=50):
    """Returns the result of the lookup of the specified id, artist_amg_id or
    upc in an array of result_item(s).

    :param id: String. iTunes ID of the artist, album, track, ebook or software
    :param artist_amg_id: String. All Music Guide ID of the artist
    :param upc: String. UPCs/EANs
    :param country: String. Two-letter country code of the target store
        (http://en.wikipedia.org/wiki/ISO_3166-1_alpha-2).
    :param media: String. The media type to search for. Example: music
    :param entity: String. Type of results wanted, relative to the media type.
        Example: musicArtist.  Full list: musicArtist, musicTrack, album,
        musicVideo, mix, song
    :param attribute: String. The attribute to search for in the stores,
        relative to the specified media type.
    :param limit: Integer. Number of results the iTunes Store should return.
    :return: An array of result_item(s)
    :raises ValueError: if none of id/artist_amg_id/upc is provided
    :raises ConnectionError: if the response body is not valid JSON
    :raises LookupError: if the lookup matched nothing
    """
    # If none of the basic lookup arguments are provided, raise a ValueError.
    if id is None and artist_amg_id is None and upc is None:
        raise ValueError(lookup_no_ids)
    lookup_url = _url_lookup_builder(id, artist_amg_id, upc, country, media, entity, attribute, limit)
    r = requests.get(lookup_url)
    try:
        json = r.json()['results']
        result_count = r.json()['resultCount']
    except (ValueError, KeyError):
        # The original bare `except:` also swallowed KeyboardInterrupt /
        # SystemExit; only a malformed or non-JSON payload should map to
        # ConnectionError (requests' JSONDecodeError subclasses ValueError).
        raise ConnectionError(general_no_connection)
    if result_count == 0:
        raise LookupError(lookup_error)
    return _get_result_list(json)
|
def namer(cls, imageUrl, pageUrl):
    """Image file name is UNIX time stamp & something for most of the comics..."""
    ts_match = compile(r'/(\d+)-').search(imageUrl)
    if ts_match:
        prefix = datetime.utcfromtimestamp(int(ts_match.group(1))).strftime("%Y-%m-%d")
    else:
        # Only chapter 1, pages 4 and 5 lacked a timestamp when this was
        # written; give them a fixed, sortable prefix.
        prefix = '2015-04-11x'
    return prefix + "-" + pageUrl.rsplit('/', 1)[-1]
|
def create_ca_signed_cert(ca_name, CN, days=365, cacert_path=None, ca_filename=None, cert_path=None, cert_filename=None, digest='sha256', cert_type=None, type_ext=False, replace=False):
    '''
    Create a Certificate (CERT) signed by a named Certificate Authority (CA).

    If the certificate file already exists, the function just returns,
    assuming the CERT already exists (unless ``replace`` is set).

    The CN *must* match an existing CSR generated by create_csr.  If it
    does not, this method does nothing.

    ca_name
        name of the CA
    CN
        common name matching the certificate signing request
    days
        number of days certificate is valid, default is 365 (1 year)
    cacert_path
        absolute path to ca certificates root directory
    ca_filename
        alternative filename for the CA

        .. versionadded:: 2015.5.3
    cert_path
        full path to the certificates directory
    cert_filename
        alternative filename for the certificate, useful when using special
        characters in the CN.  If set it overrides the certificate filename
        output effects of ``cert_type``; ``type_ext`` is completely
        overridden.

        .. versionadded:: 2015.5.3
    digest
        the message digest algorithm; must be a string describing a digest
        algorithm supported by OpenSSL (EVP_get_digestbyname), e.g. "md5"
        or "sha1".  Default: 'sha256'
    replace
        replace this certificate even if it exists

        .. versionadded:: 2015.5.1
    cert_type
        string, either 'server' or 'client' (see create_csr() for details).
        If create_csr(type_ext=True) was used, this function **must** be
        called with the same cert_type so it can find the CSR file.
    type_ext
        bool.  If True, use ``cert_type`` as an extension to the CN when
        formatting the filename, e.g. some_subject_CN_server.crt.  Has no
        effect when ``cert_filename`` is set.

    CLI Example:

    .. code-block:: bash

        salt '*' tls.create_ca_signed_cert test localhost
    '''
    ret = {}
    set_ca_path(cacert_path)
    if not ca_filename:
        ca_filename = '{0}_ca_cert'.format(ca_name)
    if not cert_path:
        cert_path = '{0}/{1}/certs'.format(cert_base_path(), ca_name)
    if type_ext:
        if not cert_type:
            # type_ext without cert_type is ambiguous: refuse to write.
            log.error('type_ext = True but cert_type is unset. ' 'Certificate not written.')
            return ret
        elif cert_type:
            CN_ext = '_{0}'.format(cert_type)
    else:
        CN_ext = ''
    csr_filename = '{0}{1}'.format(CN, CN_ext)
    if not cert_filename:
        cert_filename = '{0}{1}'.format(CN, CN_ext)
    # Bail out early if the certificate already exists (unless replacing).
    if not replace and os.path.exists(os.path.join(os.path.sep.join('{0}/{1}/certs/{2}.crt'.format(cert_base_path(), ca_name, cert_filename).split('/')))):
        return 'Certificate "{0}" already exists'.format(cert_filename)
    # Load the CA certificate and private key; a missing file means the
    # named CA does not exist.
    try:
        maybe_fix_ssl_version(ca_name, cacert_path=cacert_path, ca_filename=ca_filename)
        with salt.utils.files.fopen('{0}/{1}/{2}.crt'.format(cert_base_path(), ca_name, ca_filename)) as fhr:
            ca_cert = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM, fhr.read())
        with salt.utils.files.fopen('{0}/{1}/{2}.key'.format(cert_base_path(), ca_name, ca_filename)) as fhr:
            ca_key = OpenSSL.crypto.load_privatekey(OpenSSL.crypto.FILETYPE_PEM, fhr.read())
    except IOError:
        ret['retcode'] = 1
        ret['comment'] = 'There is no CA named "{0}"'.format(ca_name)
        return ret
    # Load the CSR that matches the CN (plus optional type extension).
    try:
        csr_path = '{0}/{1}.csr'.format(cert_path, csr_filename)
        with salt.utils.files.fopen(csr_path) as fhr:
            req = OpenSSL.crypto.load_certificate_request(OpenSSL.crypto.FILETYPE_PEM, fhr.read())
    except IOError:
        ret['retcode'] = 1
        ret['comment'] = 'There is no CSR that matches the CN "{0}"'.format(cert_filename)
        return ret
    # Collect X509 extensions from the CSR, with fallbacks for old
    # pyOpenSSL releases that lack req.get_extensions().
    exts = []
    try:
        exts.extend(req.get_extensions())
    except AttributeError:
        try:
            # see: http://bazaar.launchpad.net/~exarkun/pyopenssl/master/revision/189
            # support is there from quite a long time, but without API
            # so we mimic the newly get_extensions method present in ultra
            # recent pyopenssl distros
            log.info('req.get_extensions() not supported in pyOpenSSL versions ' 'prior to 0.15. Processing extensions internally. ' 'Your version: %s', OpenSSL_version)
            native_exts_obj = OpenSSL._util.lib.X509_REQ_get_extensions(req._req)
            for i in _range(OpenSSL._util.lib.sk_X509_EXTENSION_num(native_exts_obj)):
                ext = OpenSSL.crypto.X509Extension.__new__(OpenSSL.crypto.X509Extension)
                ext._extension = OpenSSL._util.lib.sk_X509_EXTENSION_value(native_exts_obj, i)
                exts.append(ext)
        except Exception:
            log.error('X509 extensions are unsupported in pyOpenSSL ' 'versions prior to 0.14. Upgrade required to ' 'use extensions. Current version: %s', OpenSSL_version)
    # Build and sign the certificate from the CSR's subject/pubkey.
    cert = OpenSSL.crypto.X509()
    cert.set_version(2)
    cert.set_subject(req.get_subject())
    cert.gmtime_adj_notBefore(0)
    cert.gmtime_adj_notAfter(int(days) * 24 * 60 * 60)
    cert.set_serial_number(_new_serial(ca_name))
    cert.set_issuer(ca_cert.get_subject())
    cert.set_pubkey(req.get_pubkey())
    cert.add_extensions(exts)
    cert.sign(ca_key, salt.utils.stringutils.to_str(digest))
    # Write the PEM-encoded certificate and record it in the CA database.
    cert_full_path = '{0}/{1}.crt'.format(cert_path, cert_filename)
    with salt.utils.files.fopen(cert_full_path, 'wb+') as crt:
        crt.write(salt.utils.stringutils.to_bytes(OpenSSL.crypto.dump_certificate(OpenSSL.crypto.FILETYPE_PEM, cert)))
    _write_cert_to_database(ca_name, cert)
    return 'Created Certificate for "{0}": "{1}/{2}.crt"'.format(CN, cert_path, cert_filename)
|
def replace_series_data(self, chartSpace):
    """Rewrite the series data under *chartSpace* using the chart data
    contents.

    All series-level formatting is left undisturbed.  Extra series in
    *chartSpace* are deleted; missing ones are added to the last plot with
    formatting "cloned" from that plot's last series.
    """
    chart_data = self._chart_data
    plotArea = chartSpace.plotArea
    date_1904 = chartSpace.date_1904
    # Grow/shrink the series collection to match the chart data first.
    self._adjust_ser_count(plotArea, len(chart_data))
    for ser, series_data in zip(plotArea.sers, chart_data):
        self._rewrite_ser_data(ser, series_data, date_1904)
|
def shared_memory(attrs=None, where=None):
    '''
    Return shared_memory information from osquery

    CLI Example:

    .. code-block:: bash

        salt '*' osquery.shared_memory
    '''
    supported = ('RedHat', 'Debian')
    if __grains__['os_family'] not in supported:
        return {'result': False, 'comment': 'Only available on Red Hat or Debian based systems.'}
    return _osquery_cmd(table='shared_memory', attrs=attrs, where=where)
|
def handle_features(self, device_features):
    """Store *device_features* and refresh each known zone's source list."""
    self.device_features = device_features
    if not device_features or 'zone' not in device_features:
        return
    for zone in device_features['zone']:
        zone_id = zone.get('id')
        if zone_id not in self.zones:
            continue
        _LOGGER.debug("handle_features: %s", zone_id)
        # Sort in place so the payload and the zone share the same list.
        input_list = zone.get('input_list', [])
        input_list.sort()
        self.zones[zone_id].source_list = input_list
|
def cellsim(self, cellindex, return_just_cell=False):
    """Do the actual simulations of LFP, using synaptic spike times from
    network simulation.

    Parameters
    ----------
    cellindex : int
        cell index between 0 and population size - 1.
    return_just_cell : bool
        If True, return only the `LFPy.Cell` object;
        if False, run the full simulation and return None.

    Returns
    -------
    None or `LFPy.Cell` object

    See also
    --------
    hybridLFPy.csd, LFPy.Cell, LFPy.Synapse, LFPy.RecExtElectrode
    """
    tic = time()
    # Build the cell and place/orient it from this index's precomputed
    # soma position and rotation.
    cell = LFPy.Cell(**self.cellParams)
    cell.set_pos(**self.pop_soma_pos[cellindex])
    cell.set_rotation(**self.rotations[cellindex])
    if return_just_cell:
        # with several cells, NEURON can only hold one cell at the time
        allsecnames = []
        allsec = []
        for sec in cell.allseclist:
            allsecnames.append(sec.name())
            for seg in sec:
                # one entry per segment, tagged with its section name
                allsec.append(sec.name())
        cell.allsecnames = allsecnames
        cell.allsec = allsec
        return cell
    else:
        self.insert_all_synapses(cellindex, cell)
        # electrode object where LFPs are calculated
        electrode = LFPy.RecExtElectrode(**self.electrodeParams)
        if self.calculateCSD:
            # Temporary identity imem/tvec let true_lam_csd derive the
            # per-segment CSD coefficients before the real simulation.
            cell.tvec = np.arange(cell.totnsegs)
            cell.imem = np.eye(cell.totnsegs)
            csdcoeff = csd.true_lam_csd(cell, self.populationParams['radius'], electrode.z)
            csdcoeff *= 1E6
            # nA mum^-3 -> muA mm^-3 conversion
            del cell.tvec, cell.imem
            cell.simulate(electrode, dotprodcoeffs=[csdcoeff], **self.simulationParams)
            cell.CSD = helpers.decimate(cell.dotprodresults[0], q=self.decimatefrac)
        else:
            cell.simulate(electrode, **self.simulationParams)
        # Downsample the LFP and copy the electrode geometry onto the cell.
        cell.LFP = helpers.decimate(electrode.LFP, q=self.decimatefrac)
        cell.x = electrode.x
        cell.y = electrode.y
        cell.z = electrode.z
        cell.electrodecoeff = electrode.electrodecoeff
        # put all necessary cell output in output dict
        for attrbt in self.savelist:
            attr = getattr(cell, attrbt)
            if type(attr) == np.ndarray:
                # store arrays as float32 to reduce output size
                self.output[cellindex][attrbt] = attr.astype('float32')
            else:
                try:
                    self.output[cellindex][attrbt] = attr
                except:
                    # fall back to the string representation of
                    # attributes that cannot be stored directly
                    self.output[cellindex][attrbt] = str(attr)
        self.output[cellindex]['srate'] = 1E3 / self.dt_output
        print('cell %s population %s in %.2f s' % (cellindex, self.y, time() - tic))
|
def validate_categories(categories):
    """Take an iterable of source categories and raise ValueError if some
    of them are invalid."""
    invalid = set(categories) - Source.categories
    if invalid:
        raise ValueError('Invalid categories: %s' % list(invalid))
|
def set_chain_info(self, chain_id, chain_name, num_groups):
    """Record one chain's information.

    :param chain_id: the asym chain id from mmCIF
    :param chain_name: the auth chain id from mmCIF
    :param num_groups: the number of groups this chain has
    """
    # Append to the three parallel per-chain lists in lockstep.
    for target, value in ((self.chain_id_list, chain_id),
                          (self.chain_name_list, chain_name),
                          (self.groups_per_chain, num_groups)):
        target.append(value)
|
def load(filename, loader=None, implicit_tuple=True, env=None, schema=None):
    """Load and evaluate a GCL expression from a file.

    :param env: optional dict of bindings for evaluation.  Defaults to a
        fresh empty dict per call; the original ``env={}`` mutable default
        was shared across all calls.
    """
    if env is None:
        env = {}
    with open(filename, 'r') as f:
        return loads(f.read(), filename=filename, loader=loader, implicit_tuple=implicit_tuple, env=env, schema=schema)
|
def file_adapter(file_or_path):
    """Context-manager generator that works like ``open(file_path)`` but also
    accepts already-opened file-like objects.

    The yielded object is closed on exit — even when the consumer raises
    (the original leaked the handle in that case).
    """
    if is_file(file_or_path):
        file_obj = file_or_path
    else:
        file_obj = open(file_or_path, 'rb')
    try:
        yield file_obj
    finally:
        # Close unconditionally, matching the original's behavior of also
        # closing caller-provided file objects.
        file_obj.close()
|
def set_idlemax(self, idlemax):
    """Sets CPU idle max value.

    Old-style (generator-based) asyncio coroutine.

    :param idlemax: idle max value (integer)
    """
    is_running = yield from self.is_running()
    if is_running:
        # router is running: push the new value to the hypervisor first
        yield from self._hypervisor.send('vm set_idle_max "{name}" 0 {idlemax}'.format(name=self._name, idlemax=idlemax))
    # Log the transition and record the new value regardless of run state.
    log.info('Router "{name}" [{id}]: idlemax updated from {old_idlemax} to {new_idlemax}'.format(name=self._name, id=self._id, old_idlemax=self._idlemax, new_idlemax=idlemax))
    self._idlemax = idlemax
|
def _rspiral ( width , height ) :
"""Reversed spiral generator .
Parameters
width : ` int `
Spiral width .
height : ` int `
Spiral height .
Returns
` generator ` of ( ` int ` , ` int ` )
Points ."""
|
x0 = 0
y0 = 0
x1 = width - 1
y1 = height - 1
while x0 < x1 and y0 < y1 :
for x in range ( x0 , x1 ) :
yield x , y0
for y in range ( y0 , y1 ) :
yield x1 , y
for x in range ( x1 , x0 , - 1 ) :
yield x , y1
for y in range ( y1 , y0 , - 1 ) :
yield x0 , y
x0 += 1
y0 += 1
x1 -= 1
y1 -= 1
if x0 == x1 :
for y in range ( y0 , y1 + 1 ) :
yield x0 , y
elif y0 == y1 :
for x in range ( x0 , x1 + 1 ) :
yield x , y0
|
def export_json(self, filename):
    """Write this graph's JSON representation to *filename* (UTF-8)."""
    serialized = self.to_json().encode('utf-8')
    with open(filename, 'wb') as handle:
        handle.write(serialized)
|
def state(self, context):
    """Get instance state.

    :param resort.engine.execution.Context context:
        Current execution context.
    :rtype: str
    :return:
        Instance state name (last 'state' status line wins), or None.
    """
    result = None
    status_lines = self.read(context, ["status", context.resolve(self.__name)])
    for fields in status_lines:
        if fields[2] == "state":
            result = fields[3].strip()
    return result
|
def visitStringFacet(self, ctx: ShExDocParser.StringFacetContext):
    """stringFacet: stringLength INTEGER | REGEXP REGEXP_FLAGS
    stringLength: KW_LENGTH | KW_MINLENGTH | KW_MAXLENGTH
    """
    if ctx.stringLength():
        # Length-style facet: store the INTEGER under the field matching
        # the keyword that was parsed.
        slen = jsg.Integer(ctx.INTEGER().getText())
        if ctx.stringLength().KW_LENGTH():
            self.nodeconstraint.length = slen
        elif ctx.stringLength().KW_MINLENGTH():
            self.nodeconstraint.minlength = slen
        else:
            # Only remaining grammar alternative is KW_MAXLENGTH.
            self.nodeconstraint.maxlength = slen
    else:
        # Regular-expression facet: strip the surrounding '/' delimiters
        # ([1:-1]) and normalize escapes before storing the pattern.
        self.nodeconstraint.pattern = jsg.String(self.context.fix_re_escapes(ctx.REGEXP().getText()[1:-1]))
        if ctx.REGEXP_FLAGS():
            self.nodeconstraint.flags = jsg.String(ctx.REGEXP_FLAGS().getText())
|
def plot_iso(axis, step, var):
    """Plot isocontours of a scalar field.

    Args:
        axis (:class:`matplotlib.axes.Axes`): axis handler of an existing
            matplotlib figure where the isocontours should be plotted.
        step (:class:`~stagpy.stagyydata._Step`): a step of a StagyyData
            instance.
        var (str): the scalar field name.
    """
    xmesh, ymesh, fld = get_meshes_fld(step, var)
    shift = conf.field.shift
    if shift:
        # honor the user-configured angular shift of the field
        fld = np.roll(fld, shift, axis=0)
    axis.contour(xmesh, ymesh, fld, linewidths=1)
|
def update_line(self, trace, xdata, ydata, side='left', draw=False, update_limits=True):
    """Update a single trace, for faster redraw."""
    line = self.conf.get_mpl_line(trace)
    line.set_data(xdata, ydata)
    datarange = [xdata.min(), xdata.max(), ydata.min(), ydata.max()]
    self.conf.set_trace_datarange(datarange, trace=trace)
    # get_right_axes() is still invoked for side='right' in case it has
    # side effects, even though the result is otherwise unused here.
    axes = self.get_right_axes() if side == 'right' else self.axes
    if update_limits:
        self.set_viewlimits()
    if draw:
        self.draw()
|
def FlipAllowed(self):
    """Raise an error if the 'not' keyword is used where it is not allowed."""
    if not hasattr(self, 'flipped'):
        raise errors.ParseError('Not defined.')
    if not self.flipped:
        return
    operator = self.current_expression.operator
    if operator and operator.lower() not in ('is', 'contains', 'inset', 'equals'):
        raise errors.ParseError('Keyword \'not\' does not work against operator: {0:s}'.format(operator))
|
def dcs_modules():
    """Return the names of DCS modules available in this package.

    When packaged with PyInstaller, the source directory cannot be scanned
    because ``FrozenImporter`` does not implement ``iter_modules``; instead
    the frozen importer's ``toc`` is searched for modules under this package.
    """
    package_dir = os.path.dirname(__file__)
    prefix = __package__ + '.'
    if not getattr(sys, 'frozen', False):
        return [prefix + name
                for _, name, is_pkg in pkgutil.iter_modules([package_dir])
                if not is_pkg]
    importer = pkgutil.get_importer(package_dir)
    # Depth of exactly two dots selects direct submodules of the package.
    return [mod for mod in list(importer.toc)
            if mod.startswith(prefix) and mod.count('.') == 2]
|
def find_video_by_id(self, video_id):
    """Fetch basic info for a video.

    doc: http://open.youku.com/docs/doc?id=44
    """
    params = {'client_id': self.client_id, 'video_id': video_id}
    response = requests.get('https://openapi.youku.com/v2/videos/show_basic.json',
                            params=params)
    check_error(response)
    return response.json()
|
def generate(data, format="auto"):
    """Converts input chemical formats to json and optimizes structure.

    Args:
        data: A string or file path representing a chemical
        format: The format of the `data` variable (default is 'auto')

    The `format` can be any value specified by Open Babel
    (http://openbabel.org/docs/2.3.1/FileFormats/Overview.html). The 'auto'
    option uses the extension for files (ie. my_file.mol -> mol) and defaults
    to SMILES (smi) for strings.
    """
    # Support both files and strings and attempt to infer file type.
    # Catch only the errors open() raises for a non-path argument; the
    # previous bare `except` silently hid unrelated failures as well.
    try:
        with open(data) as in_file:
            if format == 'auto':
                format = data.split('.')[-1]
            data = in_file.read()
    except (OSError, IOError, ValueError, TypeError):
        # Not a readable file -- treat `data` as a raw string.
        if format == 'auto':
            format = 'smi'
    return format_converter.convert(data, format, 'json')
|
def sgn(x):
    """Return the sign of x.

    A positive integer for x > 0, 0 for x == 0, a negative integer for
    x < 0.  Raises ValueError if x is a NaN.  Equivalent to cmp(x, 0),
    but more efficient.
    """
    converted = BigFloat._implicit_convert(x)
    if is_nan(converted):
        raise ValueError("Cannot take sign of a NaN.")
    return mpfr.mpfr_sgn(converted)
|
def save_pip(self, out_dir):
    """Saves the current working set of pip packages to requirements.txt.

    Writes one sorted ``name==version`` line per installed distribution to
    ``<out_dir>/requirements.txt``.  Failures are logged with traceback
    instead of raised, keeping the save best-effort (the original logged a
    generic message and discarded the exception detail).
    """
    try:
        import pkg_resources
        requirements = sorted(
            "%s==%s" % (dist.key, dist.version)
            for dist in pkg_resources.working_set
        )
        with open(os.path.join(out_dir, 'requirements.txt'), 'w') as f:
            f.write("\n".join(requirements))
    except Exception:
        # logger.exception records the stack trace, unlike the old
        # logger.error which dropped the caught exception entirely.
        logger.exception("Error saving pip packages")
|
def setup(self):
    """Defers loading until needed.

    For each configured language: fetch and cache the index's existing
    mapping, rebuild the expected mapping from the current codebase, and
    (re)create the index and put the new mapping if they differ.
    """
    # Get the existing mapping & cache it. We'll compare it
    # during the ``update`` & if it doesn't match, we'll put the new
    # mapping.
    for language in self.languages:
        self.index_name = self._index_name_for_language(language)
        try:
            self.existing_mapping[language] = self.conn.indices.get_mapping(index=self.index_name)
        except NotFoundError:
            # Index does not exist yet; it is created further below.
            pass
        except Exception:
            if not self.silently_fail:
                raise
        unified_index = haystack.connections[self.connection_alias].get_unified_index()
        self.content_field_name, field_mapping = self.build_schema(unified_index.all_searchfields(), language)
        current_mapping = {'modelresult': {'properties': field_mapping, '_boost': {'name': 'boost', 'null_value': 1.0}}}
        # NOTE(review): if get_mapping failed above, existing_mapping may hold no
        # entry for this language -- presumably it is pre-populated elsewhere;
        # confirm, otherwise this lookup can raise KeyError.
        if current_mapping != self.existing_mapping[language]:
            try:
                # Make sure the index is there first (ignore 400 = already exists).
                self.conn.indices.create(index=self.index_name, body=self.DEFAULT_SETTINGS, ignore=400)
                self.conn.indices.put_mapping(index=self.index_name, doc_type='modelresult', body=current_mapping)
                self.existing_mapping[language] = current_mapping
            except Exception:
                if not self.silently_fail:
                    raise
    self.setup_complete = True
|
def get_bit_values(number, size=32):
    """Return the bits of ``number`` as a list of ints, most significant first.

    >>> get_bit_values(1) == [0] * 31 + [1]
    True
    >>> get_bit_values(0xDEADBEEF)
    [1, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, \
1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1]

    You may override the default word size of 32-bits to match your actual
    application.

    >>> get_bit_values(0x3, 2)
    [1, 1]
    >>> get_bit_values(0x3, 4)
    [0, 0, 1, 1]
    """
    # Extract each bit by shifting, from the highest position down to 0.
    return [(number >> shift) & 1 for shift in reversed(range(size))]
|
def enabled_checker(func):
    """Access decorator rejecting RPC methods not enabled by configuration.

    If ``self.allowed_methods`` is a non-empty list and does not contain the
    wrapped method's name, the call raises; otherwise it passes through.
    """
    @wraps(func)
    def guard(self, *args, **kwargs):
        allowed = self.allowed_methods
        if allowed and isinstance(allowed, list) and func.__name__ not in allowed:
            raise Exception("Method {} is disabled".format(func.__name__))
        return func(self, *args, **kwargs)
    return guard
|
def sample(self, idx):
    """Return a tuple of (s, r, a, o) for one history window.

    ``s`` has shape self._output_shape, i.e. [H, W, (hist_len + 1) * channel]
    when the input is (H, W, channel).
    """
    start = (self._curr_pos + idx) % self._curr_size
    length = self.history_len + 1
    if start + length <= self._curr_size:
        # Contiguous window: plain slicing suffices.
        window = slice(start, start + length)
        pieces = (self.state[window], self.reward[window],
                  self.action[window], self.isOver[window])
    else:
        # Window wraps around the circular buffer; _slice stitches the ends.
        wrap_end = start + length - self._curr_size
        pieces = (self._slice(self.state, start, wrap_end),
                  self._slice(self.reward, start, wrap_end),
                  self._slice(self.action, start, wrap_end),
                  self._slice(self.isOver, start, wrap_end))
    return self._pad_sample(*pieces)
|
def _clear_ignore(endpoint_props):
    '''Drop ignored fields and ``None`` values in one pass (combines
    _clear_dict and _ignore_keys).'''
    return {name: value
            for name, value in six.iteritems(endpoint_props)
            if name not in _DO_NOT_COMPARE_FIELDS and value is not None}
|
def save_dot(self, fd):
    """Write a Graphviz DOT language representation of the case to ``fd``."""
    from pylon.io import DotWriter
    writer = DotWriter(self)
    writer.write(fd)
|
def __register_class(self, parsed_config):
    """Register the class implementing this config, so we only add it once.

    Args:
        parsed_config: The JSON object with the API configuration being added.

    Raises:
        ApiConfigurationError: If the class has already been registered.
    """
    methods = parsed_config.get('methods')
    if not methods:
        return
    # The implementing class name is the part of 'rosyMethod' before the
    # first dot; collect the distinct class names first.
    service_classes = set()
    for method in methods.itervalues():
        rosy_method = method.get('rosyMethod')
        if rosy_method and '.' in rosy_method:
            service_classes.add(rosy_method.split('.', 1)[0])
    for service_class in service_classes:
        if service_class in self.__registered_classes:
            raise api_exceptions.ApiConfigurationError(
                'API class %s has already been registered.' % service_class)
        self.__registered_classes.add(service_class)
|
async def send(self, message: Union[Data, Iterable[Data], AsyncIterable[Data]]) -> None:
    """This coroutine sends a message.

    It sends a string (:class:`str`) as a text frame and a bytes-like
    object (:class:`bytes`, :class:`bytearray`, or :class:`memoryview`)
    as a binary frame.

    It also accepts an iterable or an asynchronous iterator of strings or
    bytes-like objects. Each item is treated as a message fragment and
    sent in its own frame. All items must be of the same type, or else
    :meth:`send` will raise a :exc:`TypeError` and the connection will be
    closed.

    It raises a :exc:`TypeError` for other inputs.
    """
    await self.ensure_open()
    # Unfragmented message -- this case must be handled first because
    # strings and bytes-like objects are iterable.
    if isinstance(message, (str, bytes, bytearray, memoryview)):
        opcode, data = prepare_data(message)
        await self.write_frame(True, opcode, data)
    # Fragmented message -- regular iterator.
    elif isinstance(message, Iterable):
        # Work around https://github.com/python/mypy/issues/6227
        message = cast(Iterable[Data], message)
        iter_message = iter(message)
        # First fragment: carries the real opcode with FIN unset.
        try:
            message_chunk = next(iter_message)
        except StopIteration:
            # Empty iterable: nothing to send.
            return
        opcode, data = prepare_data(message_chunk)
        await self.write_frame(False, opcode, data)
        # Other fragments: continuation frames, same data type required.
        for message_chunk in iter_message:
            confirm_opcode, data = prepare_data(message_chunk)
            if confirm_opcode != opcode:
                # We're half-way through a fragmented message and we can't
                # complete it. This makes the connection unusable.
                self.fail_connection(1011)
                raise TypeError("data contains inconsistent types")
            await self.write_frame(False, OP_CONT, data)
        # Final fragment: empty continuation frame with FIN set.
        await self.write_frame(True, OP_CONT, b"")
    # Fragmented message -- asynchronous iterator
    elif isinstance(message, AsyncIterable):
        # aiter_message = aiter(message) without aiter
        aiter_message = type(message).__aiter__(message)
        # First fragment.
        try:
            # message_chunk = anext(aiter_message) without anext
            message_chunk = await type(aiter_message).__anext__(aiter_message)
        except StopAsyncIteration:
            # Empty async iterable: nothing to send.
            return
        opcode, data = prepare_data(message_chunk)
        await self.write_frame(False, opcode, data)
        # Other fragments.
        async for message_chunk in aiter_message:
            confirm_opcode, data = prepare_data(message_chunk)
            if confirm_opcode != opcode:
                # We're half-way through a fragmented message and we can't
                # complete it. This makes the connection unusable.
                self.fail_connection(1011)
                raise TypeError("data contains inconsistent types")
            await self.write_frame(False, OP_CONT, data)
        # Final fragment.
        await self.write_frame(True, OP_CONT, b"")
    else:
        raise TypeError("data must be bytes, str, or iterable")
|
def validate_argmax_with_skipna(skipna, args, kwargs):
    """Normalize ``skipna`` for numpy-dispatched ``Series.argmax`` calls.

    When numpy calls 'Series.argmax', the third parameter in its signature
    is 'out' (an ndarray or None), which lands in ``skipna``;
    ``process_skipna`` untangles that before the remaining arguments are
    validated, since 'skipna' itself should be a boolean.
    """
    skipna, args = process_skipna(skipna, args)
    validate_argmax(args, kwargs)
    return skipna
|
def update(self, name, modifiers, dtype, kind):
    """Reset this function's attributes.

    Propagates a possible rename to the parent module first, then stores
    the new attributes and recomputes the dtype.
    """
    self.update_name(name)
    self.modifiers = modifiers
    self.dtype = dtype
    self.kind = kind
    self.update_dtype()
|
def setup_environment(chip, args_file=None):
    """Setup the SCons environment for compiling arm cortex code.

    This will return an env that has all of the correct settings and create a
    command line arguments file for GCC that contains all of the required
    flags. The use of a command line argument file passed with @./file_path is
    important since there can be many flags that exceed the maximum allowed
    length of a command line on Windows.
    """
    config = ConfigManager()
    # Make sure we never get MSVC settings for windows since that has the wrong command line flags for gcc
    if platform.system() == 'Windows':
        env = Environment(tools=['mingw'], ENV=os.environ)
    else:
        env = Environment(tools=['default'], ENV=os.environ)
    # Quote include paths so directories containing spaces survive the command line.
    env['INCPREFIX'] = '-I"'
    env['INCSUFFIX'] = '"'
    env['CPPDEFPREFIX'] = ''
    env['CPPDEFSUFFIX'] = ''
    env['CPPPATH'] = chip.includes()
    env['ARCH'] = chip
    # Setup Cross Compiler
    env['CC'] = 'arm-none-eabi-gcc'
    env['AS'] = 'arm-none-eabi-gcc'
    env['LINK'] = 'arm-none-eabi-gcc'
    env['AR'] = 'arm-none-eabi-ar'
    env['RANLIB'] = 'arm-none-eabi-ranlib'
    # AS command line is by default setup for calling `as` directly so we need
    # to modify it to call via *-gcc to allow for preprocessing
    env['ASCOM'] = "$AS $ASFLAGS -o $TARGET -c $SOURCES"
    # Setup nice display strings unless we're asked to show raw commands
    if not config.get('build:show-commands'):
        env['CCCOMSTR'] = "Compiling $TARGET"
        env['ARCOMSTR'] = "Building static library $TARGET"
        env['RANLIBCOMSTR'] = "Indexing static library $TARGET"
        env['LINKCOMSTR'] = "Linking $TARGET"
    # Setup Compiler Flags from the chip's property definitions
    env['CCFLAGS'] = chip.combined_properties('cflags')
    env['LINKFLAGS'] = chip.combined_properties('ldflags')
    # Append (don't replace): there are default ARFLAGS that are necessary to keep
    env['ARFLAGS'].append(chip.combined_properties('arflags'))
    env['ASFLAGS'].append(chip.combined_properties('asflags'))
    # Add in compile time definitions
    defines = utilities.build_defines(chip.property('defines', {}))
    env['CPPDEFINES'] = defines
    # Route most CC flags through an @args-file to dodge Windows command length limits.
    if args_file is not None:
        env['CCCOM'] = "$CC $CCFLAGS $CPPFLAGS @{} -c -o $TARGET $SOURCES".format(args_file)
    # Setup Target Architecture for compiler, assembler and linker alike
    env['CCFLAGS'].append('-mcpu=%s' % chip.property('cpu'))
    env['ASFLAGS'].append('-mcpu=%s' % chip.property('cpu'))
    env['LINKFLAGS'].append('-mcpu=%s' % chip.property('cpu'))
    # Initialize library paths (all libraries are added via dependencies)
    env['LIBPATH'] = []
    env['LIBS'] = []
    return env
|
def pretty_date(the_datetime):
    """Return a human-readable time delta string for ``the_datetime``.

    Falls back to an absolute date string for anything more than a week
    old or in the future.  Compares against naive UTC now.
    """
    # Adapted from http://stackoverflow.com/a/5164027/176978
    delta = datetime.utcnow() - the_datetime
    if delta.days > 7 or delta.days < 0:
        return the_datetime.strftime('%A %B %d, %Y')
    if delta.days == 1:
        return '1 day ago'
    if delta.days > 1:
        return '{0} days ago'.format(delta.days)
    seconds = delta.seconds
    if seconds <= 1:
        return 'just now'
    if seconds < 60:
        return '{0} seconds ago'.format(seconds)
    if seconds < 120:
        return '1 minute ago'
    if seconds < 3600:
        return '{0} minutes ago'.format(int(round(seconds / 60)))
    if seconds < 7200:
        return '1 hour ago'
    return '{0} hours ago'.format(int(round(seconds / 3600)))
|
def _parse_dict(features, new_names):
    """Helper of `_parse_features` that parses a dictionary of features.

    Keys must be convertible to ``FeatureType``; values are feature names
    (or ``...`` for "all features of that type").
    """
    feature_collection = OrderedDict()
    for feature_type, feature_names in features.items():
        try:
            feature_type = FeatureType(feature_type)
        except ValueError:
            # Bug fix: the original constructed this exception without
            # `raise`, so invalid keys slipped through and crashed later
            # with a confusing AttributeError.
            raise ValueError('Failed to parse {}, keys of the dictionary have to be instances '
                             'of {}'.format(features, FeatureType.__name__))
        feature_collection[feature_type] = feature_collection.get(feature_type, OrderedDict())
        if feature_names is ...:
            feature_collection[feature_type] = ...
        if feature_type.has_dict() and feature_collection[feature_type] is not ...:
            feature_collection[feature_type].update(FeatureParser._parse_feature_names(feature_names, new_names))
    return feature_collection
|
def unicodify(filename):
    """Return ``filename`` as text.

    Bytes are decoded using the locale's preferred encoding, because the
    tarfile module on Python 2 doesn't return Unicode.
    """
    if not isinstance(filename, bytes):
        return filename
    return filename.decode(locale.getpreferredencoding())
|
def _rm_name_match ( s1 , s2 ) :
"""determine whether two sequence names from a repeatmasker alignment match .
: return : True if they are the same string , or if one forms a substring of the
other , else False"""
|
m_len = min ( len ( s1 ) , len ( s2 ) )
return s1 [ : m_len ] == s2 [ : m_len ]
|
def from_string(cls, alg_str):
    """Create a location from two-character algebraic notation: the file
    letter followed by the rank digit, e.g. ``e4``, ``b5``, ``a7``.

    :type: alg_str: str
    :rtype: Location
    """
    try:
        rank_index = int(alg_str[1]) - 1       # '4' -> rank 3 (zero-based)
        file_index = ord(alg_str[0]) - 97      # 'e' -> file 4 ('a' == 0)
        return cls(rank_index, file_index)
    except ValueError as e:
        raise ValueError("Location.from_string {} invalid: {}".format(alg_str, e))
|
def merge(filehandle_1, filehandle_2, output_filehandle):
    """Merge two line-sorted files into ``output_filehandle``, keeping order.

    Both inputs must already be sorted; lines are compared as strings.
    Streams ``filehandle_1`` line by line instead of loading it entirely
    into memory (the original used ``readlines()``).
    """
    line2 = filehandle_2.readline()
    for line1 in filehandle_1:
        # Emit everything from file 2 that sorts at or before line1.
        while line2 != '' and line2 <= line1:
            output_filehandle.write(line2)
            line2 = filehandle_2.readline()
        output_filehandle.write(line1)
    # Drain the remainder of file 2.
    while line2 != '':
        output_filehandle.write(line2)
        line2 = filehandle_2.readline()
|
def parse_expmethodresponse(self, tup_tree):  # pylint: disable=unused-argument
    """Unsupported element handler -- always raises CIMXMLParseError."""
    message = _format(
        "Internal Error: Parsing support for element {0!A} is not "
        "implemented", name(tup_tree))
    raise CIMXMLParseError(message, conn_id=self.conn_id)
|
def p_statement(self, p):
    """statement : OPTION_AND_VALUE"""
    # NOTE: the docstring above is the PLY grammar rule -- do not edit it.
    option, value = p[1][0], p[1][1]
    if self.options.get('lowercasenames'):
        option = option.lower()
    # Strip trailing whitespace unless disabled or the value was quoted.
    if (not self.options.get('nostripvalues')
            and not hasattr(value, 'is_single_quoted')
            and not hasattr(value, 'is_double_quoted')):
        value = value.rstrip()
    p[0] = ['statement', option, value]
|
def create_new_example(self, foo='', a='', b=''):
    """Entity object factory: delegate to the module-level
    ``create_new_example`` function, forwarding all arguments by keyword."""
    return create_new_example(foo=foo, a=a, b=b)
|
def _merge_adjacent_rows(self, rows):
    """Resolves adjacent and overlapping rows. Overlapping rows are resolved as follows:

    * The interval with the most recent begin date prevails for the overlapping period.
    * If the begin dates are the same the interval with the most recent end date prevails.
    * If the begin and end dates are equal the last row in the data set prevails.

    Identical (excluding begin and end date) adjacent rows are replaced with a single row.

    :param list[dict[str,T]] rows: The rows in a group (i.e. with the same natural key),
                                   sorted on begin date.
    :rtype: list[dict[str,T]]
    """
    ret = list()
    prev_row = None  # the last accepted, still-open interval
    for row in rows:
        if prev_row:
            relation = Allen.relation(prev_row[self._key_start_date], prev_row[self._key_end_date], row[self._key_start_date], row[self._key_end_date])
            if relation is None:
                # row holds an invalid interval (prev_row always holds a valid interval). Hence, the join is empty.
                return []
            elif relation == Allen.X_BEFORE_Y:
                # Two rows with distinct intervals.
                # prev_row: |----|
                # row:                |-----|
                ret.append(prev_row)
                prev_row = row
            elif relation == Allen.X_MEETS_Y:
                # The two rows are adjacent.
                # prev_row: |-----|
                # row:             |-----|
                if self._equal(prev_row, row):
                    # The two rows are identical (except for start and end date) and adjacent. Combine the two rows
                    # into one row.
                    prev_row[self._key_end_date] = row[self._key_end_date]
                else:
                    # Rows are adjacent but not identical.
                    ret.append(prev_row)
                    prev_row = row
            elif relation == Allen.X_OVERLAPS_WITH_Y:
                # prev_row overlaps row. Should not occur with proper reference data.
                # prev_row: |-----------|
                # row:            |-----------|
                if self._equal(prev_row, row):
                    # The two rows are identical (except for start and end date) and overlapping. Combine the two
                    # rows into one row.
                    prev_row[self._key_end_date] = row[self._key_end_date]
                else:
                    # Rows are overlapping but not identical: truncate prev_row so that
                    # row's more recent begin date prevails for the overlap.
                    prev_row[self._key_end_date] = row[self._key_start_date] - 1
                    ret.append(prev_row)
                    prev_row = row
            elif relation == Allen.X_STARTS_Y:
                # prev_row starts row. Should not occur with proper reference data.
                # prev_row: |-----|
                # row:      |----------------|
                prev_row = row
            elif relation == Allen.X_EQUAL_Y:
                # Can happen when the reference data sets are joined without respect for date intervals.
                # prev_row: |----------------|
                # row:      |----------------|
                prev_row = row
            elif relation == Allen.X_DURING_Y_INVERSE:
                # row during prev_row. Should not occur with proper reference data.
                # prev_row: |----------------|
                # row:           |------|
                # Note: the interval with the most recent start date prevails. Hence, the interval after
                # row[self._key_end_date] is discarded.
                if self._equal(prev_row, row):
                    prev_row[self._key_end_date] = row[self._key_end_date]
                else:
                    prev_row[self._key_end_date] = row[self._key_start_date] - 1
                    ret.append(prev_row)
                    prev_row = row
            elif relation == Allen.X_FINISHES_Y_INVERSE:
                # row finishes prev_row. Should not occur with proper reference data.
                # prev_row: |----------------|
                # row:               |-------|
                if not self._equal(prev_row, row):
                    prev_row[self._key_end_date] = row[self._key_start_date] - 1
                    ret.append(prev_row)
                    prev_row = row
                # Note: if the two rows are identical (except for start and end date) nothing to do.
            else:
                # Note: The rows are sorted such that prev_row[self._key_begin_date] <= row[self._key_begin_date].
                # Hence the following relations should not occur: X_DURING_Y, X_FINISHES_Y, X_BEFORE_Y_INVERSE,
                # X_MEETS_Y_INVERSE, X_OVERLAPS_WITH_Y_INVERSE, and X_STARTS_Y_INVERSE. Hence, we covered all 13
                # relations in Allen's interval algebra.
                raise ValueError('Data is not sorted properly. Relation: {0}'.format(relation))
        elif row[self._key_start_date] <= row[self._key_end_date]:
            # row is the first valid row.
            prev_row = row
    if prev_row:
        ret.append(prev_row)
    return ret
|
def findCfgFileForPkg(pkgName, theExt, pkgObj=None, taskName=None):
    """Locate the configuration files for/from/within a given python package.

    pkgName is a string python package name. This is used unless pkgObj
    is given, in which case pkgName is taken from pkgObj.__name__.
    theExt is either '.cfg' or '.cfgspc'. If the task name is known, it is
    given as taskName, otherwise one is determined using the pkgName.

    Returns a tuple of (package-object, cfg-file-name).
    Raises NoCfgFileError if the package cannot be imported or no matching
    file is found.
    """
    # arg check: normalize the extension to start with a dot
    ext = theExt
    if ext[0] != '.':
        ext = '.' + theExt
    # Do the import, if needed
    pkgsToTry = {}
    if pkgObj:
        pkgsToTry[pkgObj.__name__] = pkgObj
    else:
        # First try something simple like a regular or dotted import
        try:
            fl = []
            if pkgName.find('.') > 0:
                # dotted name: import with a fromlist of the parent package
                fl = [pkgName[:pkgName.rfind('.')], ]
            pkgsToTry[str(pkgName)] = __import__(str(pkgName), fromlist=fl)
        # NOTE(review): bare except deliberately catches any import failure
        # (including non-string pkgName); consider narrowing -- confirm intent.
        except:
            throwIt = True
            # One last case to try is something like "csc_kill" from
            # "acstools.csc_kill", but this convenience capability will only be
            # allowed if the parent pkg (acstools) has already been imported.
            if isinstance(pkgName, string_types) and pkgName.find('.') < 0:
                matches = [x for x in sys.modules.keys() if x.endswith("." + pkgName)]
                if len(matches) > 0:
                    throwIt = False
                    for mmm in matches:
                        pkgsToTry[mmm] = sys.modules[mmm]
            if throwIt:
                raise NoCfgFileError("Unfound package or " + ext + " file via: " + "import " + str(pkgName))
    # Now that we have the package object (or a few of them to try), for each
    # one find the .cfg or .cfgspc file, and return.
    # Return as soon as ANY match is found.
    for aPkgName in pkgsToTry:
        aPkg = pkgsToTry[aPkgName]
        path = os.path.dirname(aPkg.__file__)
        if len(path) < 1:
            path = '.'
        flist = irafutils.rglob(path, "*" + ext)
        if len(flist) < 1:
            continue
        # Go through these and find the first one for the assumed or given task
        # name. The task name for 'BigBlackBox.drizzle' would be 'drizzle'.
        if taskName is None:
            taskName = aPkgName.split(".")[-1]
        flist.sort()
        for f in flist:
            # A .cfg file gets checked for _task_name_ = val, but a .cfgspc file
            # will have a string check function signature as the val.
            if ext == '.cfg':
                itsTask = getEmbeddedKeyVal(f, TASK_NAME_KEY, '')
            else:
                # .cfgspc
                sigStr = getEmbeddedKeyVal(f, TASK_NAME_KEY, '')
                # .cfgspc file MUST have an entry for TASK_NAME_KEY w/ a default
                itsTask = vtor_checks.sigStrToKwArgsDict(sigStr)['default']
            if itsTask == taskName:
                # We've found the correct file in an installation area. Return
                # the package object and the found file.
                return aPkg, f
    # What, are you still here?
    raise NoCfgFileError('No valid ' + ext + ' files found in package: "' + str(pkgName) + '" for task: "' + str(taskName) + '"')
|
def gradient(self, wrt):
    """Gets the autodiff of current symbol.

    This function can only be used if current symbol is a loss function.

    .. note:: This function is currently not implemented.

    Parameters
    ----------
    wrt : Array of String
        keyword arguments of the symbol that the gradients are taken.

    Returns
    -------
    grad : Symbol
        A gradient Symbol with returns to be the corresponding gradients.
    """
    grad_handle = SymbolHandle()
    wrt_array = c_str_array(wrt)
    check_call(_LIB.MXSymbolGrad(self.handle, mx_uint(len(wrt)),
                                 wrt_array, ctypes.byref(grad_handle)))
    return Symbol(grad_handle)
|
def _parse_properties(response, result_class):
    '''Build a ``result_class`` instance from response headers.

    Each known header (per GET_PROPERTIES_ATTRIBUTE_MAP) is converted and
    stored on the result, possibly on a nested attribute; standard http
    headers are ignored.'''
    if response is None or response.headers is None:
        return None
    props = result_class()
    for key, value in response.headers:
        info = GET_PROPERTIES_ATTRIBUTE_MAP.get(key)
        if not info:
            continue
        parent_name, attr_name, converter = info
        # A None parent means the attribute lives directly on props.
        target = props if parent_name is None else getattr(props, parent_name)
        setattr(target, attr_name, converter(value))
    return props
|
def get_modifications(self):
    """Extract Modification INDRA Statements.

    Finds all 'protein-modification' event frames, pairs each with its
    regulation events to identify the controlling enzyme, and appends one
    Modification statement per (site, regulation) combination.
    """
    # Find all event frames that are a type of protein modification
    qstr = "$.events.frames[(@.type is 'protein-modification')]"
    res = self.tree.execute(qstr)
    if res is None:
        return
    # Extract each of the results when possible
    for r in res:
        # The subtype of the modification (e.g. phosphorylation)
        modification_type = r.get('subtype')
        # Skip negated events (i.e. something doesn't happen)
        epistemics = self._get_epistemics(r)
        if epistemics.get('negated'):
            continue
        annotations, context = self._get_annot_context(r)
        frame_id = r['frame_id']
        args = r['arguments']
        site = None
        theme = None
        # Find the substrate (the "theme" agent here) and the
        # site and position it is modified on
        for a in args:
            if self._get_arg_type(a) == 'theme':
                theme = a['arg']
            elif self._get_arg_type(a) == 'site':
                site = a['text']
        theme_agent, theme_coords = self._get_agent_from_entity(theme)
        if site is not None:
            mods = self._parse_site_text(site)
        else:
            mods = [(None, None)]
        for mod in mods:
            # Add up to one statement for each site
            residue, pos = mod
            # Now we need to look for all regulation events to get to the
            # enzymes (the "controller" here)
            qstr = "$.events.frames[(@.type is 'regulation') and " + \
                   "(@.arguments[0].arg is '%s')]" % frame_id
            reg_res = self.tree.execute(qstr)
            reg_res = list(reg_res)
            for reg in reg_res:
                controller_agent, controller_coords = None, None
                for a in reg['arguments']:
                    if self._get_arg_type(a) == 'controller':
                        controller = a.get('arg')
                        if controller is not None:
                            controller_agent, controller_coords = \
                                self._get_agent_from_entity(controller)
                            break
                # Check the polarity of the regulation and if negative,
                # flip the modification type: negative-regulation of a
                # phosphorylation becomes an (indirect) dephosphorylation.
                # Bug fix: use a per-regulation local instead of mutating
                # modification_type, which leaked the inverted type into
                # subsequent regulations, sites and events.
                if reg.get('subtype') == 'negative-regulation':
                    stmt_mod_type = modtype_to_inverse.get(modification_type)
                    if not stmt_mod_type:
                        logger.warning('Unhandled modification type: %s'
                                       % modification_type)
                        continue
                else:
                    stmt_mod_type = modification_type
                sentence = reg['verbose-text']
                annotations['agents']['coords'] = [controller_coords,
                                                   theme_coords]
                ev = Evidence(source_api='reach', text=sentence,
                              annotations=annotations, pmid=self.citation,
                              context=context, epistemics=epistemics)
                stmt_args = [controller_agent, theme_agent, residue, pos, ev]
                # Here ModStmt is a sub-class of Modification
                ModStmt = modtype_to_modclass.get(stmt_mod_type)
                if ModStmt is None:
                    logger.warning('Unhandled modification type: %s'
                                   % stmt_mod_type)
                else:
                    # Handle this special case here because only the
                    # enzyme argument is needed
                    if stmt_mod_type == 'autophosphorylation':
                        stmt_args = [theme_agent, residue, pos, ev]
                    self.statements.append(ModStmt(*stmt_args))
|
def is_strict_subclass(value, klass):
    """Return True when ``value`` is a proper subclass of ``klass``.

    Unlike issubclass(), returns False instead of raising when ``value``
    is not a type, and excludes ``klass`` itself.
    """
    if not isinstance(value, type):
        return False
    return issubclass(value, klass) and value is not klass
|
def show_system_monitor_output_switch_status_switch_state(self, **kwargs):
    """Auto Generated Code"""
    # Build the element tree:
    # show_system_monitor / output / switch-status / switch-state.
    # As in the generated original, the callback receives the
    # "show_system_monitor" element itself (the outer "config" wrapper
    # was created and immediately discarded).
    config = ET.Element("show_system_monitor")
    output = ET.SubElement(config, "output")
    switch_status = ET.SubElement(output, "switch-status")
    switch_state = ET.SubElement(switch_status, "switch-state")
    switch_state.text = kwargs.pop('switch_state')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
|
def on_pumprequest(self, event):
    """Activate or deactivate a connected pump.

    :param event: control event carrying ``controlvalue`` (the pump state
        to write to the digital pin)
    """
    value = event.controlvalue
    self.log("Updating pump status: ", value)
    self._set_digital_pin(self._pump_channel, value)
|
def cleanup_service(self, factory, svc_registration):
    # type: (Any, ServiceRegistration) -> bool
    """If this bundle used that factory, releases the reference; else does
    nothing.

    For prototype services every factored instance is released first
    (instance-level errors ignored), then the factory itself is told to
    unget the service and the reference is marked unused by this bundle.

    :param factory: The service factory
    :param svc_registration: The ServiceRegistration object
    :return: True if the bundle was using the factory, else False
    """
    svc_ref = svc_registration.get_reference()
    try:
        # "service" for factories, "services" for prototypes
        services, _ = self.__factored.pop(svc_ref)
    except KeyError:
        # This bundle never factored that service: nothing to clean up.
        return False
    else:
        if svc_ref.is_prototype() and services:
            for service in services:
                try:
                    factory.unget_service_instance(self.__bundle, svc_registration, service)
                except Exception:
                    # Ignore instance-level exceptions, potential errors
                    # will reappear in unget_service()
                    pass
        # Call the factory
        factory.unget_service(self.__bundle, svc_registration)
        # No more association
        svc_ref.unused_by(self.__bundle)
        return True
|
def unpack_archive(filename, extract_dir, progress_filter=default_filter, drivers=None):
    """Unpack `filename` to `extract_dir`, or raise ``UnrecognizedFormat``.

    `progress_filter` is a function taking two arguments: a source path
    internal to the archive ('/'-separated), and a filesystem path where it
    will be extracted.  The callback must return the desired extract path
    (which may be the same as the one passed in), or else ``None`` to skip
    that file or directory.  The callback can thus be used to report on the
    progress of the extraction, as well as to filter the items extracted or
    alter their extraction paths.

    `drivers`, if supplied, must be a non-empty sequence of functions with the
    same signature as this function (minus the `drivers` argument), that raise
    ``UnrecognizedFormat`` if they do not support extracting the designated
    archive type.  The `drivers` are tried in sequence until one is found that
    does not raise an error, or until all are exhausted (in which case
    ``UnrecognizedFormat`` is raised).  If you do not supply a sequence of
    drivers, the module's ``extraction_drivers`` constant will be used, which
    means that ``unpack_zipfile`` and ``unpack_tarfile`` will be tried, in that
    order.
    """
    for driver in drivers or extraction_drivers:
        try:
            driver(filename, extract_dir, progress_filter)
        except UnrecognizedFormat:
            # This driver can't handle the archive; try the next one.
            continue
        # Success: the first driver that extracts without error wins.
        return
    raise UnrecognizedFormat("Not a recognized archive type: %s" % filename)
|
def loads(s, model):
    """Deserialize PENMAN graphs from a string.

    Args:
        s (str): serialized PENMAN graphs
        model: Xmrs subclass instantiated from decoded triples
    Returns:
        a list of objects (of class *model*)
    """
    decoded_graphs = penman.loads(s, cls=XMRSCodec)
    return [model.from_triples(graph.triples()) for graph in decoded_graphs]
|
def _stage_user_code_in_s3(self):
    """Upload the user training script to s3 and return the location.

    Returns: s3 uri
    """
    default_prefix = '{}/source'.format(self._current_job_name)
    if self.code_location is not None:
        # An explicit code location wins: stage under it, and only encrypt
        # with the output KMS key when code and output share a bucket.
        bucket, key_prefix = parse_s3_url(self.code_location)
        prefix = '/'.join(filter(None, [key_prefix, self._current_job_name, 'source']))
        output_bucket, _ = parse_s3_url(self.output_path)
        kms = self.output_kms_key if bucket == output_bucket else None
    elif self.output_path.startswith('file://'):
        # Local mode: use the session's default bucket, no KMS key.
        bucket = self.sagemaker_session.default_bucket()
        prefix = default_prefix
        kms = None
    else:
        # Default: colocate the source with the training output.
        bucket, _ = parse_s3_url(self.output_path)
        prefix = default_prefix
        kms = self.output_kms_key
    return tar_and_upload_dir(session=self.sagemaker_session.boto_session,
                              bucket=bucket,
                              s3_key_prefix=prefix,
                              script=self.entry_point,
                              directory=self.source_dir,
                              dependencies=self.dependencies,
                              kms_key=kms)
|
def parse_sysctl(text):
    '''Parse sysctl output.

    Parses raw ``sysctl`` output (macOS-style ``key: value`` lines, as
    bytes) into a dict.  ``hw.memsize`` is converted to an int and
    ``vm.swapusage`` to a list of byte counts ``[total, used, free]``;
    every other value is kept as the raw string.

    :param text: raw ``sysctl`` output as ASCII bytes.
    :return: dict mapping sysctl keys to parsed values.
    '''
    # Multipliers for the single-letter unit suffix that sysctl prints for
    # vm.swapusage values (e.g. "1024.00M").  The original code handled
    # only 'M' and silently treated 'G'/'K' as KiB; unknown suffixes still
    # fall back to KiB for backward compatibility.
    unit_factors = {'k': 1024, 'm': 1024 ** 2, 'g': 1024 ** 3}
    results = {}
    for line in text.splitlines():
        key, _, value = line.decode('ascii').partition(': ')
        if key == 'hw.memsize':
            value = int(value)
        elif key == 'vm.swapusage':
            # "total = X  used = Y  free = Z" -> every third token is a value.
            values = value.split()[2::3]
            su_unit = values[0][-1].lower()  # unit suffix, e.g. 'm'
            factor = unit_factors.get(su_unit, 1024)
            value = [float(val[:-1]) * factor for val in values]
        results[key] = value
    return results
|
def predict_cdf ( self , X , quantile , nsamples = 200 , likelihood_args = ( ) ) :
r"""Predictive cumulative density function of a Bayesian GLM .
Parameters
X : ndarray
( N * , d ) array query input dataset ( N * samples , D dimensions ) .
quantile : float
The predictive probability , : math : ` p ( y ^ * \ leq \ text { quantile } |
\ mathbf { x } ^ * , \ mathbf { X } , y ) ` .
nsamples : int , optional
Number of samples for sampling the predictive CDF .
likelihood _ args : sequence , optional
sequence of arguments to pass to the likelihood function . These are
non - learnable parameters . They can be scalars or arrays of length
nsamples : int , optional
The number of samples to draw from the posterior in order to
approximate the predictive mean and variance .
Returns
p : ndarray
The probability of y * < = quantile for the query inputs , X * of shape
p _ min : ndarray
The minimum sampled values of the predicted probability ( same shape
as p )
p _ max : ndarray
The maximum sampled values of the predicted probability ( same shape
as p )"""
|
# Get latent function samples
N = X . shape [ 0 ]
ps = np . empty ( ( N , nsamples ) )
fsamples = self . _sample_func ( X , nsamples )
# Push samples though likelihood cdf
cdfarg = tuple ( chain ( atleast_list ( self . like_hypers_ ) , likelihood_args ) )
for i , f in enumerate ( fsamples ) :
ps [ : , i ] = self . likelihood . cdf ( quantile , f , * cdfarg )
# Average transformed samples ( MC integration )
p = ps . mean ( axis = 1 )
p_min = ps . min ( axis = 1 )
p_max = ps . max ( axis = 1 )
return p , p_min , p_max
|
def upload_and_update(path, token, cleanup):
    """Upload the egg at *path* to Databricks and update dependent jobs.

    All jobs which use the same major version of the library will be updated
    to use the new version, and all versions of this library in the
    production folder with the same major version and a lower minor version
    will be deleted.

    Unlike `upload`, `upload_and_update` does not ask for a folder because it
    relies on the production folder specified in the config. This is to
    protect against accidentally updating jobs to versions of a library
    still in testing/development.

    All egg names already in Databricks must be properly formatted with
    versions of the form <name>-0.0.0.
    """
    cfg = _load_config(CFG_FILE)
    # Fall back to configured values when not supplied on the command line.
    resolved_token = _resolve_input(token, 'token', 'token', cfg)
    prod_folder = _resolve_input(None, 'folder', 'prod_folder', cfg)
    update_databricks(logger, path, resolved_token, prod_folder,
                      update_jobs=True, cleanup=cleanup)
|
def process_result(transmute_func, context, result, exc, content_type):
    """Turn a handler outcome into a serialized response dict.

    :param transmute_func: the transmute_func function that returned the
        response.
    :param context: the transmute_context to use.
    :param result: the return value of the function, which will be
        serialized and returned back in the API.
    :param exc: the exception object. For Python 2, the traceback should be
        attached via the __traceback__ attribute. This is done automatically
        in Python 3.
    :param content_type: the content type that the request is requesting for
        a return type (e.g. application/json).
    :return: dict with "body", "code", "content-type" and "headers" keys.
    """
    # Wrap the raw return value, unless the handler already built a Response.
    if isinstance(result, Response):
        response = result
    else:
        response = Response(result=result, code=transmute_func.success_code, success=True)
    if exc:
        if isinstance(exc, APIException):
            # Known API errors become a failed response carrying the
            # exception's status code.
            response.result = str(exc)
            response.success = False
            response.code = exc.code
        else:
            # Unknown exceptions propagate, preserving the traceback when
            # one is attached.
            reraise(type(exc), exc, getattr(exc, "__traceback__", None))
    else:
        # Success path: serialize the result against the return type
        # declared for this status code, if any.
        return_type = transmute_func.get_response_by_code(response.code)
        if return_type:
            response.result = context.serializers.dump(return_type, response.result)
    # Pick a serializer for the requested content type, falling back to the
    # context default (and switching to its canonical content type).
    try:
        content_type = str(content_type)
        serializer = context.contenttype_serializers[content_type]
    except NoSerializerFound:
        serializer = context.contenttype_serializers.default
        content_type = serializer.main_type
    if response.success:
        # Shape the successful payload according to the configured shape.
        result = context.response_shape.create_body(attr.asdict(response))
        response.result = result
    else:
        response.result = attr.asdict(response)
    body = serializer.dump(response.result)
    # keeping the return type a dict to
    # reduce performance overhead.
    return {"body": body, "code": response.code, "content-type": content_type, "headers": response.headers, }
|
def transpose(self):
    """Transpose operation of self.

    Returns
    -------
    Matrix : Matrix
        transpose of self.  A diagonal matrix is its own transpose, so in
        that case a copy with the same row/column names is returned.
    """
    if self.isdiagonal:
        return type(self)(x=self.__x.copy(),
                          row_names=self.row_names,
                          col_names=self.col_names,
                          isdiagonal=True,
                          autoalign=self.autoalign)
    # General case: transpose the data and swap row/column names.
    return type(self)(x=self.__x.copy().transpose(),
                      row_names=self.col_names,
                      col_names=self.row_names,
                      autoalign=self.autoalign)
|
def quality_to_apply(self):
    """Value of quality parameter to use in processing request.

    Simple substitution of 'native' (API <= 1.1) or 'default' (later APIs)
    when no quality parameter is specified on the request.
    """
    requested = self.request.quality
    if requested is not None:
        return requested
    # No explicit quality: pick the version-appropriate placeholder.
    return 'native' if self.api_version <= '1.1' else 'default'
|
def _convert(s, re_pattern, syllable_function, add_apostrophes=False, remove_apostrophes=False, separate_syllables=False):
    """Convert a string's syllables to a different transcription system.

    Scans *s* left to right for syllables matching *re_pattern*, pushes each
    match through *syllable_function*, and copies non-matching text through
    unchanged.

    :param s: the input string.
    :param re_pattern: regex matching one syllable of the source system.
    :param syllable_function: callable converting a single matched syllable.
    :param add_apostrophes: insert an apostrophe before converted syllables
        starting with an unaccented vowel.
    :param remove_apostrophes: drop a lone apostrophe separating syllables.
    :param separate_syllables: join converted syllables with a space.
    :return: the converted string.
    """
    original = s
    new = ''
    while original:
        match = re.search(re_pattern, original, re.IGNORECASE | re.UNICODE)
        if match is None and original:
            # There are no more matches, but the given string isn't fully
            # processed yet; copy the remainder through unchanged.
            new += original
            break
        match_start, match_end = match.span()
        if match_start > 0:
            # Handle extra characters before the matched syllable.
            if (new and remove_apostrophes and match_start == 1 and original[0] == "'"):
                # Remove the apostrophe between Pinyin syllables: emit
                # nothing for it (just a space if syllables are separated).
                # NOTE(review): the else below is paired with this outer
                # check — confirm nesting against the upstream source.
                pass
                if separate_syllables:
                    # Separate syllables by a space.
                    new += ' '
            else:
                new += original[0:match_start]
        else:
            # Matched syllable starts immediately.
            if new and separate_syllables:
                # Separate syllables by a space.
                new += ' '
            elif (new and add_apostrophes and match.group()[0].lower() in _UNACCENTED_VOWELS):
                new += "'"
        # Convert the matched syllable.
        new += syllable_function(match.group())
        original = original[match_end:]
    return new
|
def from_settings_product(cls, environments, babblings, interest_models, sensorimotor_models, evaluate_at, testcases=None, same_testcases=False):
    """Creates an ExperimentPool with the product of all the given settings.

    :param environments: e.g. [('simple_arm', 'default'), ('simple_arm', 'high_dimensional')]
    :type environments: list of (environment name, config name)
    :param babblings: e.g. ['motor', 'goal']
    :type babblings: list of babbling modes
    :param interest_models: e.g. [('random', 'default')]
    :type interest_models: list of (interest model name, config name)
    :param sensorimotor_models: e.g. [('non_parametric', 'default')]
    :type sensorimotor_models: list of (sensorimotor model name, config name)
    :param evaluate_at: indices defining when to evaluate
    :type evaluate_at: list of int
    :param bool same_testcases: whether to use the same testcases for all
        experiments
    """
    combos = itertools.product(environments, babblings, interest_models, sensorimotor_models)
    settings = []
    for (env, env_conf), bab, (im, im_conf), (sm, sm_conf) in combos:
        settings.append(make_settings(env, bab, im, sm, env_conf, im_conf, sm_conf))
    return cls(settings, evaluate_at, testcases, same_testcases)
|
def handle_hooks(stage, hooks, provider, context):
    """Used to handle pre/post_build hooks.

    These are pieces of code that we want to run before/after the builder
    builds the stacks.

    Args:
        stage (string): The current stage (pre_run, post_run, etc).
        hooks (list): A list of :class:`stacker.config.Hook` containing the
            hooks to execute.
        provider (:class:`stacker.provider.base.BaseProvider`): The provider
            the current stack is using.
        context (:class:`stacker.context.Context`): The current stacker
            context.
    """
    # collections.Mapping was removed in Python 3.10; use the abc module
    # with a Python 2 fallback.
    try:
        from collections.abc import Mapping
    except ImportError:  # Python 2
        from collections import Mapping
    if not hooks:
        logger.debug("No %s hooks defined.", stage)
        return
    # Validate that every hook has a path before executing any of them.
    hook_paths = []
    for i, h in enumerate(hooks):
        try:
            hook_paths.append(h.path)
        except KeyError:
            raise ValueError("%s hook #%d missing path." % (stage, i))
    logger.info("Executing %s hooks: %s", stage, ", ".join(hook_paths))
    for hook in hooks:
        data_key = hook.data_key
        required = hook.required
        kwargs = hook.args or {}
        enabled = hook.enabled
        if not enabled:
            logger.debug("hook with method %s is disabled, skipping", hook.path)
            continue
        try:
            method = load_object_from_string(hook.path)
        except (AttributeError, ImportError):
            # Bad path or unimportable module: fatal only for required hooks.
            logger.exception("Unable to load method at %s:", hook.path)
            if required:
                raise
            continue
        try:
            result = method(context=context, provider=provider, **kwargs)
        except Exception:
            logger.exception("Method %s threw an exception:", hook.path)
            if required:
                raise
            continue
        if not result:
            # Falsy return value means the hook failed.
            if required:
                logger.error("Required hook %s failed. Return value: %s", hook.path, result)
                sys.exit(1)
            logger.warning("Non-required hook %s failed. Return value: %s", hook.path, result)
        else:
            # Mapping results can be stashed on the context for later stacks.
            if isinstance(result, Mapping):
                if data_key:
                    logger.debug("Adding result for hook %s to context in " "data_key %s.", hook.path, data_key)
                    context.set_hook_data(data_key, result)
                else:
                    logger.debug("Hook %s returned result data, but no data " "key set, so ignoring." , hook.path)
|
def sdot(U, V):
    '''Computes the tensor product reducing the last dimension of U with the
    first dimension of V.  For matrices, it is equal to the regular matrix
    product.'''
    last_axis = U.ndim - 1
    return np.tensordot(U, V, axes=(last_axis, 0))
|
def track_field(field):
    """Returns whether the given field should be tracked by Auditlog.

    Untracked fields are many-to-many relations and relations to the
    Auditlog LogEntry model.

    :param field: The field to check.
    :type field: Field
    :return: Whether the given field should be tracked.
    :rtype: bool
    """
    from auditlog.models import LogEntry
    # Many-to-many relations are never tracked.
    if field.many_to_many:
        return False
    # Relations to LogEntry itself are never tracked (modern attribute).
    remote = getattr(field, 'remote_field', None)
    if remote is not None and remote.model == LogEntry:
        return False
    # Django 1.8 fallback attribute name.
    rel = getattr(field, 'rel', None)
    if rel is not None and rel.to == LogEntry:
        return False
    return True
|
def pw_score_jaccard(self, s1: ClassId, s2: ClassId) -> SimScore:
    """Calculate the Jaccard index of the inferred associations of two
    subjects:

        |ancs(s1) ∩ ancs(s2)| / |ancs(s1) ∪ ancs(s2)|

    Returns 0.0 when both inferred-type sets are empty.
    """
    model = self.assocmodel
    ancs1 = model.inferred_types(s1)
    ancs2 = model.inferred_types(s2)
    union_size = len(ancs1 | ancs2)
    return len(ancs1 & ancs2) / union_size if union_size else 0.0
|
def copy_version_to_package(path):
    """Copy the single source of truth version number into the package as well.

    Rewrites ``<path>/__init__.py`` in place, replacing any line containing
    ``__version__`` with an assignment of the module-level VERSION constant.
    """
    init_file = os.path.join(path, "__init__.py")
    with open(init_file, "r") as fh:
        contents = fh.readlines()
    rewritten = []
    for line in contents:
        if "__version__" in line:
            rewritten.append("__version__ = \"{}\"\n".format(VERSION))
        else:
            rewritten.append(line)
    with open(init_file, "w") as fh:
        fh.writelines(rewritten)
|
def set_vflip(self, val):
    """Flip all the images in the animation list vertically.

    Records the setting and propagates it to every frame's v_flip flag.
    """
    self.__vertical_flip = val
    for frame in self.images:
        frame.v_flip = val
|
def getOutEdges(self, vertex, rawResults=False):
    """An alias for getEdges() that returns only the outgoing edges."""
    options = dict(inEdges=False, outEdges=True, rawResults=rawResults)
    return self.getEdges(vertex, **options)
|
def percentiles(a, pcts, axis=None):
    """Like scoreatpercentile but can take and return an array of percentiles.

    Parameters
    ----------
    a : array
        data
    pcts : sequence of percentile values
        percentile or percentiles to find the score at
    axis : int or None
        if not None, computes scores over this axis

    Returns
    -------
    scores : array
        array of scores at requested percentiles; the first dimension is
        the length of the object passed to ``pcts``
    """
    try:
        n_pcts = len(pcts)
    except TypeError:
        # A bare scalar was passed: wrap it, and remember (n_pcts == 0) to
        # squeeze the extra dimension back out at the end.
        pcts = [pcts]
        n_pcts = 0
    scores = []
    for p in pcts:
        if axis is None:
            scores.append(stats.scoreatpercentile(a.ravel(), p))
        else:
            scores.append(N.apply_along_axis(stats.scoreatpercentile, axis, a, p))
    scores = N.asarray(scores)
    return scores.squeeze() if not n_pcts else scores
|
def decode_chain_list(in_bytes):
    """Convert a byte buffer to a list of strings.  Each string occupies
    mmtf.CHAIN_LEN bytes, padded with NUL bytes.

    :param in_bytes: the input bytes
    :return: the decoded list of strings
    """
    chunk_dtype = numpy.dtype('S' + str(mmtf.utils.constants.CHAIN_LEN))
    chunks = numpy.frombuffer(in_bytes, chunk_dtype)
    return [chunk.decode("ascii").strip(mmtf.utils.constants.NULL_BYTE)
            for chunk in chunks]
|
def get_feature_names(host_name, client_name, client_pass):
    """Get the names of all features in a PServer client.

    Inputs:
        - host_name: A string containing the address of the machine where
          the PServer instance is hosted.
        - client_name: The PServer client name.
        - client_pass: The PServer client's password.
    Output:
        - feature_names: A python list of feature names.
    """
    # Construct and send the "get feature definitions" request.
    request = construct_request(model_type="pers",
                                client_name=client_name,
                                client_pass=client_pass,
                                command="getftrdef",
                                values="ftr=*")
    request_result = send_request(host_name, request)
    feature_names = list()
    if request_result is None:
        return feature_names
    # Extract the feature names from the XML payload.
    root = etree.parse(StringIO(request_result.text)).getroot()
    for row in root.findall("row/ftr"):
        feature_names.append(row.text)
    return feature_names
|
def DEFAULT_RENAMER(L, Names=None):
    """Renames overlapping column names of numpy ndarrays with structured
    dtypes.

    Rename the columns by using a simple convention:

    * If `L` is a list, it will append the number in the list to the key
      associated with the array.
    * If `L` is a dictionary, the algorithm will append the string
      representation of the key associated with an array to the overlapping
      columns from that array.

    Default renamer function used by :func:`tabular.spreadsheet.join`.

    **Parameters**

        **L** : list or dictionary
            Numpy recarrays with columns to be renamed.

    **Returns**

        **D** : dictionary of dictionaries
            Dictionary mapping each input numpy recarray to a dictionary
            mapping each original column name to its new name following the
            convention above.
    """
    if isinstance(L, dict):
        # Materialize the views so keys and values stay paired (and to be
        # explicit on Python 3).
        Names = list(L.keys())
        LL = list(L.values())
    else:
        # Fixed: identity check instead of `== None` (equality comparison is
        # non-idiomatic and hazardous for array-like Names).
        if Names is None:
            Names = range(len(L))
        else:
            assert len(Names) == len(L)
        LL = L
    # Column names shared by more than one input array.
    commons = Commons([l.dtype.names for l in LL])
    D = {}
    for (i, l) in zip(Names, LL):
        # Suffix each overlapping column present in this array with its key.
        d = {c: c + '_' + str(i) for c in commons if c in l.dtype.names}
        if d:
            D[i] = d
    return D
|
def get_perm_name(cls, action, full=True):
    """Return the name of the permission for a given model and action.

    By default it returns the full permission name
    `app_label.perm_codename`. If `full=False`, it returns only the
    `perm_codename`.
    """
    codename = "{}_{}".format(action, cls.__name__.lower())
    if not full:
        return codename
    return "{}.{}".format(cls._meta.app_label, codename)
|
def RGB_to_XYZ(cobj, target_illuminant=None, *args, **kwargs):
    """RGB to XYZ conversion. Expects 0-255 RGB values.

    Based off of: http://www.brucelindbloom.com/index.html?Eqn_RGB_to_XYZ.html

    NOTE(review): the comparisons against 0.04045 etc. suggest the channel
    values are actually normalized to [0, 1] at this point — confirm.

    :param cobj: the RGB color object to convert (sRGB, BT.2020, or a
        generic power-law gamma RGB space).
    :param target_illuminant: illuminant to adapt the result to; defaults
        to the RGB space's native illuminant.
    :returns: an XYZColor adapted to the target illuminant.
    """
    # Will contain linearized RGB channels (removed the gamma func).
    linear_channels = {}
    if isinstance(cobj, sRGBColor):
        # sRGB uses a piecewise companding function: linear segment near
        # black, power curve elsewhere.
        for channel in ['r', 'g', 'b']:
            V = getattr(cobj, 'rgb_' + channel)
            if V <= 0.04045:
                linear_channels[channel] = V / 12.92
            else:
                linear_channels[channel] = math.pow((V + 0.055) / 1.055, 2.4)
    elif isinstance(cobj, BT2020Color):
        # BT.2020 constants differ between 10-bit and 12-bit systems.
        if kwargs.get('is_12_bits_system'):
            a, b, c = 1.0993, 0.0181, 0.081697877417347
        else:
            a, b, c = 1.099, 0.018, 0.08124794403514049
        for channel in ['r', 'g', 'b']:
            V = getattr(cobj, 'rgb_' + channel)
            if V <= c:
                linear_channels[channel] = V / 4.5
            else:
                linear_channels[channel] = math.pow((V + (a - 1)) / a, 1 / 0.45)
    else:
        # If it's not sRGB or BT.2020, assume a simple power-law gamma.
        gamma = cobj.rgb_gamma
        for channel in ['r', 'g', 'b']:
            V = getattr(cobj, 'rgb_' + channel)
            linear_channels[channel] = math.pow(V, gamma)
    # Apply an RGB working space matrix to the XYZ values (matrix mul).
    xyz_x, xyz_y, xyz_z = apply_RGB_matrix(linear_channels['r'], linear_channels['g'], linear_channels['b'], rgb_type=cobj, convtype="rgb_to_xyz")
    if target_illuminant is None:
        target_illuminant = cobj.native_illuminant
    # The illuminant of the original RGB object. This will always match
    # the RGB colorspace's native illuminant.
    illuminant = cobj.native_illuminant
    xyzcolor = XYZColor(xyz_x, xyz_y, xyz_z, illuminant=illuminant)
    # This will take care of any illuminant changes for us (if source
    # illuminant != target illuminant).
    xyzcolor.apply_adaptation(target_illuminant)
    return xyzcolor
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.