signature
stringlengths 29
44.1k
| implementation
stringlengths 0
85.2k
|
---|---|
def Columns(iterable):
    """Return a parenthesized, backtick-quoted column list for MySQL INSERTs.

    Column names are sorted so that iterables with undefined ordering
    (dicts before Python 3.6) always produce the same string.

    Examples:
        >>> Columns({"password": "foo", "name": "bar"})
        '(`name`, `password`)'

    Args:
        iterable: The iterable of strings to be used as column names.

    Returns:
        A string containing a tuple of sorted comma-separated column names.
    """
    quoted = ["`{}`".format(name) for name in sorted(iterable)]
    return "({})".format(", ".join(quoted))
|
def list_loadbalancers(call=None):
    '''Return a list of the loadbalancers that are on the provider.

    Returns a dict keyed by loadbalancer name; each value carries the
    loadbalancer id plus its flattened 'properties' fields.
    '''
    if call == 'action':
        # BUGFIX: the original message referred to "avail_images" —
        # copy-pasted from another function in this module.
        raise SaltCloudSystemExit(
            'The list_loadbalancers function must be called with '
            '-f or --function, or with the --list-loadbalancers option'
        )

    ret = {}
    conn = get_conn()
    datacenter = get_datacenter(conn)

    # Flatten each API item into {id, **properties}, keyed by name.
    for item in conn.list_loadbalancers(datacenter['id'])['items']:
        lb = {'id': item['id']}
        lb.update(item['properties'])
        ret[lb['name']] = lb

    return ret
|
def strip_from_ansi_esc_sequences(text):
    """Find ANSI escape sequences in text and remove them.

    :param text: str to clean
    :return: str with the recognized escape sequences removed
        (the original docstring claimed a list was returned; it is a str)
    """
    # esc [ + values + control character
    # h, l, p commands are complicated, let's ignore them
    seq_regex = r"\x1b\[[0-9;]*[mKJusDCBAfH]"
    # re.sub removes every match in one pass — equivalent to (and simpler
    # than) manually stitching together the text around finditer() hits.
    return re.sub(seq_regex, "", text)
|
def metatab_derived_handler(m):
    """Create local Zip, Excel, CSV and Filesystem packages.

    :param m: CLI context object carrying parsed args, the metatab file
        reference (``m.mt_file``), the cache, and the package root.
    :return: list of (package_type, url, created) tuples, one entry per
        package build that was attempted.
    """
    from metapack.exc import PackageError
    from metapack.util import get_materialized_data_cache
    from shutil import rmtree

    create_list = []
    url = None

    doc = MetapackDoc(m.mt_file)
    env = get_lib_module_dict(doc)

    package_dir = m.package_root
    if m.args.package_directory:
        # If this is set, the FS package will be built to m.package_root, but the
        # file packages will be built to package_dir
        package_dir = parse_app_url(m.args.package_directory)

    update_name(m.mt_file, fail_on_missing=False, report_unchanged=False)
    process_schemas(m.mt_file, cache=m.cache, clean=m.args.clean, report_found=False)

    nv_name = m.args.nonversion_name
    nv_link = m.args.nonversion_link

    # Remove any data that may have been cached, for instance, from Jupyter notebooks
    rmtree(get_materialized_data_cache(doc), ignore_errors=True)

    reuse_resources = m.args.reuse_resources

    try:
        # Always create a filesystem package before ZIP or Excel, so we can use it as a source for
        # data for the other packages. This means that Transform processes and programs only need
        # to be run once.
        _, url, created = make_filesystem_package(m.mt_file, m.package_root, m.cache, env, m.args.force, False, nv_link, reuse_resources=reuse_resources)
        create_list.append(('fs', url, created))

        # Touch a 'last_build' marker whenever a fresh FS package was made
        # (or the marker doesn't exist yet).
        lb_path = Path(m.package_root.fspath, 'last_build')
        if created or not lb_path.exists():
            Path(m.package_root.fspath, 'last_build').touch()

        # Subsequent packages are built from the FS package just created.
        m.mt_file = url

        env = {}  # Don't need it anymore, since no more programs will be run.

        if m.args.excel is not False:
            _, url, created = make_excel_package(m.mt_file, package_dir, m.cache, env, m.args.force, nv_name, nv_link)
            create_list.append(('xlsx', url, created))

        if m.args.zip is not False:
            _, url, created = make_zip_package(m.mt_file, package_dir, m.cache, env, m.args.force, nv_name, nv_link)
            create_list.append(('zip', url, created))

        if m.args.csv is not False:
            _, url, created = make_csv_package(m.mt_file, package_dir, m.cache, env, m.args.force, nv_name, nv_link)
            create_list.append(('csv', url, created))

    except PackageError as e:
        err("Failed to generate package: {}".format(e))

    index_packages(m)

    return create_list
|
def _python_psk_client_callback(ssl_id, hint):
    """Called by _sslpsk.c to return the (psk, identity) tuple for the socket
    with the specified ssl socket.

    Returns empty credentials when no callback is registered for the
    given ssl id; otherwise normalizes the callback's result into a
    (psk, identity) tuple.
    """
    try:
        callback = _callbacks[ssl_id]
    except KeyError:
        return ("", "")
    result = callback(hint)
    if isinstance(result, tuple):
        return result
    return (result, "")
|
def valid_ip_prefix(ip_prefix):
    """Perform a sanity check on ip_prefix.

    Arguments:
        ip_prefix (str): The IP-Prefix to validate

    Returns:
        True if ip_prefix is a valid IPv4 address with prefix length 32 or a
        valid IPv6 address with prefix length 128, otherwise False
    """
    try:
        ip_prefix = ipaddress.ip_network(ip_prefix)
    except ValueError:
        return False
    # BUGFIX: the original compared max_prefixlen — which is always 32 for
    # IPv4 and 128 for IPv6 — against those same constants, so the check
    # could never fail and any parseable network was accepted. The actual
    # prefix length of the parsed network must equal the maximum, i.e. the
    # prefix must describe a single host.
    return ip_prefix.prefixlen == ip_prefix.max_prefixlen
|
def _get_folds ( n_rows , n_folds , use_stored ) :
"""Get the used CV folds"""
|
# n _ folds = self . _ n _ folds
# use _ stored = self . _ use _ stored _ folds
# n _ rows = self . _ n _ rows
if use_stored is not None : # path = ' ~ / concise / data - offline / lw - pombe / cv _ folds _ 5 . json '
with open ( os . path . expanduser ( use_stored ) ) as json_file :
json_data = json . load ( json_file )
# check if we have the same number of rows and folds :
if json_data [ 'N_rows' ] != n_rows :
raise Exception ( 'N_rows from folds doesnt match the number of rows of X_seq, X_feat, y' )
if json_data [ 'N_folds' ] != n_folds :
raise Exception ( 'n_folds dont match' , json_data [ 'N_folds' ] , n_folds )
kf = [ ( np . array ( train ) , np . array ( test ) ) for ( train , test ) in json_data [ 'folds' ] ]
else :
kf = KFold ( n_splits = n_folds ) . split ( np . zeros ( ( n_rows , 1 ) ) )
# store in a list
i = 1
folds = [ ]
for train , test in kf :
fold = "fold_" + str ( i )
folds . append ( ( fold , train , test ) )
i = i + 1
return folds
|
def get_xpath_frequencydistribution(paths):
    """Build and return a frequency distribution over xpath occurrences.

    Each path is reduced to its parent path (everything except the last
    node) and the parents are counted.

    :return: list of (parentpath, count) pairs, most frequent first
    """
    # "html/body/div/div/text" -> parent path "html/body/div/div"
    parents = ('/'.join(path.split('/')[:-1]) for path in paths)
    return Counter(parents).most_common()
|
def switch_to_frame(self, frame, timeout=settings.SMALL_TIMEOUT):
    """Sets driver control to the specified browser frame.

    :param frame: the target frame (passed through to page_actions)
    :param timeout: seconds to wait for the frame to become available
    """
    # Only rescale the *default* timeout; an explicitly-passed custom
    # timeout is honored as-is.
    if self.timeout_multiplier and timeout == settings.SMALL_TIMEOUT:
        timeout = self.__get_new_timeout(timeout)
    page_actions.switch_to_frame(self.driver, frame, timeout)
|
def BC_Mirror(self):
    """Mirrors qs across the boundary on either the west (left) or east (right)
    side, depending on the selections.

    This can, for example, produce a scenario in which you are observing
    a mountain range up to the range crest (or, more correctly, the halfway
    point across the mountain range).
    """
    # NOTE(review): l2/l1/c0/r1/r2 appear to be the banded diagonals of the
    # finite-difference operator, and *_coeff_i the unmodified coefficient
    # arrays; folding the left coefficients into the right diagonals (and
    # vice versa) implements the mirror symmetry at the edge rows. The
    # "+= 0" statements keep the per-diagonal bookkeeping explicit.
    if self.BC_W == 'Mirror':
        # first row: both left diagonals fall off the domain; reflect them
        # onto the right diagonals
        i = 0
        # self.l2[i] += np.nan
        # self.l1[i] += np.nan
        self.c0[i] += 0
        self.r1[i] += self.l1_coeff_i[i]
        self.r2[i] += self.l2_coeff_i[i]
        # second row: only l2 falls off; it reflects onto the center
        i = 1
        # self.l2[i] += np.nan
        self.l1[i] += 0
        self.c0[i] += self.l2_coeff_i[i]
        self.r1[i] += 0
        self.r2[i] += 0
    if self.BC_E == 'Mirror':
        # second-to-last row: r2 falls off; it reflects onto the center
        i = -2
        self.l2[i] += 0
        self.l1[i] += 0
        self.c0[i] += self.r2_coeff_i[i]
        self.r1[i] += 0
        # self.r2[i] += np.nan
        # last row: both right diagonals fall off; reflect onto the left
        i = -1
        self.l2[i] += self.r2_coeff_i[i]
        self.l1[i] += self.r1_coeff_i[i]
        self.c0[i] += 0
|
def _filter_max_length(example, max_length=256):
    """Indicates whether the example's length is lower than the maximum length.

    :param example: pair of tensors; both elements must fit in max_length
    :param max_length: maximum allowed number of elements per tensor
    :return: scalar boolean tensor, True when both sizes are <= max_length
    """
    return tf.logical_and(tf.size(example[0]) <= max_length,
                          tf.size(example[1]) <= max_length)
|
def with_siblings(graph, outputs):
    """Include all missing siblings.

    Returns the union of ``graph.siblings(node)`` over every node in
    ``outputs`` as a set.
    """
    return {sibling for node in outputs for sibling in graph.siblings(node)}
|
def __argument(self, ttype, tvalue):
    """Argument parsing method.

    This method acts as an entry point for 'argument' parsing.

    Syntax:
        string-list / number / tag

    :param ttype: current token type
    :param tvalue: current token value (bytes)
    :return: False if an error is encountered, True otherwise
    """
    # Quoted / multiline text is recorded as a "string" argument.
    if ttype in ["multiline", "string"]:
        return self.__curcommand.check_next_arg("string", tvalue.decode("utf-8"))
    # Numbers and tags keep their own token type.
    if ttype in ["number", "tag"]:
        return self.__curcommand.check_next_arg(ttype, tvalue.decode("ascii"))
    # '[' opens a string list: switch parser state and start accumulating
    # into a fresh list, expecting a string next.
    if ttype == "left_bracket":
        self.__cstate = self.__stringlist
        self.__curstringlist = []
        self.__set_expected("string")
        return True
    # For commands with a non-deterministic argument layout, '{' or ','
    # means the arguments collected so far must be reinterpreted; rewind
    # the lexer one position so the token is re-read afterwards.
    condition = (ttype in ["left_cbracket", "comma"]
                 and self.__curcommand.non_deterministic_args)
    if condition:
        self.__curcommand.reassign_arguments()
        # rewind lexer
        self.lexer.pos -= 1
        return True
    return False
|
def temperature(header: str, temp: Number, unit: str = 'C') -> str:
    """Format temperature details into a spoken word string.

    :param header: leading phrase, e.g. "Temperature"
    :param temp: parsed number object exposing ``value`` and ``spoken``
    :param unit: unit letter, expanded through SPOKEN_UNITS when known
    :return: spoken-word string, or "<header> unknown" when no value
    """
    # BUGFIX: the original used `if not (temp and temp.value)`, which
    # treated a legitimate temperature of exactly zero degrees as missing.
    # Only a missing object or a missing value should read as unknown.
    if temp is None or temp.value is None:
        return header + ' unknown'
    if unit in SPOKEN_UNITS:
        unit = SPOKEN_UNITS[unit]
    # singular "degree" only for one / minus one
    use_s = '' if temp.spoken in ('one', 'minus one') else 's'
    return ' '.join((header, temp.spoken, 'degree' + use_s, unit))
|
def stalk_at(self, pid, address, action=None):
    """Sets a one shot code breakpoint at the given process and address.

    If instead of an address you pass a label, the breakpoint may be
    deferred until the DLL it points to is loaded.

    @see: L{break_at}, L{dont_stalk_at}

    @type  pid: int
    @param pid: Process global ID.

    @type  address: int or str
    @param address:
        Memory address of code instruction to break at. It can be an
        integer value for the actual address or a string with a label
        to be resolved.

    @type  action: function
    @param action: (Optional) Action callback function.
        See L{define_code_breakpoint} for more details.

    @rtype:  bool
    @return: C{True} if the breakpoint was set immediately, or C{False} if
        it was deferred.
    """
    # oneshot=True is what makes this a "stalk" breakpoint: it is
    # disabled automatically after the first hit.
    bp = self.__set_break(pid, address, action, oneshot=True)
    return bp is not None
|
def _nodes_to_values ( self ) :
"""Returns list of list of ( Node , string _ value ) tuples ."""
|
def is_none ( slice ) :
return slice [ 0 ] == - 1 and slice [ 1 ] == - 1
def get ( slice ) :
return self . string [ slice [ 0 ] : slice [ 1 ] ]
return [ ( varname , get ( slice ) , slice ) for varname , slice in self . _nodes_to_regs ( ) if not is_none ( slice ) ]
|
def _create_date_slug ( self ) :
"""Prefixes the slug with the ` ` published _ on ` ` date ."""
|
if not self . pk : # haven ' t saved this yet , so use today ' s date
d = utc_now ( )
elif self . published and self . published_on : # use the actual published on date
d = self . published_on
elif self . updated_on : # default to the last - updated date
d = self . updated_on
self . date_slug = u"{0}/{1}" . format ( d . strftime ( "%Y/%m/%d" ) , self . slug )
|
def _create_error(self, status_code):
    """Construct an error message in jsend format.

    :param int status_code: The status code to translate into an error message
    :return: A dictionary in jsend format with the error and the code
    :rtype: dict
    """
    # ComodoCA.status_code maps numeric API codes to human-readable text.
    return jsend.error(message=ComodoCA.status_code[status_code], code=status_code)
|
def check_configuration_string(self, config_string, is_job=True, external_name=False):
    """Check whether the given job or task configuration string
    is well-formed (if ``is_bstring`` is ``True``)
    and it has all the required parameters.

    :param string config_string: the byte string or Unicode string to be checked
    :param bool is_job: if ``True``, ``config_string`` is a job config string
    :param bool external_name: if ``True``, the task name is provided externally,
                               and it is not required to appear
                               in the config string
    :rtype: :class:`~aeneas.validator.ValidatorResult`
    """
    if is_job:
        self.log(u"Checking job configuration string")
    else:
        self.log(u"Checking task configuration string")
    # fresh result object for this validation run
    self.result = ValidatorResult()
    if self._are_safety_checks_disabled(u"check_configuration_string"):
        return self.result
    # select the parameter set the config string must contain
    if is_job:
        required_parameters = self.JOB_REQUIRED_PARAMETERS
    elif external_name:
        required_parameters = self.TASK_REQUIRED_PARAMETERS_EXTERNAL_NAME
    else:
        required_parameters = self.TASK_REQUIRED_PARAMETERS
    is_bstring = gf.is_bytes(config_string)
    if is_bstring:
        # byte strings must first be checked for raw well-formedness,
        # then decoded before parameter parsing
        self.log(u"Checking that config_string is well formed")
        self.check_raw_string(config_string, is_bstring=True)
        if not self.result.passed:
            return self.result
        config_string = gf.safe_unicode(config_string)
    self.log(u"Checking required parameters")
    parameters = gf.config_string_to_dict(config_string, self.result)
    self._check_required_parameters(required_parameters, parameters)
    self.log([u"Checking config_string: returning %s", self.result.passed])
    return self.result
|
def select_larva(self):
    """Select all larva.

    :return: an sc_pb.Action with the select_larva UI action populated
    """
    action = sc_pb.Action()
    # SetInParent() adds the empty proto field, which is how "select all
    # larva" is expressed — the message has no payload of its own.
    action.action_ui.select_larva.SetInParent()
    return action
|
def age(self):
    """int, the estimated age of the person.

    Note that a DOB object is based on a date-range and the exact date is
    usually unknown, so for age calculation the middle of the range is
    assumed to be the real date-of-birth. Returns None when no range is
    available.
    """
    if self.date_range is None:
        return
    birth = self.date_range.middle
    now = datetime.date.today()
    years = now.year - birth.year
    # subtract one year if this year's birthday hasn't happened yet
    if (now.month, now.day) < (birth.month, birth.day):
        years -= 1
    return years
|
def abort(self, exception=exc.ConnectError):
    """Aborts a connection and puts all pending futures into an error state.

    If ``sys.exc_info()`` is set (i.e. this is being called in an exception
    handler) then pending futures will have that exc info set. Otherwise
    the given ``exception`` parameter is used (defaults to
    ``ConnectError``).
    """
    log.warn("Aborting connection to %s:%s", self.host, self.port)

    def abort_pending(f):
        # prefer the in-flight exception context when one exists
        exc_info = sys.exc_info()
        if any(exc_info):
            f.set_exc_info(exc_info)
        else:
            f.set_exception(exception(self.host, self.port))

    # fail every future still waiting on this connection
    for pending in self.drain_all_pending():
        abort_pending(pending)
|
def adapt(self):
    r"""Update the proposal using the points
    stored in ``self.samples[-1]`` and the parameters which can be set via
    :py:meth:`.set_adapt_params`.

    In the above referenced function's docstring, the algorithm is
    described in detail. If the resulting matrix is not a valid covariance,
    its offdiagonal elements are set to zero and a warning is printed. If
    that also fails, the proposal's covariance matrix is divided by the
    ``covar_scale_multiplier`` :math:`\beta`.

    .. note::
        This function only uses the points obtained during the last run.
    """
    last_run = self.samples[-1]
    accept_rate = float(self._last_accept_count) / len(last_run)
    # careful with rowvar!
    # in this form it is expected that each column of ``points``
    # represents sampling values of a variable
    # this is the case if points is a list of sampled points
    covar_estimator = _np.cov(last_run, rowvar=0)
    # update sigma: damped running mixture of the previous estimate and
    # the covariance of the latest run; damping decays with adapt_count
    time_dependent_damping_factor = 1. / self.adapt_count ** self.damping
    self.unscaled_sigma = (1 - time_dependent_damping_factor) * self.unscaled_sigma + time_dependent_damping_factor * covar_estimator
    self._update_scale_factor(accept_rate)
    scaled_sigma = self.covar_scale_factor * self.unscaled_sigma
    # increase count now before proposal update. It may fail and raise an exception.
    self.adapt_count += 1
    try:
        self.proposal.update(scaled_sigma)
    except _np.linalg.LinAlgError:
        print("WARNING: Markov chain self adaptation failed; trying diagonalization ... ", end='')
        # retry with the diagonal only (offdiagonal elements zeroed)
        diagonal_matrix = _np.zeros_like(scaled_sigma)
        _np.fill_diagonal(diagonal_matrix, _np.diag(scaled_sigma))
        try:
            self.proposal.update(diagonal_matrix)
            print('success')
        except _np.linalg.LinAlgError:
            print('fail')
            # just scale the old covariance matrix if everything else fails
            self.proposal.update(self.proposal.sigma / self.covar_scale_multiplier)
|
def insert_from_xmldoc(connection, source_xmldoc, preserve_ids=False, verbose=False):
    """Insert the tables from an in-ram XML document into the database at
    the given connection. If preserve_ids is False (default), then row
    IDs are modified during the insert process to prevent collisions
    with IDs already in the database. If preserve_ids is True then IDs
    are not modified; this will result in database consistency
    violations if any of the IDs of newly-inserted rows collide with
    row IDs already in the database, and is generally only sensible
    when inserting a document into an empty database. If verbose is
    True then progress reports will be printed to stderr."""
    # enable / disable ID remapping by monkey-patching DBTable.append;
    # the original method is restored in the finally clause below
    orig_DBTable_append = dbtables.DBTable.append
    if not preserve_ids:
        try:
            dbtables.idmap_create(connection)
        except sqlite3.OperationalError:
            # assume table already exists
            pass
        dbtables.idmap_sync(connection)
        dbtables.DBTable.append = dbtables.DBTable._remapping_append
    else:
        dbtables.DBTable.append = dbtables.DBTable._append
    try:
        # create a place-holder XML representation of the target
        # document so we can pass the correct tree to update_ids().
        # note that only tables present in the source document need
        # ID remapping, so xmldoc only contains representations of
        # the tables in the target document that are also in the
        # source document
        xmldoc = ligolw.Document()
        xmldoc.appendChild(ligolw.LIGO_LW())
        # iterate over tables in the source XML tree, inserting
        # each into the target database
        for tbl in source_xmldoc.getElementsByTagName(ligolw.Table.tagName):
            # instantiate the correct table class, connected to the
            # target database, and save in XML tree
            name = tbl.Name
            try:
                cls = dbtables.TableByName[name]
            except KeyError:
                # unknown table type: fall back to the generic class
                cls = dbtables.DBTable
            dbtbl = xmldoc.childNodes[-1].appendChild(cls(tbl.attributes, connection=connection))
            # copy table element child nodes from source XML tree
            for elem in tbl.childNodes:
                if elem.tagName == ligolw.Stream.tagName:
                    dbtbl._end_of_columns()
                dbtbl.appendChild(type(elem)(elem.attributes))
            # copy table rows from source XML tree
            for row in tbl:
                dbtbl.append(row)
            dbtbl._end_of_rows()
        # update references to row IDs and clean up ID remapping
        if not preserve_ids:
            update_ids(connection, xmldoc, verbose=verbose)
    finally:
        # always restore the original append method, even on error
        dbtables.DBTable.append = orig_DBTable_append
    # done. unlink the document to delete database cursor objects it
    # retains
    connection.commit()
    xmldoc.unlink()
|
def get_dependencies(self):
    """Return dependencies, which should trigger updates of this model."""
    # pylint: disable=no-member
    # In addition to the inherited dependencies, changes to a Data
    # object's collection/entity membership or parent links must also
    # trigger an update.
    return super().get_dependencies() + [Data.collection_set, Data.entity_set, Data.parents, ]
|
def _get_raw_movielens_data():
    """Return the raw lines of the train and test files.

    Downloads the MovieLens 100k archive on first use, then reads the
    'ua.base' (train) and 'ua.test' (test) splits out of the zip.

    :return: tuple (train_lines, test_lines) of lists of str
    """
    path = _get_movielens_path()
    if not os.path.isfile(path):
        _download_movielens(path)
    with zipfile.ZipFile(path) as datafile:
        return (datafile.read('ml-100k/ua.base').decode().split('\n'),
                datafile.read('ml-100k/ua.test').decode().split('\n'))
|
def cli_schemata_list(self, *args):
    """Display a list of registered schemata.

    Pass '-c' or '-config' to additionally list the registered
    configuration schemata.
    """
    self.log('Registered schemata languages:', ",".join(sorted(l10n_schemastore.keys())))
    self.log('Registered Schemata:', ",".join(sorted(schemastore.keys())))
    if '-c' in args or '-config' in args:
        self.log('Registered Configuration Schemata:', ",".join(sorted(configschemastore.keys())), pretty=True)
|
def transform_to_3d(points, normal, z=0):
    """Project points into 3d from 2d points.

    Rotates the 2d points into the plane defined by ``normal`` (via the
    rotation about the axis normal x z-hat) and offsets them by ``z``.
    """
    axis = np.cross(normal, (0, 0, 1))
    rotation = rotation_matrix(axis)
    return rotation.dot(points.T).T + z
|
def parse_dereplicated_uc(dereplicated_uc_lines):
    """Return dict of seed seq ID -> list of dereplicated seq IDs.

    dereplicated_uc_lines: list of lines of .uc file from dereplicated seqs
    from usearch61 (i.e. open file of abundance sorted .uc data)
    """
    # column layout of the .uc format
    record_type_col = 0
    query_id_col = 8
    target_id_col = 9

    clusters = {}
    for raw_line in dereplicated_uc_lines:
        stripped = raw_line.strip()
        # skip comments and blank lines
        if not stripped or raw_line.startswith("#"):
            continue
        fields = stripped.split('\t')
        record_type = fields[record_type_col]
        if record_type == "S":
            # seed record: open a new, initially empty cluster
            clusters[fields[query_id_col]] = []
        elif record_type == "H":
            # hit record: attach the query sequence to its seed's cluster
            clusters[fields[target_id_col]].append(fields[query_id_col])
    return clusters
|
def cloud_percent(self):
    """Return percentage of cloud coverage as a float."""
    xpath = ("n1:Quality_Indicators_Info/Image_Content_QI/"
             "CLOUDY_PIXEL_PERCENTAGE")
    value = self._metadata.findtext(xpath, namespaces=self._nsmap)
    return float(value)
|
def repo(name: str, owner: str) -> snug.Query[dict]:
    """a repository lookup by owner and name"""
    # yield the relative URL; the query runner sends back the HTTP
    # response, whose body is decoded as JSON
    response = yield f'/repos/{owner}/{name}'
    return json.loads(response.content)
|
def is_defined_by_module(item, module, parent=None):
    """Check if item is directly defined by a module.
    This check may be prone to errors.

    :param item: module, function, method or other object to test
    :param module: the module that may define ``item``
    :param parent: optional parent object whose names are also searched
    :return: bool flag
    """
    flag = False
    if isinstance(item, types.ModuleType):
        if not hasattr(item, '__file__'):
            try:
                # hack for cv2 and xfeatures2d
                # NOTE(review): bare except here silently hides any error,
                # including a missing utool package.
                import utool as ut
                name = ut.get_modname_from_modpath(module.__file__)
                flag = name in str(item)
            except:
                flag = False
        else:
            # a submodule counts as "defined by" a package only when
            # module is a package (__init__.py) and item lives under it
            item_modpath = os.path.realpath(dirname(item.__file__))
            mod_fpath = module.__file__.replace('.pyc', '.py')
            if not mod_fpath.endswith('__init__.py'):
                flag = False
            else:
                modpath = os.path.realpath(dirname(mod_fpath))
                modpath = modpath.replace('.pyc', '.py')
                flag = item_modpath.startswith(modpath)
    elif hasattr(item, '_utinfo'):
        # Capture case where there is a utool wrapper: test the wrapped
        # function instead.
        orig_func = item._utinfo['orig_func']
        flag = is_defined_by_module(orig_func, module, parent)
    else:
        if isinstance(item, staticmethod):
            # static methods are a wrapper around a function
            item = item.__func__
        try:
            func_globals = meta_util_six.get_funcglobals(item)
            func_module_name = func_globals['__name__']
            if func_module_name == 'line_profiler':
                # profiled functions report line_profiler as their module;
                # fall back to matching the name against the module contents
                valid_names = dir(module)
                if parent is not None:
                    valid_names += dir(parent)
                if item.func_name in valid_names:
                    # hack to prevent small names
                    # if len(item.func_name) > 8:
                    if len(item.func_name) > 6:
                        flag = True
            elif func_module_name == module.__name__:
                flag = True
        except AttributeError:
            # objects without function globals: compare __module__ directly
            if hasattr(item, '__module__'):
                flag = item.__module__ == module.__name__
    return flag
|
def add(self, num, price, aType, stuff_status, title, desc, location_state, location_city, cid, session, **kwargs):
    '''taobao.item.add — add a single item (translated from Chinese).

    Creates a new item owned by the seller of the current session.
    Notes from the original documentation:

    - Item properties and SKU properties have a containment relationship,
      and the item price must lie within the SKU price range (e.g. with
      SKUs priced at 5 and 10 yuan, the item price must be >= 5 and
      <= 10 yuan, otherwise adding the item fails).
    - The item category is correlated with both the item price and the
      SKU prices (query the category-attribute API for the exact rules).
    - Freight settings depend on who pays shipping: no postage is set
      when the seller pays; postage must be set when the buyer pays.
    - When a key property value of "other" is selected, input_pids and
      input_str must be supplied for the item to be added successfully.
    '''
    request = TOPRequest('taobao.item.add')
    request['num'] = num
    request['price'] = price
    request['type'] = aType
    request['stuff_status'] = stuff_status
    request['title'] = title
    request['desc'] = desc
    request['location.state'] = location_state
    request['location.city'] = location_city
    request['cid'] = cid
    # NOTE(review): .iteritems() is Python 2 only. Also, this condition
    # skips only *non-whitelisted* keys whose value is None — whitelisted
    # optional keys are always forwarded; confirm that is intentional.
    for k, v in kwargs.iteritems():
        if k not in ('props', 'approve_status', 'freight_payer', 'valid_thru', 'has_invoice', 'has_warranty', 'has_showcase', 'seller_cids', 'has_discount', 'post_fee', 'express_fee', 'ems_fee', 'list_time', 'increment', 'image', 'postage_id', 'auction_point', 'property_alias', 'input_pids', 'sku_properties', 'sku_prices', 'sku_outer_ids', 'lang', 'outer_id', 'product_id', 'pic_path', 'auto_fill', 'input_str', 'is_taobao', 'is_ex', 'is_3D', 'sell_promise', 'after_sale_id', 'cod_postage_id', 'is_lightning_consignment', 'weight', 'is_xinpin', 'sub_stock') and v == None:
            continue
        request[k] = v
    self.create(self.execute(request, session)['item'])
    return self
|
def _compute_counts_from_intensity(intensity, bexpcube):
    """Make the counts map from the intensity.

    Multiplies the intensity by the geometric mean of the exposure cube
    at adjacent planes, i.e. sqrt(data[i+1] * data[i]) for each plane.

    :return: HpxMap on the same HEALPix geometry as ``intensity``
    """
    data = intensity.data * np.sqrt(bexpcube.data[1:] * bexpcube.data[0:-1])
    return HpxMap(data, intensity.hpx)
|
def resolve_attr(obj, path):
    """An iterative version of getattr for navigating dotted paths.

    Args:
        obj: An object for which we want to retrieve a nested attribute.
        path: A dot separated string containing zero or more attribute names.

    Returns:
        The attribute referred to by obj.a1.a2.a3...

    Raises:
        AttributeError: If there is no such attribute.
    """
    current = obj
    while path:
        # peel one attribute name off the front of the path
        name, _, path = path.partition('.')
        current = getattr(current, name)
    return current
|
def _split_audio_by_size(self, audio_abs_path, results_abs_path, chunk_size):
    """Calculates the duration of the name.wav in order for all splits to have
    the size of chunk_size except possibly the last split (which will be
    smaller) and then passes the duration to `_split_audio_by_duration`.

    Parameters
    ----------
    audio_abs_path : str
    results_abs_path : str
        A place for adding digits needs to be added prior to the format
        declaration i.e. name%03d.wav
    chunk_size : int
        Should be in bytes
    """
    sample_rate = self._get_audio_sample_rate(audio_abs_path)
    sample_bit = self._get_audio_sample_bit(audio_abs_path)
    channel_num = self._get_audio_channels(audio_abs_path)
    # chunk_size bytes -> bits (x8), divided by the bit rate
    # (sample_rate * sample_bit * channel_num) = seconds per chunk
    duration = 8 * chunk_size / reduce(
        lambda x, y: int(x) * int(y),
        [sample_rate, sample_bit, channel_num])
    self._split_audio_by_duration(audio_abs_path, results_abs_path, duration)
|
def copydir(orig, dest):
    """Copies directory orig to dest recursively.

    Returns a list of (source, destination) path tuples, one per file
    copied. (Note: the tuples contain paths as walked from ``orig``,
    not relative paths, despite what the original docstring claimed.)
    """
    copied = list()
    makedirsp(dest)
    for root, dirs, files in walk(orig):
        for d in dirs:
            # BUGFIX: create each directory at its path *relative to orig*.
            # The original used join(dest, d) — the basename only — which
            # flattened nested trees and left destination directories
            # below depth 1 uncreated, so copy() for their files failed.
            makedirsp(join(dest, relpath(join(root, d), orig)))
        for f in files:
            root_f = join(root, f)
            dest_f = join(dest, relpath(root_f, orig))
            copy(root_f, dest_f)
            copied.append((root_f, dest_f))
    return copied
|
def load(cls, campaign_dir, ns_path=None, runner_type='Auto', optimized=True, check_repo=True):
    """Load an existing simulation campaign.

    Note that specifying an ns-3 installation is not compulsory when using
    this method: existing results will be available, but in order to run
    additional simulations it will be necessary to specify a
    SimulationRunner object, and assign it to the CampaignManager.

    Args:
        campaign_dir (str): path to the directory in which to save the
            simulation campaign database.
        ns_path (str): path to the ns-3 installation to employ in this
            campaign.
        runner_type (str): implementation of the SimulationRunner to use.
            Value can be: SimulationRunner (for running sequential
            simulations locally), ParallelRunner (for running parallel
            simulations locally), GridRunner (for running simulations using
            a DRMAA-compatible parallel task scheduler).
        optimized (bool): whether to configure the runner to employ an
            optimized ns-3 build.
        check_repo (bool): forwarded to the constructor.
    """
    # Convert paths to be absolute
    if ns_path is not None:
        ns_path = os.path.abspath(ns_path)
    campaign_dir = os.path.abspath(campaign_dir)
    # Read the existing configuration into the new DatabaseManager
    db = DatabaseManager.load(campaign_dir)
    script = db.get_script()
    runner = None
    # A runner can only be constructed when an ns-3 install is provided.
    if ns_path is not None:
        runner = CampaignManager.create_runner(ns_path, script, runner_type, optimized)
    return cls(db, runner, check_repo)
|
def _master(self):
    """Master node's operation.

    Assigning tasks to workers and collecting results from them.

    Parameters
    ----------
    None

    Returns
    -------
    results: list of tuple (voxel_id, accuracy)
        the accuracy numbers of all voxels, in accuracy descending order
        the length of array equals the number of voxels
    """
    logger.info('Master at rank %d starts to allocate tasks', MPI.COMM_WORLD.Get_rank())
    results = []
    comm = MPI.COMM_WORLD
    size = comm.Get_size()
    # each task is a (start_voxel, count) pair, capped at voxel_unit
    sending_voxels = self.voxel_unit if self.voxel_unit < self.num_voxels else self.num_voxels
    current_task = (0, sending_voxels)
    status = MPI.Status()
    # using_size is used when the number of tasks
    # is smaller than the number of workers
    using_size = size
    # phase 1: seed every worker with one initial task
    for i in range(0, size):
        if i == self.master_rank:
            continue
        if current_task[1] == 0:
            # ran out of work before every worker received a task
            using_size = i
            break
        logger.debug('master starts to send a task to worker %d' % i)
        comm.send(current_task, dest=i, tag=self._WORKTAG)
        next_start = current_task[0] + current_task[1]
        sending_voxels = self.voxel_unit if self.voxel_unit < self.num_voxels - next_start else self.num_voxels - next_start
        current_task = (next_start, sending_voxels)
    # phase 2 (only if all workers are busy): hand a fresh task to
    # whichever worker reports back first, until no work remains
    while using_size == size:
        if current_task[1] == 0:
            break
        result = comm.recv(source=MPI.ANY_SOURCE, tag=MPI.ANY_TAG, status=status)
        results += result
        comm.send(current_task, dest=status.Get_source(), tag=self._WORKTAG)
        next_start = current_task[0] + current_task[1]
        sending_voxels = self.voxel_unit if self.voxel_unit < self.num_voxels - next_start else self.num_voxels - next_start
        current_task = (next_start, sending_voxels)
    # phase 3: drain the final in-flight result from each active worker
    for i in range(0, using_size):
        if i == self.master_rank:
            continue
        result = comm.recv(source=MPI.ANY_SOURCE, tag=MPI.ANY_TAG)
        results += result
    # phase 4: tell every worker to shut down
    for i in range(0, size):
        if i == self.master_rank:
            continue
        comm.send(None, dest=i, tag=self._TERMINATETAG)
    return results
|
def sort_sections(self, order):
    """Sort sections according to the section names in the order list.

    All remaining sections are appended at the end in their original
    order. Matching against ``order`` is case-insensitive (section keys
    are assumed to be lowercase).

    :param order: Iterable of section names
    :return:
    """
    wanted = [name.lower() for name in order]
    reordered = OrderedDict()
    # first the explicitly ordered sections...
    for key in wanted:
        if key in self.sections:
            reordered[key] = self.sections[key]
    # ...then everything else, preserving the original order
    for key in self.sections:
        if key not in wanted:
            reordered[key] = self.sections[key]
    assert len(self.sections) == len(reordered)
    self.sections = reordered
|
def delete_device(name, safety_on=True):
    '''Deletes a device from Vistara based on DNS name or partial name. By default,
    delete_device will only perform the delete if a single host is returned. Set
    safety_on=False to delete all matches (up to default API search page size)

    CLI Example:

    .. code-block:: bash

        salt-run vistara.delete_device 'hostname-101.mycompany.com'
        salt-run vistara.delete_device 'hostname-101'
        salt-run vistara.delete_device 'hostname-1' safety_on=False
    '''
    config = _get_vistara_configuration()
    if not config:
        return False
    token = _get_oath2_access_token(config['client_key'], config['client_secret'])
    if not token:
        return 'Vistara access token not available'
    matches = _search_devices('dnsName:{0}'.format(name), config['client_id'], token)
    if not matches:
        return "No devices found"
    # With the safety on, refuse to act unless the search is unambiguous.
    if safety_on and len(matches) != 1:
        return ("Expected to delete 1 device and found {0}. "
                "Set safety_on=False to override.").format(len(matches))
    responses = []
    for device in matches:
        log.debug(device['id'])
        outcome = _delete_resource(device['id'], config['client_id'], token)
        # Abort on the first failed delete.
        if not outcome:
            return False
        responses.append(outcome)
    return responses
|
def create_software_renderer(self, surface):
    """Create a 2D software rendering context for a surface.

    Args:
        surface (Surface): The surface where rendering is done.

    Returns:
        Renderer: A 2D software rendering context.

    Raises:
        SDLError: If there was an error creating the renderer.
    """
    renderer = object.__new__(Renderer)
    # Bug fix: the original chained assignment also rebound ``self._ptr``
    # to the renderer pointer, clobbering this object's own SDL handle.
    # Only the new Renderer instance should hold the renderer pointer.
    renderer._ptr = check_ptr_err(lib.SDL_CreateSoftwareRenderer(surface._ptr))
    return renderer
|
def handle_incoming_response(self, call_id, payload):
    """Get a registered handler for a given response and execute it."""
    typehint = payload["typehint"]
    # We already log the full JSON response
    self.log.debug('handle_incoming_response: in [typehint: %s, call ID: %s]', typehint, call_id)
    handler = self.handlers.get(typehint)
    if not handler:
        self.log.warning('Response has not been handled: %s', Pretty(payload))
        return

    def feature_not_supported(m):
        # Tell the user this server feature has no client handler yet.
        msg = feedback["handler_not_implemented"]
        self.editor.raw_message(msg.format(typehint, self.launcher.ensime_version))

    # A handler raising NotImplementedError means "feature not supported".
    with catch(NotImplementedError, feature_not_supported):
        handler(call_id, payload)
|
def IsDatabaseLink(link):
    """Finds whether the link is a database Self Link or a database ID based link

    :param str link:
        Link to analyze
    :return:
        True or False.
    :rtype: boolean
    """
    if not link:
        return False
    # Strip the leading/trailing '/' and split into path segments.
    segments = TrimBeginningAndEndingSlashes(link).split('/')
    if len(segments) != 2:
        return False
    prefix, database_id = segments
    # A database link looks like 'dbs/<id>': first segment must be 'dbs'
    # (case-insensitive) and the id segment must be non-empty.
    return bool(prefix) and prefix.lower() == 'dbs' and bool(database_id)
|
def delete_blacklist_entry(self, blacklist_entry_id):
    """Delete an existing blacklist entry.

    Keyword arguments:
    blacklist_entry_id -- The unique identifier of the blacklist entry to delete.
    """
    endpoint = Template("${rest_root}/blacklist/${public_key}/${blacklist_entry_id}/delete")
    target_url = endpoint.substitute(
        rest_root=self._rest_root,
        public_key=self._public_key,
        blacklist_entry_id=blacklist_entry_id,
    )
    # The delete endpoint only needs an empty POST body.
    self.__post_request(target_url, {})
|
def OnFont ( self , event ) :
    """Font choice event handler.

    Shows a ``wx.FontDialog`` pre-seeded with ``self.chosen_font`` (when
    set), stores the user's selection back on ``self`` (face, size, style,
    weight) and posts a ``DrawChartMsg`` so the chart is redrawn.
    """
    font_data = wx . FontData ( )
    # Disable color chooser on Windows
    font_data . EnableEffects ( False )
    if self . chosen_font :
        # Pre-select the font chosen on a previous invocation.
        font_data . SetInitialFont ( self . chosen_font )
    dlg = wx . FontDialog ( self , font_data )
    if dlg . ShowModal ( ) == wx . ID_OK :
        font_data = dlg . GetFontData ( )
        font = self . chosen_font = font_data . GetChosenFont ( )
        # Cache the individual font attributes for later drawing use.
        self . font_face = font . GetFaceName ( )
        self . font_size = font . GetPointSize ( )
        self . font_style = font . GetStyle ( )
        self . font_weight = font . GetWeight ( )
    # Destroy the dialog whether or not a font was picked.
    dlg . Destroy ( )
    # Trigger a chart redraw with the (possibly) new font.
    post_command_event ( self , self . DrawChartMsg )
|
def publish ( c , sdist = True , wheel = False , index = None , sign = False , dry_run = False , directory = None , dual_wheels = False , alt_python = None , check_desc = False , ) :
    """Publish code to PyPI or index of choice.

    All parameters save ``dry_run`` and ``directory`` honor config settings
    of the same name, under the ``packaging`` tree. E.g. say
    ``.configure({'packaging': {'wheel': True}})`` to force building wheel
    archives by default.

    :param bool sdist:
        Whether to upload sdists/tgzs.
    :param bool wheel:
        Whether to upload wheels (requires the ``wheel`` package from PyPI).
    :param str index:
        Custom upload index/repository name. See ``upload`` help for details.
    :param bool sign:
        Whether to sign the built archive(s) via GPG.
    :param bool dry_run:
        Skip actual publication step if ``True``.
        This also prevents cleanup of the temporary build/dist directories,
        so you can examine the build artifacts.
    :param str directory:
        Base directory within which will live the ``dist/`` and ``build/``
        directories.
        Defaults to a temporary directory which is cleaned up after the run
        finishes.
    :param bool dual_wheels:
        When ``True``, builds individual wheels for Python 2 and Python 3.
        Useful for situations where you can't build universal wheels, but
        still want to distribute for both interpreter versions.
        Requires that you have a useful ``python3`` (or ``python2``, if
        you're on Python 3 already) binary in your ``$PATH``. Also requires
        that this other python have the ``wheel`` package installed in its
        ``site-packages``; usually this will mean the global site-packages
        for that interpreter.
        See also the ``alt_python`` argument.
    :param str alt_python:
        Path to the 'alternate' Python interpreter to use when
        ``dual_wheels=True``.
        When ``None`` (the default) will be ``python3`` or ``python2``,
        depending on the currently active interpreter.
    :param bool check_desc:
        Whether to run ``setup.py check -r -s`` (uses ``readme_renderer``)
        before trying to publish - catches long_description bugs. Default:
        ``False``.
    """
    # Don't hide by default, this step likes to be verbose most of the time.
    c.config.run.hide = False
    # Config hooks: the docstring promises every parameter except dry_run
    # and directory honors the ``packaging`` config tree.
    config = c.config.get("packaging", {})
    index = config.get("index", index)
    sign = config.get("sign", sign)
    dual_wheels = config.get("dual_wheels", dual_wheels)
    check_desc = config.get("check_desc", check_desc)
    # Bug fix: sdist/wheel were documented as config-driven but were never
    # actually read from the config tree.
    sdist = config.get("sdist", sdist)
    wheel = config.get("wheel", wheel)
    # Initial sanity check, if needed. Will die usefully.
    if check_desc:
        c.run("python setup.py check -r -s")
    # Build, into controlled temp dir (avoids attempting to re-upload old
    # files)
    with tmpdir(skip_cleanup=dry_run, explicit=directory) as tmp:
        # Build default archives
        build(c, sdist=sdist, wheel=wheel, directory=tmp)
        # Build opposing interpreter archive, if necessary
        if dual_wheels:
            if not alt_python:
                alt_python = "python2"
                if sys.version_info[0] == 2:
                    alt_python = "python3"
            build(c, sdist=False, wheel=True, directory=tmp, python=alt_python)
        # Do the thing!
        upload(c, directory=tmp, index=index, sign=sign, dry_run=dry_run)
|
def sample_ruptures ( sources , srcfilter , param , monitor = Monitor ( ) ) :
    """
    :param sources:
        a sequence of sources of the same group
    :param srcfilter:
        SourceFilter instance used also for bounding box post filtering
    :param param:
        a dictionary of additional parameters including
        ses_per_logic_tree_path
    :param monitor:
        monitor instance
    :yields:
        dictionaries with keys rup_array, calc_times, eff_ruptures
    """
    # NOTE(review): the ``Monitor()`` default is created once at import
    # time and shared across all calls -- confirm Monitor is safe to reuse.
    # AccumDict of arrays with 3 elements weight , nsites , calc _ time
    calc_times = AccumDict ( accum = numpy . zeros ( 3 , numpy . float32 ) )
    # Compute and save stochastic event sets
    num_ses = param [ 'ses_per_logic_tree_path' ]
    eff_ruptures = 0
    ir_mon = monitor ( 'iter_ruptures' , measuremem = False )
    # Compute the number of occurrences of the source group . This is used
    # for cluster groups or groups with mutually exclusive sources .
    # NOTE(review): this guard requires BOTH 'atomic' AND 'cluster' to be
    # truthy; confirm atomic-but-non-cluster groups are really meant to
    # fall through to the generic branch below.
    if ( getattr ( sources , 'atomic' , False ) and getattr ( sources , 'cluster' , False ) ) :
        eb_ruptures , calc_times , eff_ruptures , grp_id = sample_cluster ( sources , srcfilter , num_ses , param )
        # Yield ruptures
        yield AccumDict ( rup_array = get_rup_array ( eb_ruptures ) , calc_times = calc_times , eff_ruptures = { grp_id : eff_ruptures } )
    else :
        eb_ruptures = [ ]
        # AccumDict of arrays with 3 elements weight , nsites , calc _ time
        calc_times = AccumDict ( accum = numpy . zeros ( 3 , numpy . float32 ) )
        # All sources in the group must share a single src_group_id.
        [ grp_id ] = set ( src . src_group_id for src in sources )
        for src , _sites in srcfilter ( sources ) :
            t0 = time . time ( )
            if len ( eb_ruptures ) > MAX_RUPTURES : # yield partial result to avoid running out of memory
                yield AccumDict ( rup_array = get_rup_array ( eb_ruptures , srcfilter ) , calc_times = { } , eff_ruptures = { grp_id : eff_ruptures } )
                eb_ruptures . clear ( )
            samples = getattr ( src , 'samples' , 1 )
            n_occ = 0
            # NOTE(review): n_occ is rebound by the loop target and then
            # also incremented with ebr.n_occ, so the occurrence count
            # recorded in calc_times may double-count -- confirm intended.
            for rup , n_occ in src . sample_ruptures ( samples * num_ses , ir_mon ) :
                ebr = EBRupture ( rup , src . id , grp_id , n_occ , samples )
                eb_ruptures . append ( ebr )
                n_occ += ebr . n_occ
            eff_ruptures += src . num_ruptures
            dt = time . time ( ) - t0
            calc_times [ src . id ] += numpy . array ( [ n_occ , src . nsites , dt ] )
        rup_array = get_rup_array ( eb_ruptures , srcfilter )
        yield AccumDict ( rup_array = rup_array , calc_times = calc_times , eff_ruptures = { grp_id : eff_ruptures } )
|
def config_pp(subs):
    """Pretty print of configuration options.

    Args:
        subs (iterable of str): iterable with the list of conf sections to
            print.
    """
    print('(c|f): available only as CLI argument/in the config file', end='\n\n')
    for section in subs:
        rows = []
        for opt_name, meta in conf[section].defaults_():
            # XOR: tag only options reachable from exactly one source.
            if meta.cmd_arg ^ meta.conf_arg:
                opt_name += ' (c)' if meta.cmd_arg else ' (f)'
            rows.append((opt_name, meta.help))
        if not rows:
            continue
        print('{}:'.format(section))
        _pretty_print(rows, sep=' -- ', text_width=min(get_terminal_size().columns, 100))
        print()
|
def set_state(self, state, speed=None):
    """
    :param state: bool
    :param speed: a string one of ["lowest", "low",
        "medium", "high", "auto"] defaults to last speed
    :return: nothing
    """
    # Fall back to the device's current speed when none is supplied.
    if not speed:
        speed = self.current_fan_speed()
    desired_state = {"powered": state}
    if state:
        # Only send a mode when powering on.
        desired_state["mode"] = speed
    response = self.api_interface.set_device_state(self, {"desired_state": desired_state})
    self._update_state_from_response(response)
|
def shuffle_models ( self , start_iteration = 0 , end_iteration = - 1 ) :
    """Shuffle models.

    Parameters
    ----------
    start_iteration : int, optional (default=0)
        The first iteration that will be shuffled.
    end_iteration : int, optional (default=-1)
        The last iteration that will be shuffled.
        If <= 0, means the last available iteration.

    Returns
    -------
    self : Booster
        Booster with shuffled models.
    """
    # Delegate to the LightGBM C API; the booster is mutated in place and
    # _safe_call raises if the native call returns a non-zero status.
    _safe_call ( _LIB . LGBM_BoosterShuffleModels ( self . handle , ctypes . c_int ( start_iteration ) , ctypes . c_int ( end_iteration ) ) )
    # Return self to allow method chaining.
    return self
|
def main ( * args ) :
    """Command-line entry point for the spell checker.

    Parses CLI options, loads dictionaries and ignore lists, then walks
    the given files/directories spell-checking each file.

    :param args: command-line argument strings (excluding the program name).
    :return: 1 on a usage error, otherwise the total number of
        misspellings found (0 means a clean run).
    """
    options , args , parser = parse_options ( args )
    # --write-changes needs the stock word regex to know what to rewrite,
    # so combining it with a custom --regex is rejected.
    if options . regex and options . write_changes :
        print ( 'ERROR: --write-changes cannot be used together with ' '--regex' )
        parser . print_help ( )
        return 1
    word_regex = options . regex or word_regex_def
    try :
        word_regex = re . compile ( word_regex )
    except re . error as err :
        print ( 'ERROR: invalid regular expression "%s" (%s)' % ( word_regex , err ) , file = sys . stderr )
        parser . print_help ( )
        return 1
    # Ignored words come from files (--ignore-words) and inline
    # comma-separated lists (--ignore-words-list).
    ignore_words_files = options . ignore_words or [ ]
    ignore_words = set ( )
    for ignore_words_file in ignore_words_files :
        if not os . path . exists ( ignore_words_file ) :
            print ( 'ERROR: cannot find ignore-words file: %s' % ignore_words_file , file = sys . stderr )
            parser . print_help ( )
            return 1
        build_ignore_words ( ignore_words_file , ignore_words )
    ignore_words_list = options . ignore_words_list or [ ]
    for comma_separated_words in ignore_words_list :
        for word in comma_separated_words . split ( ',' ) :
            ignore_words . add ( word . strip ( ) )
    # "-" selects the built-in dictionary.
    dictionaries = options . dictionary or [ default_dictionary ]
    misspellings = dict ( )
    for dictionary in dictionaries :
        if dictionary == "-" :
            dictionary = default_dictionary
        if not os . path . exists ( dictionary ) :
            print ( 'ERROR: cannot find dictionary file: %s' % dictionary , file = sys . stderr )
            parser . print_help ( )
            return 1
        build_dict ( dictionary , misspellings , ignore_words )
    colors = TermColors ( )
    # ANSI colors are disabled on Windows consoles or when requested.
    if not options . colors or sys . platform == 'win32' :
        colors . disable ( )
    if options . summary :
        summary = Summary ( )
    else :
        summary = None
    # context is a (lines-before, lines-after) pair, or None for none.
    context = None
    if options . context is not None :
        if ( options . before_context is not None ) or ( options . after_context is not None ) :
            print ( 'ERROR: --context/-C cannot be used together with ' '--context-before/-B or --context-after/-A' )
            parser . print_help ( )
            return 1
        context_both = max ( 0 , options . context )
        context = ( context_both , context_both )
    elif ( options . before_context is not None ) or ( options . after_context is not None ) :
        context_before = 0
        context_after = 0
        if options . before_context is not None :
            context_before = max ( 0 , options . before_context )
        if options . after_context is not None :
            context_after = max ( 0 , options . after_context )
        context = ( context_before , context_after )
    exclude_lines = set ( )
    if options . exclude_file :
        build_exclude_hashes ( options . exclude_file , exclude_lines )
    file_opener = FileOpener ( options . hard_encoding_detection , options . quiet_level )
    glob_match = GlobMatch ( options . skip )
    bad_count = 0
    for filename in args : # ignore hidden files
        if is_hidden ( filename , options . check_hidden ) :
            continue
        if os . path . isdir ( filename ) :
            # Recurse into directories, honoring --skip globs against
            # directories, bare file names and full paths.
            for root , dirs , files in os . walk ( filename ) :
                if glob_match . match ( root ) : # skip ( absolute ) directories
                    del dirs [ : ]
                    continue
                for file_ in files :
                    if glob_match . match ( file_ ) : # skip files
                        continue
                    fname = os . path . join ( root , file_ )
                    if glob_match . match ( fname ) : # skip paths
                        continue
                    # Skip non-regular and empty files.
                    if not os . path . isfile ( fname ) or not os . path . getsize ( fname ) :
                        continue
                    bad_count += parse_file ( fname , colors , summary , misspellings , exclude_lines , file_opener , word_regex , context , options )
                # skip ( relative ) directories
                dirs [ : ] = [ dir_ for dir_ in dirs if not glob_match . match ( dir_ ) ]
        else :
            bad_count += parse_file ( filename , colors , summary , misspellings , exclude_lines , file_opener , word_regex , context , options )
    if summary :
        print ( "\n-------8<-------\nSUMMARY:" )
        print ( summary )
    return bad_count
|
def to_file(self, filename, **kwargs):
    """dump a representation of the nparray object to a json-formatted file.

    The nparray object should then be able to be fully restored via
    nparray.from_file

    @parameter str filename: path to the file to be created (will overwrite
        if already exists)
    @rtype: str
    @returns: the filename
    """
    # Use a context manager so the file handle is closed even if
    # self.to_json raises (the original leaked the handle in that case).
    with open(filename, 'w') as f:
        f.write(self.to_json(**kwargs))
    return filename
|
def resize ( self , new_data_size ) :
    """Resize the file and update the chunk sizes

    :param new_data_size: new size in bytes for this chunk's data payload.
    """
    # Grow/shrink the underlying file in place at this chunk's offset,
    # then refresh the cached size fields so they match the file contents.
    resize_bytes ( self . __fileobj , self . data_size , new_data_size , self . data_offset )
    self . _update_size ( new_data_size )
|
def thread_values_df(run_list, estimator_list, estimator_names, **kwargs):
    """Calculates estimator values for the constituent threads of the input
    runs.

    Parameters
    ----------
    run_list: list of dicts
        List of nested sampling run dicts.
    estimator_list: list of functions
        Estimators to apply to runs.
    estimator_names: list of strs
        Name of each func in estimator_list.
    kwargs:
        Kwargs to pass to parallel_apply.

    Returns
    -------
    df: pandas data frame
        Columns represent estimators and rows represent runs.
        Each cell contains a 1d numpy array with length equal to the number
        of threads in the run, containing the results from evaluating the
        estimator on each thread.
    """
    tqdm_kwargs = kwargs.pop('tqdm_kwargs', {'desc': 'thread values'})
    # Bug fix: the assert message had unbalanced parentheses
    # ('len(estimator_names = {1}').
    assert len(estimator_list) == len(estimator_names), (
        'len(estimator_list) = {0} != len(estimator_names) = {1}'.format(
            len(estimator_list), len(estimator_names)))
    # get thread results
    thread_vals_arrays = pu.parallel_apply(
        nestcheck.error_analysis.run_thread_values, run_list,
        func_args=(estimator_list,), tqdm_kwargs=tqdm_kwargs, **kwargs)
    df = pd.DataFrame()
    for i, name in enumerate(estimator_names):
        df[name] = [arr[i, :] for arr in thread_vals_arrays]
    # Check there are the correct number of thread values in each cell
    for vals_shape in df.loc[0].apply(lambda x: x.shape).values:
        assert vals_shape == (run_list[0]['thread_min_max'].shape[0],), (
            'Should be nlive=' + str(run_list[0]['thread_min_max'].shape[0])
            + ' values in each cell. The cell contains array with shape '
            + str(vals_shape))
    return df
|
def choose_tag ( self : object , tokens : List [ str ] , index : int , history : List [ str ] ) :
    """Use regular expressions for rules-based lemmatizing based on word endings;
    tokens are matched for patterns with the base kept as a group; an word ending
    replacement is added to the (base) group.

    :rtype: str
    :type tokens: list
    :param tokens: List of tokens to be lemmatized
    :type index: int
    :param index: Int with current token
    :type history: list
    :param history: List with tokens that have already been lemmatized; NOT USED
    """
    for pattern , replace in self . _regexs :
        if re . search ( pattern , tokens [ index ] ) :
            # NOTE(review): when self.default is truthy, every matching
            # token returns the default tag instead of the per-pattern
            # replacement -- confirm this short-circuit is intended.
            if self . default :
                return self . default
            else :
                return replace
    # Implicitly returns None when no pattern matches, signalling a
    # backoff chain to try the next tagger.
|
def downstream_index(dir_value, i, j, alg='taudem'):
    """find downslope coordinate for D8 direction.

    :param dir_value: D8 flow-direction code (scheme specific).
    :param i: current row index.
    :param j: current column index.
    :param alg: D8 coding scheme name; must be a key of
        ``FlowModelConst.d8_deltas``.
    :return: (row, col) tuple of the downslope cell.
    """
    scheme = alg.lower()
    assert scheme in FlowModelConst.d8_deltas
    # Look up the (row, col) offset for this direction code.
    row_delta, col_delta = FlowModelConst.d8_deltas.get(scheme)[int(dir_value)]
    return i + row_delta, j + col_delta
|
def do_get ( url , params , to = 3 ) :
    """Fetch data from the given URL via ``requests.get``.

    :param params: query parameters; may be empty
    :type params: dict
    :param url: endpoint address
    :type url:
    :param to: response timeout in seconds
    :type to:
    :return: the data returned by the endpoint (parsed JSON when possible,
        raw text for non-JSON 200 responses, ``{}`` on error/non-200)
    :rtype: dict
    """
    try :
        rs = requests . get ( url , params = params , timeout = to )
        if rs . status_code == 200 :
            try :
                return rs . json ( )
            except Exception as __e : # log . error ( _ _ e )
                # Body is not valid JSON; fall back to the raw text.
                return rs . text
    except Exception as er :
        log . error ( 'get {} ({}) with err: {}' . format ( url , params , er ) )
    # NOTE(review): reached on any non-200 status as well as on exception;
    # the 0.5s pause presumably throttles caller retries -- confirm.
    time . sleep ( 0.5 )
    return { }
|
def get_assessment_taken_ids_by_bank(self, bank_id):
    """Gets the list of ``AssessmentTaken`` ``Ids`` associated with a ``Bank``.

    arg:    bank_id (osid.id.Id): ``Id`` of the ``Bank``
    return: (osid.id.IdList) - list of related assessment taken ``Ids``
    raise:  NotFound - ``bank_id`` is not found
    raise:  NullArgument - ``bank_id`` is ``null``
    raise:  OperationFailed - unable to complete request
    raise:  PermissionDenied - authorization failure occurred
    *compliance: mandatory -- This method must be implemented.*
    """
    # Implemented from template for
    # osid.resource.ResourceBinSession.get_resource_ids_by_bin
    return IdList([taken.get_id() for taken in self.get_assessments_taken_by_bank(bank_id)])
|
def _to_edge_list ( self , G ) :
    """Transform NetworkX object to an edge list.

    Parameters
    ----------
    G : Graph object.

    Returns
    -------
    node_pairs : (M, 2) numpy array, where M is the number of edges.
        node_pairs[i, 0] and node_pairs[i, 1] are the endpoints of the ith edge.
    w : Mx1 numpy array. w[i] is the weight of the ith edge.
    node2id : Dict. A function mapping from node name to node id, i.e.,
        node2id[node_name] gives the id.
    id2node : Dict. A function mapping from node id to node name, i.e.,
        id2node[node_id] gives the node name.
    """
    # Assign consecutive integer ids to nodes in iteration order.
    node2id = dict ( zip ( G . nodes , range ( len ( G . nodes ) ) ) )
    id2node = dict ( ( v , k ) for k , v in node2id . items ( ) )
    # copy=False: relabels G *in place*; the original labels are restored
    # below, but G is temporarily mutated.
    nx . relabel_nodes ( G , node2id , False )
    edges = G . edges ( data = "weight" )
    node_pairs = np . array ( [ [ edge [ 0 ] , edge [ 1 ] ] for edge in edges ] ) . astype ( int )
    w = np . array ( [ edge [ 2 ] for edge in edges ] ) . astype ( float )
    # Missing weights surface as NaN. When *every* edge is unweighted the
    # graph is treated as unweighted and all weights default to 1.
    # NOTE(review): a mix of weighted and unweighted edges leaves NaNs in
    # w -- confirm callers never pass partially weighted graphs.
    if all ( np . isnan ( w ) ) :
        nx . set_edge_attributes ( G , values = 1 , name = 'weight' )
        w [ : ] = 1.0
    # Undo the earlier in-place relabel, restoring the caller's labels.
    nx . relabel_nodes ( G , id2node , False )
    return node_pairs , w , node2id , id2node
|
def is_endpoint_expecting(self, endpoint, *arguments):
    """Iterate over all rules and check if the endpoint expects
    the arguments provided. This is for example useful if you have
    some URLs that expect a language code and others that do not and
    you want to wrap the builder a bit so that the current language
    code is automatically added if not provided but endpoints expect
    it.

    :param endpoint: the endpoint to check.
    :param arguments: this function accepts one or more arguments
                      as positional arguments.  Each one of them is
                      checked.
    """
    # Make sure the rule map is up to date before inspecting it.
    self.update()
    wanted = set(arguments)
    return any(wanted.issubset(rule.arguments)
               for rule in self._rules_by_endpoint[endpoint])
|
def kms_encrypt(kms_client, service, env, secret):
    """Encrypt string for use by a given service/environment

    Args:
        kms_client (boto3 kms client object): Instantiated kms client object.
            Usually created through create_aws_clients.
        service (string): name of the service that the secret is being
            encrypted for.
        env (string): environment that the secret is being encrypted for.
        secret (string): value to be encrypted

    Returns:
        a populated EFPWContext object

    Raises:
        SystemExit(1): If there is an error with the boto3 encryption call
            (ex. missing kms key)
    """
    # Periods are invalid in KMS alias names, so they become underscores.
    key_alias = '{}-{}'.format(env, service.replace('.', '_'))
    try:
        response = kms_client.encrypt(
            KeyId='alias/{}'.format(key_alias),
            Plaintext=secret.encode(),
        )
    except ClientError as error:
        if error.response['Error']['Code'] == "NotFoundException":
            fail("Key '{}' not found. You may need to run ef-generate for this environment.".format(key_alias), error)
        else:
            fail("boto3 exception occurred while performing kms encrypt operation.", error)
    # KMS returns raw bytes; base64-encode for safe storage/transport.
    return base64.b64encode(response['CiphertextBlob'])
|
def precision_curve(self, delta_tau=0.01):
    """Computes the relationship between probability threshold
    and classification precision.

    Returns a (scores, taus) pair where scores[k] is the precision at
    threshold taus[k]. The instance threshold is restored on exit.
    """
    saved_threshold = self.threshold
    sorted_labels, sorted_probs = self.sorted_values
    precisions = []
    thresholds = []
    # Sweep thresholds through the sorted probabilities, starting at 0.
    current_tau = 0
    for k in range(len(sorted_labels)):
        self.threshold = current_tau
        precisions.append(self.precision)
        thresholds.append(current_tau)
        current_tau = sorted_probs[k]
    # Final datapoint: precision is taken to be 1.0 at threshold 1.0.
    self.threshold = 1.0
    precisions.append(1.0)
    thresholds.append(1.0)
    # Restore the caller-visible threshold.
    self.threshold = saved_threshold
    return precisions, thresholds
|
def _platform_name():
    """Returns information about the current operating system and version

    :return:
        A unicode string containing the OS name and version
    """
    if sys.platform == 'darwin':
        version = _plat.mac_ver()[0]
        # Apple renamed "OS X" to "macOS" starting with 10.12 (Sierra).
        _plat_ver_info = tuple(map(int, version.split('.')))
        if _plat_ver_info < (10, 12):
            name = 'OS X'
        else:
            name = 'macOS'
        return '%s %s' % (name, version)
    elif sys.platform == 'win32':
        # Cleanup: removed the unused sys.getwindowsversion() lookup.
        return 'Windows %s' % _plat.win32_ver()[0]
    elif sys.platform in ['linux', 'linux2']:
        if os.path.exists('/etc/os-release'):
            with open('/etc/os-release', 'r', encoding='utf-8') as f:
                pairs = _parse_env_var_file(f.read())
            if 'NAME' in pairs and 'VERSION_ID' in pairs:
                # Bug fix: the original had an unreachable assignment to
                # ``version`` after this return; it has been removed.
                return '%s %s' % (pairs['NAME'], pairs['VERSION_ID'])
            elif 'PRETTY_NAME' in pairs:
                return pairs['PRETTY_NAME']
            elif 'NAME' in pairs:
                return pairs['NAME']
            else:
                raise ValueError('No suitable version info found in /etc/os-release')
        elif os.path.exists('/etc/lsb-release'):
            with open('/etc/lsb-release', 'r', encoding='utf-8') as f:
                pairs = _parse_env_var_file(f.read())
            if 'DISTRIB_DESCRIPTION' in pairs:
                return pairs['DISTRIB_DESCRIPTION']
            else:
                raise ValueError('No suitable version info found in /etc/lsb-release')
        else:
            return 'Linux'
    else:
        return '%s %s' % (_plat.system(), _plat.release())
|
def put ( self , user_id ) :
    """Update a user object

    Replaces the user's role list with the roles supplied in the request.

    :param user_id: identifier of the user whose roles are being replaced.
    :return: 404 if no such user, 403 for the built-in admin user,
        200 on success.
    """
    self . reqparse . add_argument ( 'roles' , type = str , action = 'append' )
    args = self . reqparse . parse_args ( )
    # NOTE(review): the audit event is 'user.create' although this is an
    # update endpoint -- confirm whether it should be 'user.update'.
    auditlog ( event = 'user.create' , actor = session [ 'user' ] . username , data = args )
    user = db . User . find_one ( User . user_id == user_id )
    # Look up the Role rows matching the requested role names.
    roles = db . Role . find ( Role . name . in_ ( args [ 'roles' ] ) )
    if not user :
        return self . make_response ( 'No such user found: {}' . format ( user_id ) , HTTP . NOT_FOUND )
    if user . username == 'admin' and user . auth_system == 'builtin' :
        return self . make_response ( 'You cannot modify the built-in admin user' , HTTP . FORBIDDEN )
    # Replace the user's roles with the requested set.
    user . roles = [ ]
    for role in roles :
        # NOTE(review): this compares a Role row against a list of role
        # *name strings*; it only works if Role defines equality against
        # strings -- verify, otherwise roles are silently never added.
        if role in args [ 'roles' ] :
            user . roles . append ( role )
    db . session . add ( user )
    db . session . commit ( )
    return self . make_response ( { 'message' : 'User roles updated' } , HTTP . OK )
|
def plan(self):
    """Gets the associated plan for this invoice.

    In order to provide a consistent view of invoices, the plan object
    should be taken from the first invoice item that has one, rather than
    using the plan associated with the subscription.

    Subscriptions (and their associated plan) are updated by the customer
    and represent what is current, but invoice items are immutable within
    the invoice and stay static/unchanged.

    In other words, a plan retrieved from an invoice item will represent
    the plan as it was at the time an invoice was issued. The plan
    retrieved from the subscription will be the currently active plan.

    :returns: The associated plan for the invoice.
    :rtype: ``djstripe.Plan``
    """
    # Prefer the plan snapshot frozen into the invoice items.
    frozen_plan = next(
        (item.plan for item in self.invoiceitems.all() if item.plan), None)
    if frozen_plan is not None:
        return frozen_plan
    # Fall back to the (mutable) subscription plan; None if no subscription.
    if self.subscription:
        return self.subscription.plan
|
def notify_task ( self , task_id , ** kwargs ) :
    """Notify PNC about a BPM task event. Accepts polymorphic JSON {\"eventType\": \"string\"} based on \"eventType\" field.

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please define a `callback` function
    to be invoked when receiving the response.
    >>> def callback_function(response):
    >>>     pprint(response)
    >>> thread = api.notify_task(task_id, callback=callback_function)

    :param callback function: The callback function
        for asynchronous request. (optional)
    :param int task_id: BPM task ID (required)
    :return: None
        If the method is called asynchronously,
        returns the request thread.
    """
    # Always unwrap the HTTP layer and hand back only the response data.
    kwargs [ '_return_http_data_only' ] = True
    if kwargs . get ( 'callback' ) :
        # Asynchronous: returns the worker thread immediately; the
        # callback is invoked when the response arrives.
        return self . notify_task_with_http_info ( task_id , ** kwargs )
    else :
        # Synchronous: block until the response is available.
        ( data ) = self . notify_task_with_http_info ( task_id , ** kwargs )
        return data
|
def export_to_pem(self, private_key=False, password=False):
    """Exports keys to a data buffer suitable to be stored as a PEM file.

    Either the public or the private key can be exported to a PEM file.
    For private keys the PKCS#8 format is used. If a password is provided
    the best encryption method available as determined by the cryptography
    module is used to wrap the key.

    :param private_key: Whether the private key should be exported.
        Defaults to `False` which means the public key is exported by
        default.
    :param password(bytes): A password for wrapping the private key.
        Defaults to False which will cause the operation to fail. To avoid
        encryption the user must explicitly pass None, otherwise the user
        needs to provide a password in a bytes buffer.
    """
    encoding = serialization.Encoding.PEM
    if not private_key:
        if not self.has_public:
            raise InvalidJWKType("No public key available")
        return self._get_public_key().public_bytes(
            encoding=encoding,
            format=serialization.PublicFormat.SubjectPublicKeyInfo)
    if not self.has_private:
        raise InvalidJWKType("No private key available")
    fmt = serialization.PrivateFormat.PKCS8
    # The default False deliberately fails: callers must choose between an
    # explicit None (no encryption) and a bytes password (encrypted).
    if password is None:
        algorithm = serialization.NoEncryption()
    elif password is False:
        raise ValueError("The password must be None or a bytes string")
    elif isinstance(password, bytes):
        algorithm = serialization.BestAvailableEncryption(password)
    else:
        raise TypeError("The password string must be bytes")
    return self._get_private_key().private_bytes(
        encoding=encoding, format=fmt, encryption_algorithm=algorithm)
|
def _implementation ( ) :
"""Return a dict with the Python implementation and version .
Provide both the name and the version of the Python implementation
currently running . For example , on CPython 2.7.5 it will return
{ ' name ' : ' CPython ' , ' version ' : ' 2.7.5 ' } .
This function works best on CPython and PyPy : in particular , it probably
doesn ' t work for Jython or IronPython . Future investigation should be done
to work out the correct shape of the code for those platforms ."""
|
implementation = platform . python_implementation ( )
if implementation == 'CPython' :
implementation_version = platform . python_version ( )
elif implementation == 'PyPy' :
implementation_version = '%s.%s.%s' % ( sys . pypy_version_info . major , sys . pypy_version_info . minor , sys . pypy_version_info . micro )
if sys . pypy_version_info . releaselevel != 'final' :
implementation_version = '' . join ( [ implementation_version , sys . pypy_version_info . releaselevel ] )
elif implementation == 'Jython' :
implementation_version = platform . python_version ( )
# Complete Guess
elif implementation == 'IronPython' :
implementation_version = platform . python_version ( )
# Complete Guess
else :
implementation_version = 'Unknown'
return { 'name' : implementation , 'version' : implementation_version }
|
def close ( self ) :
    """Close the connection.

    For protocol version 3+ a GOODBYE message is sent (best effort)
    before the socket is shut down. Safe to call more than once.
    """
    if not self . _closed :
        if self . protocol_version >= 3 :
            log_debug ( "[#%04X] C: GOODBYE" , self . local_port )
            # 0x02 is the GOODBYE message signature.
            self . _append ( b"\x02" , ( ) )
            try :
                self . send ( )
            except ServiceUnavailable :
                # The server may already be gone; proceed with closing.
                pass
        log_debug ( "[#%04X] C: <CLOSE>" , self . local_port )
        try :
            self . socket . close ( )
        except IOError :
            pass
        finally :
            # Mark closed even if the socket close itself failed.
            self . _closed = True
|
def vector_poly_data(orig, vec):
    """Create a vtkPolyData object composed of vectors.

    Parameters
    ----------
    orig : array-like
        Vector origins. Non-2D input is reshaped to (-1, 3).
    vec : array-like
        Vector components, one row per origin point.

    Returns
    -------
    vtki.PolyData
        Point cloud carrying an active 'vectors' array and an active
        'mag' scalar array (vector magnitudes).

    Raises
    ------
    Exception
        If a 2D input array does not have exactly three columns.
    """
    # shape/dimension checking
    if not isinstance(orig, np.ndarray):
        orig = np.asarray(orig)
    if not isinstance(vec, np.ndarray):
        vec = np.asarray(vec)
    if orig.ndim != 2:
        orig = orig.reshape((-1, 3))
    elif orig.shape[1] != 3:
        raise Exception('orig array must be 3D')
    if vec.ndim != 2:
        vec = vec.reshape((-1, 3))
    elif vec.shape[1] != 3:
        raise Exception('vec array must be 3D')
    # Create vtk points and cells objects
    vpts = vtk.vtkPoints()
    vpts.SetData(numpy_to_vtk(np.ascontiguousarray(orig), deep=True))
    npts = orig.shape[0]
    # One vertex cell per point, encoded as [1, point_id] pairs.
    cells = np.hstack((np.ones((npts, 1), 'int'), np.arange(npts).reshape((-1, 1))))
    # BUGFIX: convert when the array is NOT C-contiguous. The previous
    # condition (`or cells.flags.c_contiguous`) was inverted: it skipped
    # the conversion for non-contiguous int64 arrays (unsafe for VTK) and
    # needlessly copied arrays that were already contiguous.
    if cells.dtype != ctypes.c_int64 or not cells.flags.c_contiguous:
        cells = np.ascontiguousarray(cells, ctypes.c_int64)
    cells = np.reshape(cells, (2 * npts))
    vcells = vtk.vtkCellArray()
    vcells.SetCells(npts, numpy_to_vtkIdTypeArray(cells, deep=True))
    # Create vtkPolyData object
    pdata = vtk.vtkPolyData()
    pdata.SetPoints(vpts)
    pdata.SetVerts(vcells)
    # Add vectors to polydata
    name = 'vectors'
    vtkfloat = numpy_to_vtk(np.ascontiguousarray(vec), deep=True)
    vtkfloat.SetName(name)
    pdata.GetPointData().AddArray(vtkfloat)
    pdata.GetPointData().SetActiveVectors(name)
    # Add magnitude of vectors to polydata
    name = 'mag'
    scalars = (vec * vec).sum(1) ** 0.5
    vtkfloat = numpy_to_vtk(np.ascontiguousarray(scalars), deep=True)
    vtkfloat.SetName(name)
    pdata.GetPointData().AddArray(vtkfloat)
    pdata.GetPointData().SetActiveScalars(name)
    return vtki.PolyData(pdata)
|
def _set_network(self, v, load=False):
    """Setter method for network, mapped from the YANG variable
    /routing_system/interface/ve/ipv6/interface_ospfv3_conf/network (enumeration).

    If this variable is read-only (config: false) in the source YANG file,
    then _set_network is considered a private method. Backends looking to
    populate this variable should do so via calling thisObj._set_network()
    directly.

    YANG Description: To configure the OSPF network type. The default
    setting of the parameter depends on the network type.

    :param v: value to assign; only 'broadcast' and 'point-to-point' are legal.
    :param load: NOTE(review): accepted but unused in this body — presumably
        consumed by sibling generated setters; confirm before removing.
    :raises ValueError: if v cannot be coerced to the restricted enumeration.
    """
    # Values carrying their own YANG union/typedef helper normalize themselves.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        # Wrap the value in a YANGDynClass restricted to the two legal
        # enum values; this validates v as a side effect.
        t = YANGDynClass(v, base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'broadcast': {'value': 1}, u'point-to-point': {'value': 2}},), is_leaf=True, yang_name="network", rest_name="network", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'Interface type'}}, namespace='urn:brocade.com:mgmt:brocade-ospfv3', defining_module='brocade-ospfv3', yang_type='enumeration', is_config=True)
    except (TypeError, ValueError):
        # Re-raise with the generated, machine-readable error payload.
        raise ValueError({'error-string': """network must be of a type compatible with enumeration""", 'defined-type': "brocade-ospfv3:enumeration", 'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'broadcast': {'value': 1}, u'point-to-point': {'value': 2}},), is_leaf=True, yang_name="network", rest_name="network", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'Interface type'}}, namespace='urn:brocade.com:mgmt:brocade-ospfv3', defining_module='brocade-ospfv3', yang_type='enumeration', is_config=True)""", })
    self.__network = t
    # Notify the object tree that a child value changed, if supported.
    if hasattr(self, '_set'):
        self._set()
|
def spin_sx(self):
    """Returns the x-component of the spin of the secondary mass.

    Delegates to ``conversions.secondary_spin``, which presumably selects
    between spin1x and spin2x based on which of mass1/mass2 is the
    secondary (smaller) mass — confirm against the conversions module.
    """
    return conversions.secondary_spin(self.mass1, self.mass2, self.spin1x, self.spin2x)
|
def _fix_attribute_names ( attrs , change_map ) :
"""Change attribute names as per values in change _ map dictionary .
Parameters
: param attrs : dict Dict of operator attributes
: param change _ map : dict Dict of onnx attribute name to mxnet attribute names .
Returns
: return new _ attr : dict Converted dict of operator attributes ."""
|
new_attr = { }
for k in attrs . keys ( ) :
if k in change_map :
new_attr [ change_map [ k ] ] = attrs [ k ]
else :
new_attr [ k ] = attrs [ k ]
return new_attr
|
def build_graph(formula):
    '''Builds the implication graph from the formula'''
    graph = {}
    # Seed an empty adjacency list for both polarities of every literal.
    for clause in formula:
        for lit, _ in clause:
            graph[(lit, False)] = []
            graph[(lit, True)] = []
    # Each 2-clause (a OR b) yields the implications !a -> b and !b -> a.
    for (a_lit, a_neg), (b_lit, b_neg) in formula:
        add_edge(graph, (a_lit, a_neg), (b_lit, not b_neg))
        add_edge(graph, (b_lit, b_neg), (a_lit, not a_neg))
    return graph
|
def entries(self):
    """Return a flat list of all cached entries.

    Concatenates every value list stored in ``self.cache``. Returns an
    empty list when the cache is empty (``reduce`` raises ``TypeError``
    on an empty sequence).
    """
    def add(x, y):
        # list concatenation; reduce() folds this over all cached lists
        return x + y
    try:
        return reduce(add, list(self.cache.values()))
    except TypeError:
        # Empty cache (or non-concatenable values). Previously a bare
        # ``except:`` here also swallowed unrelated errors such as
        # KeyboardInterrupt — catch only the expected failure.
        return []
|
def create(self, user: str, *, pwd: str, sgrp: str, comment: str = None) -> None:
    """Create new user."""
    # Primary group is always 'users'; the caller chooses the secondary group.
    payload = {
        'action': 'add',
        'user': user,
        'pwd': pwd,
        'grp': 'users',
        'sgrp': sgrp,
    }
    if comment:
        payload['comment'] = comment
    self._request('post', URL, data=payload)
|
def _store_generic_inference_results(self, results_dict, all_params, all_names):
    """Store the model inference values that are common to all choice models.

    This includes things like index coefficients, gradients, hessians,
    asymptotic covariance matrices, t-values, p-values, and robust versions
    of these values.

    Parameters
    ----------
    results_dict : dict.
        The estimation result dictionary that is output from
        scipy.optimize.minimize. In addition to the standard keys which are
        included, it should also contain the following keys:
        `["utility_coefs", "final_gradient", "final_hessian", "fisher_info"]`.
        The "final_gradient", "final_hessian", and "fisher_info" values
        should be the gradient, hessian, and Fisher-Information Matrix of
        the log likelihood, evaluated at the final parameter vector.
    all_params : list of 1D ndarrays.
        Should contain the various types of parameters that were actually
        estimated.
    all_names : list of strings.
        Should contain names of each estimated parameter.

    Returns
    -------
    None. Stores all results on the model instance.
    """
    # Store the utility coefficients
    self._store_inferential_results(results_dict["utility_coefs"], index_names=self.ind_var_names, attribute_name="coefs", series_name="coefficients")
    # Store the gradient
    self._store_inferential_results(results_dict["final_gradient"], index_names=all_names, attribute_name="gradient", series_name="gradient")
    # Store the hessian
    self._store_inferential_results(results_dict["final_hessian"], index_names=all_names, attribute_name="hessian", column_names=all_names)
    # Store the variance-covariance matrix (negative inverse hessian of the
    # log-likelihood at the optimum).
    self._store_inferential_results(-1 * scipy.linalg.inv(self.hessian), index_names=all_names, attribute_name="cov", column_names=all_names)
    # Store ALL of the estimated parameters
    self._store_inferential_results(np.concatenate(all_params, axis=0), index_names=all_names, attribute_name="params", series_name="parameters")
    # Store the standard errors (sqrt of the covariance diagonal)
    self._store_inferential_results(np.sqrt(np.diag(self.cov)), index_names=all_names, attribute_name="standard_errors", series_name="std_err")
    # Store the t-stats of the estimated parameters
    self.tvalues = self.params / self.standard_errors
    self.tvalues.name = "t_stats"
    # Store the p-values (two-sided, from the standard normal survival fn)
    p_vals = 2 * scipy.stats.norm.sf(np.abs(self.tvalues))
    self._store_inferential_results(p_vals, index_names=all_names, attribute_name="pvalues", series_name="p_values")
    # Store the Fisher information matrix of estimated coefficients
    self._store_inferential_results(results_dict["fisher_info"], index_names=all_names, attribute_name="fisher_information", column_names=all_names)
    # Store the 'robust' (sandwich) variance-covariance matrix
    robust_covariance = calc_asymptotic_covariance(self.hessian, self.fisher_information)
    self._store_inferential_results(robust_covariance, index_names=all_names, attribute_name="robust_cov", column_names=all_names)
    # Store the 'robust' standard errors
    self._store_inferential_results(np.sqrt(np.diag(self.robust_cov)), index_names=all_names, attribute_name="robust_std_errs", series_name="robust_std_err")
    # Store the 'robust' t-stats of the estimated coefficients
    self.robust_t_stats = self.params / self.robust_std_errs
    self.robust_t_stats.name = "robust_t_stats"
    # Store the 'robust' p-values (two-sided)
    one_sided_p_vals = scipy.stats.norm.sf(np.abs(self.robust_t_stats))
    self._store_inferential_results(2 * one_sided_p_vals, index_names=all_names, attribute_name="robust_p_vals", series_name="robust_p_values")
    return None
|
def show_edge(self, edge_id):
    """Displays edge with ce_ratio.

    Plots all cells adjacent to the given edge in 3D, highlights the edge
    itself, and draws the connections between face circumcenters and cell
    circumcenters.

    :param edge_id: Edge ID for which to show the ce_ratio.
    :type edge_id: int
    """
    # pylint: disable=unused-variable,relative-import
    from mpl_toolkits.mplot3d import Axes3D
    from matplotlib import pyplot as plt
    # Lazily build the cell->face and face->edge incidence tables.
    if "faces" not in self.cells:
        self.create_cell_face_relationships()
    if "edges" not in self.faces:
        self.create_face_edge_relationships()
    fig = plt.figure()
    ax = fig.gca(projection=Axes3D.name)
    plt.axis("equal")
    # find all faces with this edge
    adj_face_ids = numpy.where((self.faces["edges"] == edge_id).any(axis=1))[0]
    # find all cells with the faces
    # https://stackoverflow.com/a/38481969/353337
    adj_cell_ids = numpy.where(numpy.in1d(self.cells["faces"], adj_face_ids).reshape(self.cells["faces"].shape).any(axis=1))[0]
    # plot all those adjacent cells; first collect all edges
    adj_edge_ids = numpy.unique([adj_edge_id for adj_cell_id in adj_cell_ids for face_id in self.cells["faces"][adj_cell_id] for adj_edge_id in self.faces["edges"][face_id]])
    col = "k"
    for adj_edge_id in adj_edge_ids:
        x = self.node_coords[self.edges["nodes"][adj_edge_id]]
        ax.plot(x[:, 0], x[:, 1], x[:, 2], col)
    # make clear which is edge_id (thicker line)
    x = self.node_coords[self.edges["nodes"][edge_id]]
    ax.plot(x[:, 0], x[:, 1], x[:, 2], color=col, linewidth=3.0)
    # connect the face circumcenters with the corresponding cell
    # circumcenters
    X = self.node_coords
    for cell_id in adj_cell_ids:
        cc = self._circumcenters[cell_id]
        x = X[self.node_face_cells[..., [cell_id]]]
        face_ccs = compute_triangle_circumcenters(x, self.ei_dot_ei, self.ei_dot_ej)
        # draw the face circumcenters
        ax.plot(face_ccs[..., 0].flatten(), face_ccs[..., 1].flatten(), face_ccs[..., 2].flatten(), "go",)
        # draw the connections
        # tet circumcenter --- face circumcenter
        for face_cc in face_ccs:
            ax.plot([cc[..., 0], face_cc[..., 0]], [cc[..., 1], face_cc[..., 1]], [cc[..., 2], face_cc[..., 2]], "b-",)
    # draw the cell circumcenters
    cc = self._circumcenters[adj_cell_ids]
    ax.plot(cc[:, 0], cc[:, 1], cc[:, 2], "ro")
    return
|
def _QA_data_stock_to_fq ( bfq_data , xdxr_data , fqtype ) :
'使用数据库数据进行复权'
|
info = xdxr_data . query ( 'category==1' )
bfq_data = bfq_data . assign ( if_trade = 1 )
if len ( info ) > 0 :
data = pd . concat ( [ bfq_data , info . loc [ bfq_data . index [ 0 ] : bfq_data . index [ - 1 ] , [ 'category' ] ] ] , axis = 1 )
data [ 'if_trade' ] . fillna ( value = 0 , inplace = True )
data = data . fillna ( method = 'ffill' )
data = pd . concat ( [ data , info . loc [ bfq_data . index [ 0 ] : bfq_data . index [ - 1 ] , [ 'fenhong' , 'peigu' , 'peigujia' , 'songzhuangu' ] ] ] , axis = 1 )
else :
data = pd . concat ( [ bfq_data , info . loc [ : , [ 'category' , 'fenhong' , 'peigu' , 'peigujia' , 'songzhuangu' ] ] ] , axis = 1 )
data = data . fillna ( 0 )
data [ 'preclose' ] = ( data [ 'close' ] . shift ( 1 ) * 10 - data [ 'fenhong' ] + data [ 'peigu' ] * data [ 'peigujia' ] ) / ( 10 + data [ 'peigu' ] + data [ 'songzhuangu' ] )
if fqtype in [ '01' , 'qfq' ] :
data [ 'adj' ] = ( data [ 'preclose' ] . shift ( - 1 ) / data [ 'close' ] ) . fillna ( 1 ) [ : : - 1 ] . cumprod ( )
else :
data [ 'adj' ] = ( data [ 'close' ] / data [ 'preclose' ] . shift ( - 1 ) ) . cumprod ( ) . shift ( 1 ) . fillna ( 1 )
for col in [ 'open' , 'high' , 'low' , 'close' , 'preclose' ] :
data [ col ] = data [ col ] * data [ 'adj' ]
data [ 'volume' ] = data [ 'volume' ] / data [ 'adj' ] if 'volume' in data . columns else data [ 'vol' ] / data [ 'adj' ]
try :
data [ 'high_limit' ] = data [ 'high_limit' ] * data [ 'adj' ]
data [ 'low_limit' ] = data [ 'high_limit' ] * data [ 'adj' ]
except :
pass
return data . query ( 'if_trade==1 and open != 0' ) . drop ( [ 'fenhong' , 'peigu' , 'peigujia' , 'songzhuangu' , 'if_trade' , 'category' ] , axis = 1 , errors = 'ignore' )
|
def reloadCollections ( self ) :
"reloads the collection list ."
|
r = self . connection . session . get ( self . collectionsURL )
data = r . json ( )
if r . status_code == 200 :
self . collections = { }
for colData in data [ "result" ] :
colName = colData [ 'name' ]
if colData [ 'isSystem' ] :
colObj = COL . SystemCollection ( self , colData )
else :
try :
colClass = COL . getCollectionClass ( colName )
colObj = colClass ( self , colData )
except KeyError :
if colData [ "type" ] == CONST . COLLECTION_EDGE_TYPE :
colObj = COL . Edges ( self , colData )
elif colData [ "type" ] == CONST . COLLECTION_DOCUMENT_TYPE :
colObj = COL . Collection ( self , colData )
else :
print ( ( "Warning!! Collection of unknown type: %d, trying to load it as Collection nonetheless." % colData [ "type" ] ) )
colObj = COL . Collection ( self , colData )
self . collections [ colName ] = colObj
else :
raise UpdateError ( data [ "errorMessage" ] , data )
|
def Voevent(stream, stream_id, role):
    """Create a new VOEvent element tree, with specified IVORN and role.

    Args:
        stream (str): used to construct the IVORN like so::

            ivorn = 'ivo://' + stream + '#' + stream_id

        (N.B. ``stream_id`` is converted to string if required.)
        So, e.g. we might set::

            stream = 'voevent.soton.ac.uk/super_exciting_events'
            stream_id = 77

        stream_id (str): See above.
        role (str): role as defined in VOEvent spec.
            (See also :py:class:`.definitions.roles`)

    Returns:
        Root-node of the VOEvent, as represented by an lxml.objectify element
        tree ('etree'). See also
        http://lxml.de/objectify.html#the-lxml-objectify-api
    """
    # Strip ignorable whitespace so the tree pretty-prints cleanly later.
    parser = objectify.makeparser(remove_blank_text=True)
    # Start from the packaged VOEvent 2.0 skeleton document.
    v = objectify.fromstring(voeventparse.definitions.v2_0_skeleton_str, parser=parser)
    _remove_root_tag_prefix(v)
    # Accept non-string ids (e.g. ints) by converting via repr().
    if not isinstance(stream_id, string_types):
        stream_id = repr(stream_id)
    v.attrib['ivorn'] = ''.join(('ivo://', stream, '#', stream_id))
    v.attrib['role'] = role
    # Presumably we'll always want the following children:
    # (NB, valid to then leave them empty)
    etree.SubElement(v, 'Who')
    etree.SubElement(v, 'What')
    etree.SubElement(v, 'WhereWhen')
    v.Who.Description = ('VOEvent created with voevent-parse, version {}. ' 'See https://github.com/timstaley/voevent-parse for details.').format(__version__)
    return v
|
def init_app(self, app=None, blueprint=None, additional_blueprints=None):
    """Update flask application with our api.

    :param Application app: a flask application
    :param blueprint: optional blueprint to register resources on
    :param additional_blueprints: optional extra blueprints to register
    """
    if app is not None:
        self.app = app
    if blueprint is not None:
        self.blueprint = blueprint
    # Wire up every resource collected before the app was available.
    for resource in self.resources:
        self.route(resource['resource'],
                   resource['view'],
                   *resource['urls'],
                   url_rule_options=resource['url_rule_options'])
    if self.blueprint is not None:
        self.app.register_blueprint(self.blueprint)
    if additional_blueprints is not None:
        for extra_blueprint in additional_blueprints:
            self.app.register_blueprint(extra_blueprint)
    # Default pagination size unless the app configured one already.
    self.app.config.setdefault('PAGE_SIZE', 30)
|
def write_int(fo, datum, schema=None):
    """int and long values are written using variable-length, zig-zag coding.

    Zig-zag maps signed to unsigned (0,-1,1,-2 -> 0,1,2,3), then the value
    is emitted 7 bits at a time, low bits first, MSB set on continuation
    bytes.
    """
    zigzag = (datum << 1) ^ (datum >> 63)
    while True:
        low_bits = zigzag & 0x7F
        zigzag >>= 7
        if zigzag:
            # More bytes follow: set the continuation bit.
            fo.write(pack('B', low_bits | 0x80))
        else:
            fo.write(pack('B', low_bits))
            break
|
def create_installer(self, rpm_py_version, **kwargs):
    """Create Installer object.

    Returns a FedoraInstaller bound to this object's ``python`` and
    ``rpm`` attributes; extra keyword arguments are forwarded unchanged.
    """
    return FedoraInstaller(rpm_py_version, self.python, self.rpm, **kwargs)
|
def valid_vlan_id(vlan_id, extended=True):
    """Validates a VLAN ID.

    Args:
        vlan_id (integer): VLAN ID to validate. If passed as ``str``, it will
            be cast to ``int``.
        extended (bool): If the VLAN ID range should be considered extended
            for Virtual Fabrics.

    Returns:
        bool: ``True`` if it is a valid VLAN ID. ``False`` if not.

    Raises:
        None
    """
    # Standard range tops out at 4095; the extended (Virtual Fabrics)
    # range goes up to 8191. The lower bound is 1 in both cases.
    upper_bound = 8191 if extended else 4095
    return 1 <= int(vlan_id) <= upper_bound
|
def property_observer(self, name):
    """Function decorator to register a property observer. See ``MPV.observe_property`` for details."""
    def decorate(handler):
        # Register immediately, and attach an unhook so the caller can
        # detach the observer later via handler.unobserve_mpv_properties().
        self.observe_property(name, handler)
        handler.unobserve_mpv_properties = lambda: self.unobserve_property(name, handler)
        return handler
    return decorate
|
def tables_insert(self, table_name, schema=None, query=None, friendly_name=None, description=None):
    """Issues a request to create a table or view in the specified dataset with the specified id.

    A schema must be provided to create a Table, or a query must be provided
    to create a View.

    Args:
        table_name: the name of the table as a tuple of components.
        schema: the schema, if this is a Table creation.
        query: the query, if this is a View creation.
        friendly_name: an optional friendly name.
        description: an optional description.
    Returns:
        A parsed result object.
    Raises:
        Exception if there is an error performing the operation.
    """
    url = Api._ENDPOINT + (Api._TABLES_PATH % (table_name.project_id, table_name.dataset_id, '', ''))
    payload = {
        'kind': 'bigquery#table',
        'tableReference': {
            'projectId': table_name.project_id,
            'datasetId': table_name.dataset_id,
            'tableId': table_name.table_id,
        },
    }
    # schema => Table creation; query => View creation.
    if schema:
        payload['schema'] = {'fields': schema}
    if query:
        payload['view'] = {'query': query}
    if friendly_name:
        payload['friendlyName'] = friendly_name
    if description:
        payload['description'] = description
    return datalab.utils.Http.request(url, data=payload, credentials=self._credentials)
|
def layout_filename(fallback):
    '''get location of layout file'''
    global display_size
    global vehiclename
    dw, dh = display_size
    # Pick the per-user settings directory (POSIX first, then Windows).
    if 'HOME' in os.environ:
        dirname = os.path.join(os.environ['HOME'], ".mavproxy")
        if not os.path.exists(dirname):
            try:
                os.mkdir(dirname)
            except Exception:
                # best effort; fall through and let the caller handle I/O errors
                pass
    elif 'LOCALAPPDATA' in os.environ:
        dirname = os.path.join(os.environ['LOCALAPPDATA'], "MAVProxy")
    else:
        return None
    # Prefer a vehicle-specific layout when one exists (or when not
    # falling back), otherwise use the generic per-resolution layout.
    if vehiclename:
        candidate = os.path.join(dirname, "layout-%s-%ux%u" % (vehiclename, dw, dh))
        if not fallback or os.path.exists(candidate):
            return candidate
    return os.path.join(dirname, "layout-%ux%u" % (dw, dh))
|
def sql_context(self, application_name):
    """Create a spark context given the parameters configured in this class.

    The caller is responsible for calling ``.close`` on the resulting spark
    context.

    Parameters
    ----------
    application_name : string

    Returns
    -------
    (sc, sqlContext) : tuple
        NOTE(review): despite the original docstring claiming a single
        SparkContext, this returns a 2-tuple of (SparkContext,
        pyspark.SQLContext).
    """
    sc = self.spark_context(application_name)
    # Imported lazily so importing this module does not require pyspark.
    import pyspark
    sqlContext = pyspark.SQLContext(sc)
    return (sc, sqlContext)
|
async def preProcessForComparison(results, target_size, size_tolerance_prct):
    """Process results to prepare them for future comparison and sorting.

    Picks a reference result, removes reference-only and duplicate results,
    computes image signatures concurrently, and flags every result that is
    visually similar to the reference.
    """
    # find reference (= image most likely to match target cover ignoring factors like size and format)
    reference = None
    for result in results:
        if result.source_quality is CoverSourceQuality.REFERENCE:
            # Keep the best-comparing REFERENCE-quality result seen so far.
            if ((reference is None) or (CoverSourceResult.compare(result, reference, target_size=target_size, size_tolerance_prct=size_tolerance_prct) > 0)):
                reference = result
    # remove results that are only refs
    results = list(itertools.filterfalse(operator.attrgetter("is_only_reference"), results))
    # remove duplicates (same urls, strictly worse by comparison)
    no_dup_results = []
    for result in results:
        is_dup = False
        for result_comp in results:
            if ((result_comp is not result) and (result_comp.urls == result.urls) and (__class__.compare(result, result_comp, target_size=target_size, size_tolerance_prct=size_tolerance_prct) < 0)):
                is_dup = True
                break
        if not is_dup:
            no_dup_results.append(result)
    dup_count = len(results) - len(no_dup_results)
    if dup_count > 0:
        logging.getLogger("Cover").info("Removed %u duplicate results" % (dup_count))
        results = no_dup_results
    if reference is not None:
        logging.getLogger("Cover").info("Reference is: %s" % (reference))
        reference.is_similar_to_reference = True
        # calculate sigs concurrently for all remaining results
        futures = []
        for result in results:
            coroutine = result.updateSignature()
            future = asyncio.ensure_future(coroutine)
            futures.append(future)
        if reference.is_only_reference:
            # reference was filtered out above, so schedule it separately
            assert (reference not in results)
            coroutine = reference.updateSignature()
            future = asyncio.ensure_future(coroutine)
            futures.append(future)
        if futures:
            await asyncio.wait(futures)
        for future in futures:
            future.result()  # raise pending exception if any
        # compare other results to reference
        for result in results:
            if ((result is not reference) and (result.thumbnail_sig is not None) and (reference.thumbnail_sig is not None)):
                result.is_similar_to_reference = __class__.areImageSigsSimilar(result.thumbnail_sig, reference.thumbnail_sig)
                if result.is_similar_to_reference:
                    logging.getLogger("Cover").debug("%s is similar to reference" % (result))
                else:
                    logging.getLogger("Cover").debug("%s is NOT similar to reference" % (result))
    else:
        logging.getLogger("Cover").warning("No reference result found")
    return results
|
def _process_genotypes(self, limit):
    """Add the genotype internal id to flybase mapping to the idhashmap.
    Also, add them as individuals to the graph.

    Triples created:
    <genotype id> a GENO:intrinsic_genotype
    <genotype id> rdfs:label "<gvc> [bkgd]"

    :param limit: maximum number of rows to fully process (the id hash is
        still populated for every row so later passes can resolve ids)
    :return: None
    """
    if self.test_mode:
        graph = self.testgraph
    else:
        graph = self.graph
    model = Model(graph)
    line_counter = 0
    raw = '/'.join((self.rawdir, 'genotype'))
    LOG.info("building labels for genotypes")
    geno = Genotype(graph)
    fly_tax = self.globaltt['Drosophila melanogaster']
    with open(raw, 'r') as f:
        f.readline()  # read the header row; skip
        filereader = csv.reader(f, delimiter='\t', quotechar='\"')
        for line in filereader:
            line_counter += 1
            (genotype_num, uniquename, description, name) = line
            # if self.test_mode is True:
            #     if int(object_key) not in self.test_keys.get('genotype'):
            #         continue
            # add the internal genotype to pub mapping
            genotype_id = 'MONARCH:FBgeno' + str(genotype_num)
            self.idhash['genotype'][genotype_num] = genotype_id
            if description == '':
                description = None
            # Past the limit we keep filling the id hash but skip graph output.
            if not self.test_mode and limit is not None and line_counter > limit:
                pass
            else:
                if self.test_mode and int(genotype_num) not in self.test_keys['genotype']:
                    continue
                model.addIndividualToGraph(genotype_id, uniquename, self.globaltt['intrinsic_genotype'], description)
                # we know all genotypes are in flies
                # FIXME we assume here they are in melanogaster,
                # but that isn't necessarily true!!!
                # TODO should the taxon be == genomic background?
                geno.addTaxon(fly_tax, genotype_id)
                genotype_iid = self._makeInternalIdentifier('genotype', genotype_num)
                model.addComment(genotype_id, genotype_iid)
                if name.strip() != '':
                    model.addSynonym(genotype_id, name)
    return
|
def _ExtractContentFromDataStream(self, mediator, file_entry, data_stream_name):
    """Extracts content from a data stream.

    Args:
      mediator (ParserMediator): mediates the interactions between
          parsers and other components, such as storage and abort signals.
      file_entry (dfvfs.FileEntry): file entry to extract its content.
      data_stream_name (str): name of the data stream whose content is to be
          extracted.
    """
    # Surface the current state for external status reporting.
    self.processing_status = definitions.STATUS_INDICATOR_EXTRACTING
    if self._processing_profiler:
        self._processing_profiler.StartTiming('extracting')
    self._event_extractor.ParseDataStream(mediator, file_entry, data_stream_name)
    if self._processing_profiler:
        self._processing_profiler.StopTiming('extracting')
    self.processing_status = definitions.STATUS_INDICATOR_RUNNING
    # Record activity so watchdogs can detect a hung worker.
    self.last_activity_timestamp = time.time()
|
def nested_set_dict(d, keys, value):
    """Set a value to a sequence of nested keys.

    Parameters
    ----------
    d : Mapping
    keys : Sequence[str]
    value : Any

    Raises
    ------
    ValueError
        If the final key already exists at its nesting level.
    """
    assert keys
    *parents, last = keys
    node = d
    # Walk/create the intermediate dicts down to the leaf's parent.
    for parent_key in parents:
        node = node.setdefault(parent_key, {})
    if last in node:
        raise ValueError("duplicated key '{}'".format(last))
    node[last] = value
|
def colors():
    """Creates an ad-hoc enum class mapping UI element names to color names."""
    palette = {
        'TIME_LEFT': "red",
        'CONTEST_NAME': "yellow",
        'HOST': "green",
        'MISC': "blue",
        'TIME_TO_START': "green",
    }
    # Build the class dynamically so members are plain class attributes.
    return type('Enum', (), palette)
|
def add_tip_labels_to_axes(self):
    """Add text offset from tips of tree with correction for orientation,
    and fixed_order which is usually used in multitree plotting.
    """
    # get tip-coords and replace if using fixed_order
    xpos = self.ttree.get_tip_coordinates('x')
    ypos = self.ttree.get_tip_coordinates('y')
    if self.style.orient in ("up", "down"):
        # vertical layout: x is the tip index axis, y the depth axis
        if self.ttree._fixed_order:
            xpos = list(range(self.ttree.ntips))
            ypos = ypos[self.ttree._fixed_idx]
        if self.style.tip_labels_align:
            # align all labels at the baseline
            ypos = np.zeros(self.ttree.ntips)
    if self.style.orient in ("right", "left"):
        # horizontal layout: roles of x and y are swapped
        if self.ttree._fixed_order:
            xpos = xpos[self.ttree._fixed_idx]
            ypos = list(range(self.ttree.ntips))
        if self.style.tip_labels_align:
            xpos = np.zeros(self.ttree.ntips)
    # pop fill from color dict if using color
    tstyle = deepcopy(self.style.tip_labels_style)
    if self.style.tip_labels_colors:
        tstyle.pop("fill")
    # add tip names to coordinates calculated above; rotate labels -90
    # degrees for vertical layouts
    self.axes.text(xpos, ypos, self.tip_labels, angle=(0 if self.style.orient in ("right", "left") else -90), style=tstyle, color=self.style.tip_labels_colors,)
    # get stroke-width for aligned tip-label lines (optional)
    # copy stroke-width from the edge_style unless user set it
    if not self.style.edge_align_style.get("stroke-width"):
        self.style.edge_align_style["stroke-width"] = (self.style.edge_style["stroke-width"])
|
def make_future_info(first_sid, root_symbols, years, notice_date_func, expiration_date_func, start_date_func, month_codes=None, multiplier=500):
    """Create a DataFrame representing futures for `root_symbols` during `year`.

    Generates a contract per triple of (symbol, year, month) supplied to
    `root_symbols`, `years`, and `month_codes`.

    Parameters
    ----------
    first_sid : int
        The first sid to use for assigning sids to the created contracts.
    root_symbols : list[str]
        A list of root symbols for which to create futures.
    years : list[int or str]
        Years (e.g. 2014), for which to produce individual contracts.
    notice_date_func : (Timestamp) -> Timestamp
        Function to generate notice dates from first of the month associated
        with asset month code. Return NaT to simulate futures with no notice
        date.
    expiration_date_func : (Timestamp) -> Timestamp
        Function to generate expiration dates from first of the month
        associated with asset month code.
    start_date_func : (Timestamp) -> Timestamp, optional
        Function to generate start dates from first of the month associated
        with each asset month code. Defaults to a start_date one year prior
        to the month_code date.
    month_codes : dict[str -> [1..12]], optional
        Dictionary of month codes for which to create contracts. Entries
        should be strings mapped to values from 1 (January) to 12 (December).
        Default is zipline.futures.CMES_CODE_TO_MONTH
    multiplier : int
        The contract multiplier.

    Returns
    -------
    futures_info : pd.DataFrame
        DataFrame of futures data suitable for passing to an AssetDBWriter.
    """
    if month_codes is None:
        month_codes = CMES_CODE_TO_MONTH
    year_strs = list(map(str, years))
    years = [pd.Timestamp(s, tz='UTC') for s in year_strs]
    # Pairs of string/date like ('K06', 2006-05-01)
    contract_suffix_to_beginning_of_month = tuple(
        (month_code + year_str[-2:], year + MonthBegin(month_num))
        for ((year, year_str), (month_code, month_num))
        in product(zip(years, year_strs), month_codes.items())
    )
    contracts = []
    parts = product(root_symbols, contract_suffix_to_beginning_of_month)
    for sid, (root_sym, (suffix, month_begin)) in enumerate(parts, first_sid):
        contracts.append({
            'sid': sid,
            'root_symbol': root_sym,
            'symbol': root_sym + suffix,
            'start_date': start_date_func(month_begin),
            'notice_date': notice_date_func(month_begin),
            # BUGFIX: this previously called notice_date_func, so the
            # expiration_date_func argument was silently ignored and
            # expiration_date always equaled notice_date.
            'expiration_date': expiration_date_func(month_begin),
            'multiplier': multiplier,
            'exchange': "TEST",
        })
    return pd.DataFrame.from_records(contracts, index='sid')
|
def preprocess(S, coloring_method=None):
    """Preprocess splitting functions.

    Parameters
    ----------
    S : csr_matrix
        Strength of connection matrix
    coloring_method : string
        Algorithm used to compute the vertex coloring:
            * 'MIS' - Maximal Independent Set
            * 'JP'  - Jones-Plassmann (parallel)
            * 'LDF' - Largest-Degree-First (parallel)

    Returns
    -------
    weights : ndarray
        Weights from a graph coloring of G
    G : csr_matrix
        union of S and T
    S : csr_matrix
        Strength matrix with ones
    T : csr_matrix
        transpose of S

    Notes
    -----
    Performs the following operations:
        - Checks input strength of connection matrix S
        - Replaces S.data with ones
        - Creates T = S.T in CSR format
        - Creates G = S union T in CSR format
        - Creates random weights
        - Augments weights with graph coloring (if coloring_method is given)
    """
    if not isspmatrix_csr(S):
        raise TypeError('expected csr_matrix')
    if S.shape[0] != S.shape[1]:
        raise ValueError('expected square matrix, shape=%s' % (S.shape,))
    N = S.shape[0]
    # Only the sparsity pattern matters from here on; replace data with ones.
    S = csr_matrix((np.ones(S.nnz, dtype='int8'), S.indices, S.indptr), shape=(N, N))
    T = S.T.tocsr()
    # transpose S for efficient column access
    G = S + T
    # form graph (must be symmetric)
    G.data[:] = 1
    weights = np.ravel(T.sum(axis=1))
    # initial weights
    # weights -= T.diagonal()  # discount self loops
    if coloring_method is None:
        # Random tie-breaker only.
        # NOTE(review): sp.rand here presumably aliases a uniform random
        # vector generator (old scipy.rand) — confirm against imports.
        weights = weights + sp.rand(len(weights))
    else:
        # Fold the vertex coloring into the fractional part of the weights.
        coloring = vertex_coloring(G, coloring_method)
        num_colors = coloring.max() + 1
        weights = weights + (sp.rand(len(weights)) + coloring) / num_colors
    return (weights, G, S, T)
|
def GetEventTagByIdentifier(self, identifier):
    """Retrieves a specific event tag.

    Args:
      identifier (SQLTableIdentifier): event tag identifier.

    Returns:
      EventTag: event tag or None if not available.
    """
    # Row identifiers are 1-based; the container index is 0-based.
    row_index = identifier.row_identifier - 1
    event_tag = self._GetAttributeContainerByIndex(self._CONTAINER_TYPE_EVENT_TAG, row_index)
    if not event_tag:
        return event_tag
    # Re-link the tag to its event and drop the internal row column.
    event_identifier = identifiers.SQLTableIdentifier(self._CONTAINER_TYPE_EVENT, event_tag.event_row_identifier)
    event_tag.SetEventIdentifier(event_identifier)
    del event_tag.event_row_identifier
    return event_tag
|
def add_schema(self, schema):
    """Merge in a JSON schema. This can be a ``dict`` or another
    ``SchemaBuilder``.

    :param schema: a JSON Schema

    .. note::
        There is no schema validation. If you pass in a bad schema,
        you might get back a bad schema.
    """
    if isinstance(schema, SchemaBuilder):
        builder_uri = schema.schema_uri
        schema = schema.to_schema()
        # A builder with no explicit uri should not contribute a '$schema'.
        if builder_uri is None:
            del schema['$schema']
    elif isinstance(schema, SchemaNode):
        schema = schema.to_schema()
    if '$schema' in schema:
        # Adopt the incoming uri only if we do not already have one, then
        # strip it from a shallow copy so the original dict is untouched.
        self.schema_uri = self.schema_uri or schema['$schema']
        schema = dict(schema)
        del schema['$schema']
    self._root_node.add_schema(schema)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.