def download(self, output_dir, url, overwrite):
    """Download file to /tmp."""
    tmp = self.url2tmp(output_dir, url)
    if os.path.isfile(tmp) and not overwrite:
        logging.info("File {0} already exists. Skipping download.".format(tmp))
        return tmp
    f = open(tmp, 'wb')
    logging.info("Downloading {0}".format(url))
    res = requests.get(url, stream=True)
    if res.status_code != 200:
        # failed to download, clean up and raise exception
        f.close()
        os.remove(tmp)
        error = "{0}\n\nFailed to download <{1}>".format(res.content, url)
        raise IOError(error)
    for block in res.iter_content(1024):
        f.write(block)
    f.close()
    return tmp
|
def visit_UnaryOp(self, node: AST, dfltChaining: bool = True) -> str:
    """Return representation of `node`'s operator and operand."""
    op = node.op
    with self.op_man(op):
        return self.visit(op) + self.visit(node.operand)
|
def update_hash(src_file):
    """Update the hash for the given file.

    Args:
        src_file: The path of the file to hash.
    """
    hash_file = local.path(src_file) + ".hash"
    new_hash = 0
    with open(hash_file, 'w') as h_file:
        new_hash = get_hash_of_dirs(src_file)
        h_file.write(str(new_hash))
    return new_hash
|
def reset_clipboard(self):
    """Resets the clipboard, so that old elements do not pollute the new
    selection that is copied into the clipboard.

    :return:
    """
    # reset selections
    for state_element_attr in ContainerState.state_element_attrs:
        self.model_copies[state_element_attr] = []
    # reset the parent state_id the copied elements are taken from
    self.copy_parent_state_id = None
    self.reset_clipboard_mapping_dicts()
|
def _deep_merge_dict(a, b):
    """Additively merge the right-side dict into the left-side dict."""
    for k, v in b.items():
        if k in a and isinstance(a[k], dict) and isinstance(v, dict):
            _deep_merge_dict(a[k], v)
        else:
            a[k] = v
|
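# Illustrative usage of _deep_merge_dict (a sketch; the function mutates the
# left-hand dict in place and returns None):
defaults = {'db': {'host': 'localhost', 'port': 5432}, 'debug': False}
overrides = {'db': {'port': 6432}, 'debug': True}
_deep_merge_dict(defaults, overrides)
# defaults is now {'db': {'host': 'localhost', 'port': 6432}, 'debug': True}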
def build(cls, node):
    """Construct a namer object for a given function scope."""
    if not isinstance(node, gast.FunctionDef):
        raise ValueError
    namer = cls()
    namer.names.update(get_names(node))
    return namer
|
def cast_to_report(self, value):
    """Report format uses only the value's id."""
    value = super(ValuesListField, self).cast_to_report(value)
    if value:
        return value['id']
|
def flush(self, include_footers: bool = False) -> "Future[None]":
    """Flushes the current output buffer to the network.

    The ``callback`` argument, if given, can be used for flow control:
    it will be run when all flushed data has been written to the socket.
    Note that only one flush callback can be outstanding at a time;
    if another flush occurs before the previous flush's callback
    has been run, the previous callback will be discarded.

    .. versionchanged:: 4.0
       Now returns a `.Future` if no callback is given.

    .. versionchanged:: 6.0
       The ``callback`` argument was removed.
    """
    assert self.request.connection is not None
    chunk = b"".join(self._write_buffer)
    self._write_buffer = []
    if not self._headers_written:
        self._headers_written = True
        for transform in self._transforms:
            assert chunk is not None
            self._status_code, self._headers, chunk = transform.transform_first_chunk(
                self._status_code, self._headers, chunk, include_footers
            )
        # Ignore the chunk and only write the headers for HEAD requests
        if self.request.method == "HEAD":
            chunk = b""
        # Finalize the cookie headers (which have been stored in a side
        # object so an outgoing cookie could be overwritten before it
        # is sent).
        if hasattr(self, "_new_cookie"):
            for cookie in self._new_cookie.values():
                self.add_header("Set-Cookie", cookie.OutputString(None))
        start_line = httputil.ResponseStartLine("", self._status_code, self._reason)
        return self.request.connection.write_headers(start_line, self._headers, chunk)
    else:
        for transform in self._transforms:
            chunk = transform.transform_chunk(chunk, include_footers)
        # Ignore the chunk and only write the headers for HEAD requests
        if self.request.method != "HEAD":
            return self.request.connection.write(chunk)
        else:
            future = Future()  # type: Future[None]
            future.set_result(None)
            return future
|
def unpack_ip_addr(addr):
    """Given a six-octet BACnet address, return an IP address tuple."""
    if isinstance(addr, bytearray):
        addr = bytes(addr)
    return (socket.inet_ntoa(addr[0:4]), struct.unpack('!H', addr[4:6])[0])
|
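# Illustrative usage of unpack_ip_addr (a sketch; a BACnet/IP address is four
# octets of IPv4 address followed by a two-octet big-endian port number):
addr = bytes([192, 168, 1, 10, 0xBA, 0xC0])
print(unpack_ip_addr(addr))  # ('192.168.1.10', 47808)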
def ip_rtm_config_route_static_route_oif_static_route_oif_name(self, **kwargs):
    """Auto Generated Code"""
    config = ET.Element("config")
    ip = ET.SubElement(config, "ip", xmlns="urn:brocade.com:mgmt:brocade-common-def")
    rtm_config = ET.SubElement(ip, "rtm-config", xmlns="urn:brocade.com:mgmt:brocade-rtm")
    route = ET.SubElement(rtm_config, "route")
    static_route_oif = ET.SubElement(route, "static-route-oif")
    static_route_dest_key = ET.SubElement(static_route_oif, "static-route-dest")
    static_route_dest_key.text = kwargs.pop('static_route_dest')
    static_route_oif_type_key = ET.SubElement(static_route_oif, "static-route-oif-type")
    static_route_oif_type_key.text = kwargs.pop('static_route_oif_type')
    static_route_oif_name = ET.SubElement(static_route_oif, "static-route-oif-name")
    static_route_oif_name.text = kwargs.pop('static_route_oif_name')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
|
def get_client_by_appid(self, authorizer_appid):
    """Get a Client object by its authorizer_appid.

    :params authorizer_appid: appid of the authorized official account
    """
    access_token_key = '{0}_access_token'.format(authorizer_appid)
    refresh_token_key = '{0}_refresh_token'.format(authorizer_appid)
    access_token = self.session.get(access_token_key)
    refresh_token = self.session.get(refresh_token_key)
    assert refresh_token
    if not access_token:
        ret = self.refresh_authorizer_token(authorizer_appid, refresh_token)
        access_token = ret['authorizer_access_token']
        refresh_token = ret['authorizer_refresh_token']
        access_token_key = '{0}_access_token'.format(authorizer_appid)
        expires_in = 7200
        if 'expires_in' in ret:
            expires_in = ret['expires_in']
        self.session.set(access_token_key, access_token, expires_in)
    return WeChatComponentClient(authorizer_appid, self, session=self.session)
|
async def get_key_metadata(wallet_handle: int, verkey: str) -> str:
    """Retrieves the meta information for the given key in the wallet.

    :param wallet_handle: Wallet handle (created by open_wallet).
    :param verkey: The key (verkey, key id) to retrieve metadata for.
    :return: metadata: The meta information stored with the key; can be null if no metadata was saved for this key.
    """
    logger = logging.getLogger(__name__)
    logger.debug("get_key_metadata: >>> wallet_handle: %r, verkey: %r", wallet_handle, verkey)

    if not hasattr(get_key_metadata, "cb"):
        logger.debug("get_key_metadata: Creating callback")
        get_key_metadata.cb = create_cb(CFUNCTYPE(None, c_int32, c_int32, c_char_p))

    c_wallet_handle = c_int32(wallet_handle)
    c_verkey = c_char_p(verkey.encode('utf-8'))

    metadata = await do_call('indy_get_key_metadata', c_wallet_handle, c_verkey, get_key_metadata.cb)

    res = metadata.decode()
    logger.debug("get_key_metadata: <<< res: %r", res)
    return res
|
def parse_variable(self, variable):
    """Method to parse an input or output variable.

    **Example Variable**::

        #App:1234:output!String

    Args:
        variable (string): The variable name to parse.

    Returns:
        (dictionary): Result of parsed string.
    """
    data = None
    if variable is not None:
        variable = variable.strip()
        if re.match(self._variable_match, variable):
            var = re.search(self._variable_parse, variable)
            data = {
                'root': var.group(0),
                'job_id': var.group(2),
                'name': var.group(3),
                'type': var.group(4),
            }
    return data
|
def __convert_booleans(self, eitem):
    """Convert True/False to 1/0 for better Kibana processing."""
    for field in eitem.keys():
        if isinstance(eitem[field], bool):
            if eitem[field]:
                eitem[field] = 1
            else:
                eitem[field] = 0
    return eitem
|
def run_final_eval(train_session, module_spec, class_count, image_lists,
                   jpeg_data_tensor, decoded_image_tensor, resized_image_tensor,
                   bottleneck_tensor):
    """Runs a final evaluation on an eval graph using the test data set.

    Args:
        train_session: Session for the train graph with the tensors below.
        module_spec: The hub.ModuleSpec for the image module being used.
        class_count: Number of classes.
        image_lists: OrderedDict of training images for each label.
        jpeg_data_tensor: The layer to feed jpeg image data into.
        decoded_image_tensor: The output of decoding and resizing the image.
        resized_image_tensor: The input node of the recognition graph.
        bottleneck_tensor: The bottleneck output layer of the CNN graph.
    """
    test_bottlenecks, test_ground_truth, test_filenames = (
        get_random_cached_bottlenecks(train_session, image_lists,
                                      FLAGS.test_batch_size, 'testing',
                                      FLAGS.bottleneck_dir, FLAGS.image_dir,
                                      jpeg_data_tensor, decoded_image_tensor,
                                      resized_image_tensor, bottleneck_tensor,
                                      FLAGS.tfhub_module))
    (eval_session, _, bottleneck_input, ground_truth_input, evaluation_step,
     prediction) = build_eval_session(module_spec, class_count)
    test_accuracy, predictions = eval_session.run(
        [evaluation_step, prediction],
        feed_dict={bottleneck_input: test_bottlenecks,
                   ground_truth_input: test_ground_truth})
    tf.logging.info('Final test accuracy = %.1f%% (N=%d)' %
                    (test_accuracy * 100, len(test_bottlenecks)))
    if FLAGS.print_misclassified_test_images:
        tf.logging.info('=== MISCLASSIFIED TEST IMAGES ===')
        for i, test_filename in enumerate(test_filenames):
            if predictions[i] != test_ground_truth[i]:
                tf.logging.info('%70s  %s' % (test_filename,
                                              list(image_lists.keys())[predictions[i]]))
|
def wald_wolfowitz(sequence):
    """Implements the Wald-Wolfowitz runs test:
    http://en.wikipedia.org/wiki/Wald-Wolfowitz_runs_test
    http://support.sas.com/kb/33/092.html

    :param sequence: any iterable with at most 2 values. e.g.
        '1001001'
        [1, 0, 1, 0, 1]
        'abaaabbba'
    :rtype: a dict with keys of
        `n_runs`: the number of runs in the sequence
        `p`: the support to reject the null-hypothesis that the number of runs
             supports a random sequence
        `z`: the z-score, used to calculate the p-value
        `sd`, `mean`: the expected standard deviation and mean of the number of
             runs, given the ratio of numbers of 1's/0's in the sequence

    >>> r = wald_wolfowitz('1000001')
    >>> r['n_runs']  # should be 3, because 1, 0, 1
    3
    >>> r['p'] < 0.05  # not < 0.05 evidence to reject Ho of random sequence
    False

    # this should show significance for non-randomness
    >>> li = [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]
    >>> wald_wolfowitz(li)['p'] < 0.05
    True
    """
    R = n_runs = sum(1 for s in groupby(sequence, lambda a: a))
    n = float(sum(1 for s in sequence if s == sequence[0]))
    m = float(sum(1 for s in sequence if s != sequence[0]))
    # expected mean of runs
    ER = ((2 * n * m) / (n + m)) + 1
    # expected variance of runs
    VR = (2 * n * m * (2 * n * m - n - m)) / ((n + m) ** 2 * (n + m - 1))
    O = (ER - 1) * (ER - 2) / (n + m - 1.)
    assert VR - O < 0.001, (VR, O)
    SD = math.sqrt(VR)
    # z-score
    Z = (R - ER) / SD
    return {'z': Z, 'mean': ER, 'sd': SD, 'p': zprob(Z), 'n_runs': R}
|
async def wait(self):
    """EventSourceResponse object is used for streaming data to the client;
    this method returns a future, so we can wait until the connection is
    closed or another task explicitly calls the ``stop_streaming`` method.
    """
    if self._ping_task is None:
        raise RuntimeError('Response is not started')
    with contextlib.suppress(asyncio.CancelledError):
        await self._ping_task
|
def copy_to_region(self, region):
    """Create a new key pair of the same name in another region.
    Note that the new key pair will use a different ssh
    cert than this key pair. After doing the copy,
    you will need to save the material associated with the
    new key pair (use the save method) to a local file.

    :type region: :class:`boto.ec2.regioninfo.RegionInfo`
    :param region: The region to which this security group will be copied.

    :rtype: :class:`boto.ec2.keypair.KeyPair`
    :return: The new key pair
    """
    if region.name == self.region:
        raise BotoClientError('Unable to copy to the same Region')
    conn_params = self.connection.get_params()
    rconn = region.connect(**conn_params)
    kp = rconn.create_key_pair(self.name)
    return kp
|
def path_regex(self):
    """Return the regex for the path to the build folder."""
    regex = r'releases/%(VERSION)s/%(PLATFORM)s/%(LOCALE)s/'
    return regex % {'LOCALE': self.locale,
                    'PLATFORM': self.platform_regex,
                    'VERSION': self.version}
|
def showLayer(self, title='', debugText=''):
    """Shows the single layer.

    :param title: A string with the title of the window where to render the image.
    :param debugText: A string with some text to render over the image.
    :rtype: Nothing.
    """
    img = PIL.Image.fromarray(self.data, 'RGBA')
    if debugText != '':
        draw = PIL.ImageDraw.Draw(img)
        font = PIL.ImageFont.truetype("DejaVuSansMono.ttf", 24)
        draw.text((0, 0), debugText, (255, 255, 255), font=font)
    img.show(title=title)
|
def generate_evenly_distributed_data_sparse(dim=2000, num_active=40, num_samples=1000):
    """Generates a set of data drawn from a uniform distribution. The binning
    structure from Poirazi & Mel is ignored, and all (dim choose num_active)
    arrangements are possible.
    """
    indices = [numpy.random.choice(dim, size=num_active, replace=False)
               for _ in range(num_samples)]
    data = SM32()
    data.reshape(0, dim)
    for row in indices:
        data.addRowNZ(row, [1] * num_active)
    # data.reshape(num_samples, dim)
    # for sample, datapoint in enumerate(indices):
    #     for index in datapoint:
    #         data[sample, index] = 1.
    return data
|
def get(self, sid):
    """Constructs a ReservationContext

    :param sid: The sid

    :returns: twilio.rest.taskrouter.v1.workspace.task.reservation.ReservationContext
    :rtype: twilio.rest.taskrouter.v1.workspace.task.reservation.ReservationContext
    """
    return ReservationContext(
        self._version,
        workspace_sid=self._solution['workspace_sid'],
        task_sid=self._solution['task_sid'],
        sid=sid,
    )
|
def append_transformed_structures(self, tstructs_or_transmuter):
    """Method is overloaded to accept either a list of transformed structures
    or a transmuter, in which case it appends the second transmuter's
    structures.

    Args:
        tstructs_or_transmuter: A list of transformed structures or a
            transmuter.
    """
    if isinstance(tstructs_or_transmuter, self.__class__):
        self.transformed_structures.extend(tstructs_or_transmuter.transformed_structures)
    else:
        for ts in tstructs_or_transmuter:
            assert isinstance(ts, TransformedStructure)
        self.transformed_structures.extend(tstructs_or_transmuter)
|
def commit(self):
    """Commit recorded changes, turn off recording, and return the changes."""
    assert self.record
    result = self.files_written, self.dirs_created
    self._init_record()
    return result
|
def validate_input(function):
    """Decorator that validates the kwargs of the function passed to it."""
    @wraps(function)
    def wrapper(*args, **kwargs):
        try:
            # find the validator by name and call it on the kwargs
            name = function.__name__ + '_validator'
            globals()[name](kwargs)
            return function(*args, **kwargs)
        except KeyError:
            raise Exception("Could not find validation schema for the"
                            " function " + function.__name__)
    return wrapper
|
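# Illustrative usage of @validate_input (a sketch; the validator is looked up
# in globals() under the name "<function>_validator", so this assumes the
# decorated function and its validator live in the same module as the decorator):
def greet_validator(kwargs):
    if 'name' not in kwargs:
        raise ValueError("'name' is required")

@validate_input
def greet(**kwargs):
    return "Hello, " + kwargs['name']

greet(name="Ada")  # passes validation and returns "Hello, Ada"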
def _src_media_url_for_video(self, video):
    '''Get the url for the video media that we can send to Clarify'''
    src_url = None
    best_height = 0
    best_source = None
    # TODO: This assumes we have ingested videos. For remote videos, check if the
    # remote flag is True and if so, use the src url from the Asset endpoint.
    video_sources = self.bc_client.get_video_sources(video['id'])
    # Look for codec H264 with good resolution
    for source in video_sources:
        height = source.get('height', 0)
        codec = source.get('codec')
        if source.get('src') and codec and codec.upper() == 'H264' \
                and height <= 1080 and height > best_height:
            best_source = source
            best_height = height  # track the best resolution seen so far
    if best_source is not None:
        src_url = best_source['src']
    return src_url
|
def getinfo(self, disk, part=''):
    """Get more info about a disk or a disk partition.

    :param disk: (/dev/sda, /dev/sdb, etc.)
    :param part: (/dev/sda1, /dev/sdb2, etc.)
    :return: a dict with "blocksize", "start", "size", and "free" sections
    """
    args = {
        "disk": disk,
        "part": part,
    }
    self._getpart_chk.check(args)
    response = self._client.raw('disk.getinfo', args)
    result = response.get()
    if result.state != 'SUCCESS':
        raise RuntimeError('failed to get info: %s' % result.data)
    if result.level != 20:  # 20 is JSON output
        raise RuntimeError('invalid response type from disk.getinfo command')
    data = result.data.strip()
    if data:
        return json.loads(data)
    else:
        return {}
|
def save_object(self, obj, expected_value=None):
    """Marshal the object and do a PUT"""
    doc = self.marshal_object(obj)
    if obj.id:
        url = "/%s/%s" % (self.db_name, obj.id)
    else:
        url = "/%s" % (self.db_name)
    resp = self._make_request("PUT", url, body=doc.toxml())
    new_obj = self.get_object_from_doc(obj.__class__, None, parse(resp))
    obj.id = new_obj.id
    for prop in obj.properties():
        try:
            propname = prop.name
        except AttributeError:
            propname = None
        if propname:
            value = getattr(new_obj, prop.name)
            if value:
                setattr(obj, prop.name, value)
    return obj
|
def _get_referenced_type_equivalences(graphql_types, type_equivalence_hints):
    """Filter union types with no edges from the type equivalence hints dict."""
    referenced_types = set()
    for graphql_type in graphql_types.values():
        if isinstance(graphql_type, (GraphQLObjectType, GraphQLInterfaceType)):
            for _, field in graphql_type.fields.items():
                if isinstance(field.type, GraphQLList):
                    referenced_types.add(field.type.of_type.name)
    return {
        original: union
        for original, union in type_equivalence_hints.items()
        if union.name in referenced_types
    }
|
def permute(self, ordering: np.ndarray, axis: int) -> None:
    """Permute the dataset along the indicated axis.

    Args:
        ordering (list of int): The desired order along the axis
        axis (int): The axis along which to permute

    Returns:
        Nothing.
    """
    if self._file.__contains__("tiles"):
        del self._file['tiles']
    # Flatten the ordering, in case we got a column vector
    ordering = list(np.array(ordering).flatten())
    self.layers._permute(ordering, axis=axis)
    if axis == 0:
        self.row_attrs._permute(ordering)
        self.row_graphs._permute(ordering)
    if axis == 1:
        self.col_attrs._permute(ordering)
        self.col_graphs._permute(ordering)
|
def create_key ( key_type = 'RSA' , key_length = 1024 , name_real = 'Autogenerated Key' , name_comment = 'Generated by SaltStack' , name_email = None , subkey_type = None , subkey_length = None , expire_date = None , use_passphrase = False , user = None , gnupghome = None ) :
'''Create a key in the GPG keychain
. . note : :
GPG key generation requires * a lot * of entropy and randomness .
Difficult to do over a remote connection , consider having
another process available which is generating randomness for
the machine . Also especially difficult on virtual machines ,
consider the ` rng - tools
< http : / / www . gnu . org / software / hurd / user / tlecarrour / rng - tools . html > ` _
package .
The create _ key process takes awhile so increasing the timeout
may be necessary , e . g . - t 15.
key _ type
The type of the primary key to generate . It must be capable of signing .
' RSA ' or ' DSA ' .
key _ length
The length of the primary key in bits .
name _ real
The real name of the user identity which is represented by the key .
name _ comment
A comment to attach to the user id .
name _ email
An email address for the user .
subkey _ type
The type of the secondary key to generate .
subkey _ length
The length of the secondary key in bits .
expire _ date
The expiration date for the primary and any secondary key .
You can specify an ISO date , A number of days / weeks / months / years ,
an epoch value , or 0 for a non - expiring key .
use _ passphrase
Whether to use a passphrase with the signing key . Passphrase is received
from Pillar .
user
Which user ' s keychain to access , defaults to user Salt is running as .
Passing the user as ` ` salt ` ` will set the GnuPG home directory to the
` ` / etc / salt / gpgkeys ` ` .
gnupghome
Specify the location where GPG keyring and related files are stored .
CLI Example :
. . code - block : : bash
salt - t 15 ' * ' gpg . create _ key'''
|
ret = { 'res' : True , 'fingerprint' : '' , 'message' : '' }
create_params = { 'key_type' : key_type , 'key_length' : key_length , 'name_real' : name_real , 'name_comment' : name_comment , }
gpg = _create_gpg ( user , gnupghome )
if name_email :
create_params [ 'name_email' ] = name_email
if subkey_type :
create_params [ 'subkey_type' ] = subkey_type
if subkey_length :
create_params [ 'subkey_length' ] = subkey_length
if expire_date :
create_params [ 'expire_date' ] = expire_date
if use_passphrase :
gpg_passphrase = __salt__ [ 'pillar.get' ] ( 'gpg_passphrase' )
if not gpg_passphrase :
ret [ 'res' ] = False
ret [ 'message' ] = "gpg_passphrase not available in pillar."
return ret
else :
create_params [ 'passphrase' ] = gpg_passphrase
input_data = gpg . gen_key_input ( ** create_params )
key = gpg . gen_key ( input_data )
if key . fingerprint :
ret [ 'fingerprint' ] = key . fingerprint
ret [ 'message' ] = 'GPG key pair successfully generated.'
else :
ret [ 'res' ] = False
ret [ 'message' ] = 'Unable to generate GPG key pair.'
return ret
|
def design(npos):
    """Make a design matrix for an anisotropy experiment."""
    if npos == 15:
        # rotatable design of Jelinek for kappabridge (see Tauxe, 1998)
        # design matrix for 15 measurement positions
        A = np.array([[.5, .5, 0, -1., 0, 0], [.5, .5, 0, 1., 0, 0],
                      [1, .0, 0, 0, 0, 0], [.5, .5, 0, -1., 0, 0],
                      [.5, .5, 0, 1., 0, 0], [0, .5, .5, 0, -1., 0],
                      [0, .5, .5, 0, 1., 0], [0, 1., 0, 0, 0, 0],
                      [0, .5, .5, 0, -1., 0], [0, .5, .5, 0, 1., 0],
                      [.5, 0, .5, 0, 0, -1.], [.5, 0, .5, 0, 0, 1.],
                      [0, 0, 1., 0, 0, 0], [.5, 0, .5, 0, 0, -1.],
                      [.5, 0, .5, 0, 0, 1.]])
    elif npos == 6:
        # design matrix for 6 measurement positions
        A = np.array([[1., 0, 0, 0, 0, 0], [0, 1., 0, 0, 0, 0],
                      [0, 0, 1., 0, 0, 0], [.5, .5, 0, 1., 0, 0],
                      [0, .5, .5, 0, 1., 0], [.5, 0, .5, 0, 0, 1.]])
    else:
        print("measurement protocol not supported yet")
        return
    B = np.dot(np.transpose(A), A)
    B = linalg.inv(B)
    B = np.dot(B, np.transpose(A))
    return A, B
|
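# Illustrative usage of design() (a sketch; assumes np and linalg are imported
# as in the function above). B is the least-squares solver (A^T A)^-1 A^T, so
# given a vector K of susceptibility measurements, one per position, the six
# best-fit anisotropy parameters would be recovered as s = np.dot(B, K):
A, B = design(6)
print(A.shape, B.shape)  # (6, 6) (6, 6)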
def _policyFileReplaceOrAppend ( this_string , policy_data , append_only = False ) :
'''helper function to take a ADMX policy string for registry . pol file data and
update existing string or append the string to the data'''
|
# we are going to clean off the special pre - fixes , so we get only the valuename
if not policy_data :
policy_data = b''
specialValueRegex = salt . utils . stringutils . to_bytes ( r'(\*\*Del\.|\*\*DelVals\.){0,1}' )
item_key = None
item_value_name = None
data_to_replace = None
if not append_only :
item_key = this_string . split ( b'\00;' ) [ 0 ] . lstrip ( b'[' )
item_value_name = re . sub ( specialValueRegex , b'' , this_string . split ( b'\00;' ) [ 1 ] , flags = re . IGNORECASE )
log . debug ( 'item value name is %s' , item_value_name )
data_to_replace = _regexSearchKeyValueCombo ( policy_data , item_key , item_value_name )
if data_to_replace :
log . debug ( 'replacing %s with %s' , data_to_replace , this_string )
policy_data = policy_data . replace ( data_to_replace , this_string )
else :
log . debug ( 'appending %s' , this_string )
policy_data = b'' . join ( [ policy_data , this_string ] )
return policy_data
|
def installed_add_ons(self):
    """
    :rtype: twilio.rest.preview.marketplace.installed_add_on.InstalledAddOnList
    """
    if self._installed_add_ons is None:
        self._installed_add_ons = InstalledAddOnList(self)
    return self._installed_add_ons
|
def load(cls, filename):
    """Loads the experiment from disk.

    :param filename: the filename of the experiment to load
    :type filename: str
    :return: the experiment
    :rtype: Experiment
    """
    jobject = javabridge.static_call(
        "weka/experiment/Experiment", "read",
        "(Ljava/lang/String;)Lweka/experiment/Experiment;", filename)
    return Experiment(jobject=jobject)
|
def obtain_credentials(credentials):
    """Prompt for credentials if possible.

    If the credentials are "-" then read from stdin without interactive
    prompting.
    """
    if credentials == "-":
        credentials = sys.stdin.readline().strip()
    elif credentials is None:
        credentials = try_getpass("API key (leave empty for anonymous access): ")
    # Ensure that the credentials have a valid form.
    if credentials and not credentials.isspace():
        return Credentials.parse(credentials)
    else:
        return None
|
def list_users(self, limit=None, marker=None):
    """Returns a list of the names of all users for this instance."""
    return self._user_manager.list(limit=limit, marker=marker)
|
def fill_default_satellites ( self , alignak_launched = False ) : # pylint : disable = too - many - branches , too - many - locals , too - many - statements
"""If a required satellite is missing in the configuration , we create a new satellite
on localhost with some default values
: param alignak _ launched : created daemons are to be launched or not
: type alignak _ launched : bool
: return : None"""
|
# Log all satellites list
logger . debug ( "Alignak configured daemons list:" )
self . log_daemons_list ( )
# We must create relations betweens the realms first . This is necessary to have
# an accurate map of the situation !
self . realms . linkify ( )
self . realms . get_default ( check = True )
# Get list of known realms
# realms _ names = [ realm . get _ name ( ) for realm in self . realms ]
# Create one instance of each satellite type if it does not exist . . .
if not self . schedulers :
logger . warning ( "No scheduler defined, I am adding one on 127.0.0.1:%d" , self . daemons_initial_port )
satellite = SchedulerLink ( { 'type' : 'scheduler' , 'name' : 'Default-Scheduler' , 'realm' : self . realms . default . get_name ( ) , 'alignak_launched' : alignak_launched , 'missing_daemon' : True , 'spare' : '0' , 'manage_sub_realms' : '0' , 'address' : '127.0.0.1' , 'port' : self . daemons_initial_port } )
self . daemons_initial_port = self . daemons_initial_port + 1
self . schedulers = SchedulerLinks ( [ satellite ] )
self . missing_daemons . append ( satellite )
if not self . reactionners :
logger . warning ( "No reactionner defined, I am adding one on 127.0.0.1:%d" , self . daemons_initial_port )
satellite = ReactionnerLink ( { 'type' : 'reactionner' , 'name' : 'Default-Reactionner' , 'realm' : self . realms . default . get_name ( ) , 'alignak_launched' : alignak_launched , 'missing_daemon' : True , 'spare' : '0' , 'manage_sub_realms' : '0' , 'address' : '127.0.0.1' , 'port' : self . daemons_initial_port } )
self . daemons_initial_port = self . daemons_initial_port + 1
self . reactionners = ReactionnerLinks ( [ satellite ] )
self . missing_daemons . append ( satellite )
if not self . pollers :
logger . warning ( "No poller defined, I am adding one on 127.0.0.1:%d" , self . daemons_initial_port )
satellite = PollerLink ( { 'type' : 'poller' , 'name' : 'Default-Poller' , 'realm' : self . realms . default . get_name ( ) , 'alignak_launched' : alignak_launched , 'missing_daemon' : True , 'spare' : '0' , 'manage_sub_realms' : '0' , 'address' : '127.0.0.1' , 'port' : self . daemons_initial_port } )
self . daemons_initial_port = self . daemons_initial_port + 1
self . pollers = PollerLinks ( [ satellite ] )
self . missing_daemons . append ( satellite )
if not self . brokers :
logger . warning ( "No broker defined, I am adding one on 127.0.0.1:%d" , self . daemons_initial_port )
satellite = BrokerLink ( { 'type' : 'broker' , 'name' : 'Default-Broker' , 'realm' : self . realms . default . get_name ( ) , 'alignak_launched' : alignak_launched , 'missing_daemon' : True , 'spare' : '0' , 'manage_sub_realms' : '0' , 'address' : '127.0.0.1' , 'port' : self . daemons_initial_port } )
self . daemons_initial_port = self . daemons_initial_port + 1
self . brokers = BrokerLinks ( [ satellite ] )
self . missing_daemons . append ( satellite )
if not self . receivers :
logger . warning ( "No receiver defined, I am adding one on 127.0.0.1:%d" , self . daemons_initial_port )
satellite = ReceiverLink ( { 'type' : 'receiver' , 'name' : 'Default-Receiver' , 'alignak_launched' : alignak_launched , 'missing_daemon' : True , 'spare' : '0' , 'manage_sub_realms' : '0' , 'address' : '127.0.0.1' , 'port' : self . daemons_initial_port } )
self . daemons_initial_port = self . daemons_initial_port + 1
self . receivers = ReceiverLinks ( [ satellite ] )
self . missing_daemons . append ( satellite )
# Assign default realm to the satellites that do not have a defined realm
for satellites_list in [ self . pollers , self . brokers , self . reactionners , self . receivers , self . schedulers ] :
for satellite in satellites_list : # Here the ' realm ' property is not yet a real realm object uuid . . .
# but still a realm name ! Make it a realm uuid
if not getattr ( satellite , 'realm' , None ) :
satellite . realm = self . realms . default . get_name ( )
sat_realm = self . realms . find_by_name ( satellite . realm )
if not sat_realm :
self . add_error ( "The %s '%s' is affected to an unknown realm: '%s'" % ( satellite . type , satellite . name , satellite . realm ) )
continue
# satellite . realm _ name = sat _ realm . get _ name ( )
logger . info ( "Tagging satellite '%s' with realm %s" , satellite . name , satellite . realm )
satellite . realm = sat_realm . uuid
satellite . realm_name = sat_realm . get_name ( )
# Alert for spare daemons
if getattr ( satellite , 'spare' , False ) :
self . add_warning ( "The %s '%s' is declared as a spare daemon. " "Spare mode is not yet implemented and it will be ignored." % ( satellite . type , satellite . name ) )
continue
# Alert for non active daemons
if not getattr ( satellite , 'active' , False ) :
self . add_warning ( "The %s '%s' is declared as a non active daemon. " "It will be ignored." % ( satellite . type , satellite . name ) )
continue
# And tell the realm that it knows the satellite
realm_satellites = getattr ( sat_realm , '%ss' % satellite . type )
if satellite . uuid not in realm_satellites :
realm_satellites . append ( satellite . uuid )
# If the satellite manages sub realms . . .
# We update the " potential _ " satellites that may be used for this realm
if satellite . manage_sub_realms :
for realm_uuid in sat_realm . all_sub_members :
logger . debug ( "Linkify %s '%s' with realm %s" , satellite . type , satellite . name , self . realms [ realm_uuid ] . get_name ( ) )
realm_satellites = getattr ( self . realms [ realm_uuid ] , 'potential_%ss' % satellite . type )
if satellite . uuid not in realm_satellites :
realm_satellites . append ( satellite . uuid )
# Parse hosts for realms and set host in the default realm if no realm is set
hosts_realms_names = set ( )
logger . debug ( "Hosts realm configuration:" )
for host in self . hosts :
if not getattr ( host , 'realm' , None ) : # todo : perharps checking hostgroups realm ( if any ) to set an hostgroup realm
# rather than the default realm
logger . debug ( "Host: %s, realm: %s, hostgroups: %s" , host . get_name ( ) , host . realm , host . hostgroups )
host . realm = self . realms . default . get_name ( )
host . got_default_realm = True
host_realm = self . realms . find_by_name ( host . realm )
if not host_realm :
self . add_error ( "The host '%s' is affected to an unknown realm: '%s'" % ( host . get_name ( ) , host . realm ) )
continue
host . realm_name = host_realm . get_name ( )
host_realm . add_members ( host . get_name ( ) )
logger . debug ( "- tagging host '%s' with realm %s" , host . get_name ( ) , host . realm_name )
hosts_realms_names . add ( host . realm_name )
logger . debug ( " - %s: realm %s, active %s, passive %s" , host . get_name ( ) , host_realm . get_name ( ) , host . active_checks_enabled , host . passive_checks_enabled )
host_realm . passively_checked_hosts = host_realm . passively_checked_hosts or host . passive_checks_enabled
host_realm . actively_checked_hosts = host_realm . actively_checked_hosts or host . passive_checks_enabled
hosts_realms_names . add ( host . realm )
# Parse hostgroups for realms and set hostgroup in the default realm if no realm is set
hostgroups_realms_names = set ( )
logger . debug ( "Hostgroups realm configuration:" )
for hostgroup in self . hostgroups :
if not getattr ( hostgroup , 'realm' , None ) :
hostgroup . realm = self . realms . default . get_name ( )
hostgroup . got_default_realm = True
hostgroup_realm = self . realms . find_by_name ( hostgroup . realm )
if not hostgroup_realm :
self . add_error ( "The hostgroup '%s' is affected to an unknown realm: '%s'" % ( hostgroup . get_name ( ) , hostgroup . realm ) )
continue
hostgroup . realm_name = hostgroup_realm . get_name ( )
hostgroup_realm . add_group_members ( hostgroup . get_name ( ) )
logger . debug ( "- tagging hostgroup '%s' with realm %s" , hostgroup . get_name ( ) , hostgroup . realm_name )
hostgroups_realms_names . add ( hostgroup . realm_name )
# Check that all daemons and realms are coherent
for satellites_list in [ self . pollers , self . brokers , self . reactionners , self . receivers , self . schedulers ] :
sat_class = satellites_list . inner_class
# Collect the names of all the realms that are managed by all the satellites
sat_realms_names = set ( )
for satellite in satellites_list :
for realm in self . realms :
realm_satellites = getattr ( realm , '%ss' % satellite . type )
realm_potential_satellites = getattr ( realm , 'potential_%ss' % satellite . type )
if satellite . uuid in realm_satellites or satellite . uuid in realm_potential_satellites :
sat_realms_names . add ( realm . get_name ( ) )
if not hosts_realms_names . issubset ( sat_realms_names ) : # Check if a daemon is able to manage the concerned hosts . . .
for realm_name in hosts_realms_names . difference ( sat_realms_names ) :
realm = self . realms . find_by_name ( realm_name )
self . add_warning ( "Some hosts exist in the realm '%s' but no %s is " "defined for this realm." % ( realm_name , sat_class . my_type ) )
if not alignak_launched :
continue
# Add a self - generated daemon
logger . warning ( "Adding a %s for the realm: %s" , satellite . type , realm_name )
new_daemon = sat_class ( { 'type' : satellite . type , 'name' : '%s-%s' % ( satellite . type , realm_name ) , 'alignak_launched' : True , 'missing_daemon' : True , 'realm' : realm . uuid , 'manage_sub_realms' : '0' , 'spare' : '0' , 'address' : '127.0.0.1' , 'port' : self . daemons_initial_port } )
satellites_list . add_item ( new_daemon )
# And tell the realm that it knows the satellite
realm_satellites = getattr ( realm , '%ss' % satellite . type )
if new_daemon . uuid not in realm_satellites :
realm_satellites . append ( new_daemon . uuid )
self . add_warning ( "Added a %s (%s, %s) for the realm '%s'" % ( satellite . type , '%s-%s' % ( satellite . type , realm_name ) , satellite . uri , realm_name ) )
self . daemons_initial_port = self . daemons_initial_port + 1
self . missing_daemons . append ( new_daemon )
logger . debug ( "Realms hosts configuration:" )
for realm in self . realms :
logger . debug ( "Realm: %s, actively checked hosts %s, passively checked hosts %s" , realm . get_name ( ) , realm . actively_checked_hosts , realm . passively_checked_hosts )
logger . info ( "Realm: %s, hosts: %s, groups: %s" , realm . get_name ( ) , realm . members , realm . group_members )
# Log all satellites list
logger . debug ( "Alignak definitive daemons list:" )
self . log_daemons_list ( )
|
def mkstemp(suffix="", prefix=template, dir=None, text=False):
    """User-callable function to create and return a unique temporary
    file. The return value is a pair (fd, name) where fd is the
    file descriptor returned by os.open, and name is the filename.

    If 'suffix' is specified, the file name will end with that suffix,
    otherwise there will be no suffix.

    If 'prefix' is specified, the file name will begin with that prefix,
    otherwise a default prefix is used.

    If 'dir' is specified, the file will be created in that directory,
    otherwise a default directory is used.

    If 'text' is specified and true, the file is opened in text
    mode. Else (the default) the file is opened in binary mode. On
    some operating systems, this makes no difference.

    The file is readable and writable only by the creating user ID.
    If the operating system uses permission bits to indicate whether a
    file is executable, the file is executable by no one. The file
    descriptor is not inherited by children of this process.

    Caller is responsible for deleting the file when done with it.
    """
    if dir is None:
        dir = gettempdir()
    if text:
        flags = _text_openflags
    else:
        flags = _bin_openflags
    return _mkstemp_inner(dir, prefix, suffix, flags)
|
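# Illustrative usage of mkstemp (a sketch; it follows the same calling
# convention as the standard library's tempfile.mkstemp, and the caller owns
# both the file descriptor and the file on disk):
import os
fd, path = mkstemp(suffix=".log", prefix="job_")
try:
    os.write(fd, b"hello\n")
finally:
    os.close(fd)
    os.remove(path)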
def check_if_this_file_exist(filename):
    """Check if this file exists and is not a directory.

    This function will check if the given filename actually exists
    and that it is not a directory.

    Arguments:
        filename {string} -- filename

    Return:
        True: if the file exists and is not a directory
        False: if it does not exist or is a directory
    """
    # get the absolute path
    filename = os.path.abspath(filename)

    # Booleans
    this_file_exist = os.path.exists(filename)
    a_directory = os.path.isdir(filename)

    result = this_file_exist and not a_directory
    if result == False:
        raise ValueError('The filename given was either non existent or was a directory')
    else:
        return result
|
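# Illustrative usage (a sketch; note that, as written above, the function
# raises ValueError rather than returning False for a missing path or a
# directory):
try:
    check_if_this_file_exist("/etc/hostname")
    print("file is usable")
except ValueError as err:
    print(err)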
def onMessageUnsent(self, mid=None, author_id=None, thread_id=None,
                    thread_type=None, ts=None, msg=None):
    """Called when the client is listening, and someone unsends (deletes for everyone) a message.

    :param mid: ID of the unsent message
    :param author_id: The ID of the person who unsent the message
    :param thread_id: Thread ID that the action was sent to. See :ref:`intro_threads`
    :param thread_type: Type of thread that the action was sent to. See :ref:`intro_threads`
    :param ts: A timestamp of the action
    :param msg: A full set of the data received
    :type thread_type: models.ThreadType
    """
    log.info(
        "{} unsent the message {} in {} ({}) at {}s".format(
            author_id, repr(mid), thread_id, thread_type.name, ts / 1000
        )
    )
|
def switch_off(self, *args):
    """Sets the state of the switch to False if off_check() returns True,
    given the arguments provided in args.

    :param args: variable length argument list
    :return: Boolean. Returns True if the operation is successful
    """
    if self.off_check(*args):
        return self._switch.switch(False)
    else:
        return False
|
def check_header_validity(header):
    """Verifies that header value is a string which doesn't contain
    leading whitespace or return characters. This prevents unintended
    header injection.

    :param header: tuple, in the format (name, value).
    """
    name, value = header

    if isinstance(value, bytes):
        pat = _CLEAN_HEADER_REGEX_BYTE
    else:
        pat = _CLEAN_HEADER_REGEX_STR
    try:
        if not pat.match(value):
            raise InvalidHeader("Invalid return character or leading space in header: %s" % name)
    except TypeError:
        raise InvalidHeader("Value for header {%s: %s} must be of type str or "
                            "bytes, not %s" % (name, value, type(value)))
|
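# Illustrative usage (a sketch; assumes the surrounding module defines the
# _CLEAN_HEADER_REGEX_* patterns and InvalidHeader, as in requests.utils):
check_header_validity(("X-Api-Key", "abc123"))  # passes silently
try:
    check_header_validity(("X-Api-Key", "abc\r\nInjected: 1"))
except InvalidHeader as err:
    print(err)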
def joined ( name , host , user = 'rabbit' , ram_node = None , runas = 'root' ) :
'''Ensure the current node joined to a cluster with node user @ host
name
Irrelevant , not used ( recommended : user @ host )
user
The user of node to join to ( default : rabbit )
host
The host of node to join to
ram _ node
Join node as a RAM node
runas
The user to run the rabbitmq command as'''
|
ret = { 'name' : name , 'result' : True , 'comment' : '' , 'changes' : { } }
status = __salt__ [ 'rabbitmq.cluster_status' ] ( )
if '{0}@{1}' . format ( user , host ) in status :
ret [ 'comment' ] = 'Already in cluster'
return ret
if not __opts__ [ 'test' ] :
result = __salt__ [ 'rabbitmq.join_cluster' ] ( host , user , ram_node , runas = runas )
if 'Error' in result :
ret [ 'result' ] = False
ret [ 'comment' ] = result [ 'Error' ]
return ret
elif 'Join' in result :
ret [ 'comment' ] = result [ 'Join' ]
# If we ' ve reached this far before returning , we have changes .
ret [ 'changes' ] = { 'old' : '' , 'new' : '{0}@{1}' . format ( user , host ) }
if __opts__ [ 'test' ] :
ret [ 'result' ] = None
ret [ 'comment' ] = 'Node is set to join cluster {0}@{1}' . format ( user , host )
return ret
|
def Import(context, request):
    """Read analysis results from an XML string"""
    errors = []
    logs = []

    # Do import stuff here
    logs.append("Generic XML Import is not available")

    results = {'errors': errors, 'log': logs}
    return json.dumps(results)
|
def _get_package_manager(self):
    '''Get the package manager.

    :return:
    '''
    ret = None
    if self.__grains__.get('os_family') in ('Kali', 'Debian'):
        ret = 'apt-get'
    elif self.__grains__.get('os_family', '') == 'Suse':
        ret = 'zypper'
    elif self.__grains__.get('os_family', '') == 'redhat':
        ret = 'yum'

    if ret is None:
        raise InspectorKiwiProcessorException(
            'Unsupported platform: {0}'.format(self.__grains__.get('os_family')))

    return ret
|
def mangle_package_path ( self , files ) :
"""Mangle paths for post - UsrMove systems .
If the system implements UsrMove , all files will be in
' / usr / [ s ] bin ' . This method substitutes all the / [ s ] bin
references in the ' files ' list with ' / usr / [ s ] bin ' .
: param files : the list of package managed files"""
|
paths = [ ]
def transform_path ( path ) : # Some packages actually own paths in / bin : in this case ,
# duplicate the path as both the / and / usr version .
skip_paths = [ "/bin/rpm" , "/bin/mailx" ]
if path in skip_paths :
return ( path , os . path . join ( "/usr" , path [ 1 : ] ) )
return ( re . sub ( r'(^)(/s?bin)' , r'\1/usr\2' , path ) , )
if self . usrmove :
for f in files :
paths . extend ( transform_path ( f ) )
return paths
else :
return files
|
def bddvar ( name , index = None ) :
r"""Return a unique BDD variable .
A Boolean * variable * is an abstract numerical quantity that may assume any
value in the set : math : ` B = \ { 0 , 1 \ } ` .
The ` ` bddvar ` ` function returns a unique Boolean variable instance
represented by a binary decision diagram .
Variable instances may be used to symbolically construct larger BDDs .
A variable is defined by one or more * names * ,
and zero or more * indices * .
Multiple names establish hierarchical namespaces ,
and multiple indices group several related variables .
If the ` ` name ` ` parameter is a single ` ` str ` ` ,
it will be converted to ` ` ( name , ) ` ` .
The ` ` index ` ` parameter is optional ;
when empty , it will be converted to an empty tuple ` ` ( ) ` ` .
If the ` ` index ` ` parameter is a single ` ` int ` ` ,
it will be converted to ` ` ( index , ) ` ` .
Given identical names and indices , the ` ` bddvar ` ` function will always
return the same variable :
> > > bddvar ( ' a ' , 0 ) is bddvar ( ' a ' , 0)
True
To create several single - letter variables :
> > > a , b , c , d = map ( bddvar , ' abcd ' )
To create variables with multiple names ( inner - most first ) :
> > > fifo _ push = bddvar ( ( ' push ' , ' fifo ' ) )
> > > fifo _ pop = bddvar ( ( ' pop ' , ' fifo ' ) )
. . seealso : :
For creating arrays of variables with incremental indices ,
use the : func : ` pyeda . boolalg . bfarray . bddvars ` function ."""
|
bvar = boolfunc . var ( name , index )
try :
var = _VARS [ bvar . uniqid ]
except KeyError :
var = _VARS [ bvar . uniqid ] = BDDVariable ( bvar )
_BDDS [ var . node ] = var
return var
|
def reverseGeocode ( self , location ) :
"""The reverseGeocode operation determines the address at a particular
x / y location . You pass the coordinates of a point location to the
geocoding service , and the service returns the address that is
closest to the location .
Input :
location - either an Point object or a list defined as [ X , Y ]"""
|
params = { "f" : "json" }
url = self . _url + "/reverseGeocode"
if isinstance ( location , Point ) :
params [ 'location' ] = location . asDictionary
elif isinstance ( location , list ) :
params [ 'location' ] = "%s,%s" % ( location [ 0 ] , location [ 1 ] )
else :
raise Exception ( "Invalid location" )
return self . _post ( url = url , param_dict = params , securityHandler = self . _securityHandler , proxy_url = self . _proxy_url , proxy_port = self . _proxy_port )
|
def add_clink(self, my_clink):
    """Adds a clink to the causalRelations layer.

    @type my_clink: L{Cclink}
    @param my_clink: clink object
    """
    if self.causalRelations_layer is None:
        self.causalRelations_layer = CcausalRelations()
        self.root.append(self.causalRelations_layer.get_node())
    self.causalRelations_layer.add_clink(my_clink)
|
def modify_process_property ( self , key , value , pid = None ) :
'''modify _ process _ property ( self , key , value , pid = None )
Modify process output property .
Please note that the process property key provided must be declared as an output property in the relevant service specification .
: Parameters :
* * key * ( ` String ` ) - - key of property to modify
* * key * ( ` value ` ) - - value of property to modify
* * pid * ( ` string ` ) - - Identifier of an existing process
: Example :
. . code - block : : python
pid = opereto _ client . create _ process ( service = ' simple _ shell _ command ' , title = ' Test simple shell command service ' )
opereto _ client . modify _ process _ property ( " my _ output _ param " , " 1 " , pid )'''
|
pid = self . _get_pid ( pid )
request_data = { "key" : key , "value" : value }
return self . _call_rest_api ( 'post' , '/processes/' + pid + '/output' , data = request_data , error = 'Failed to modify output property [%s]' % key )
|
def FromManagedObject ( self ) :
"""Method creates and returns an object of _ GenericMO class using the classId and other information from the
managed object ."""
|
import os
if ( isinstance ( self . mo , ManagedObject ) == True ) :
self . classId = self . mo . classId
if self . mo . getattr ( 'Dn' ) :
self . dn = self . mo . getattr ( 'Dn' )
if self . mo . getattr ( 'Rn' ) :
self . rn = self . mo . getattr ( 'Rn' )
elif self . dn :
self . rn = os . path . basename ( self . dn )
for property in UcsUtils . GetUcsPropertyMetaAttributeList ( self . mo . classId ) :
self . properties [ property ] = self . mo . getattr ( property )
if len ( self . mo . child ) :
for ch in self . mo . child :
if not ch . getattr ( 'Dn' ) :
_Dn = self . mo . getattr ( 'Dn' ) + "/" + ch . getattr ( 'Rn' )
ch . setattr ( 'Dn' , _Dn )
gmo = _GenericMO ( mo = ch )
self . child . append ( gmo )
|
def weave_layers ( infiles , output_file , log , context ) :
"""Apply text layer and / or image layer changes to baseline file
This is where the magic happens . infiles will be the main PDF to modify ,
and optional . text . pdf and . image - layer . pdf files , organized however ruffus
organizes them .
From . text . pdf , we copy the content stream ( which contains the Tesseract
OCR results ) , and rotate it into place . The first time we do this , we also
copy the GlyphlessFont , and then reference that font again .
For . image - layer . pdf , we check if this is a " pointer " to the original file ,
or a new file . If a new file , we replace the page and remember that we
replaced this page .
Every 100 open files , we save intermediate results , to avoid any resource
limits , since pikepdf / qpdf need to keep a lot of open file handles in the
background . When objects are copied from one file to another qpdf , qpdf
doesn ' t actually copy the data until asked to write , so all the resources
it may need to remain available .
For completeness , we set up a / ProcSet on every page , although it ' s
unlikely any PDF viewer cares about this anymore ."""
|
def input_sorter ( key ) :
try :
return page_number ( key )
except ValueError :
return - 1
flat_inputs = sorted ( flatten_groups ( infiles ) , key = input_sorter )
groups = groupby ( flat_inputs , key = input_sorter )
# Extract first item
_ , basegroup = next ( groups )
base = list ( basegroup ) [ 0 ]
path_base = Path ( base ) . resolve ( )
pdf_base = pikepdf . open ( path_base )
font , font_key , procset = None , None , None
pdfinfo = context . get_pdfinfo ( )
pagerefs = { }
procset = pdf_base . make_indirect ( pikepdf . Object . parse ( b'[ /PDF /Text /ImageB /ImageC /ImageI ]' ) )
replacements = 0
# Iterate rest
for page_num , layers in groups :
layers = list ( layers )
log . debug ( page_num )
log . debug ( layers )
text = next ( ( ii for ii in layers if ii . endswith ( '.text.pdf' ) ) , None )
image = next ( ( ii for ii in layers if ii . endswith ( '.image-layer.pdf' ) ) , None )
if text and not font :
font , font_key = _find_font ( text , pdf_base )
replacing = False
content_rotation = pdfinfo [ page_num - 1 ] . rotation
path_image = Path ( image ) . resolve ( ) if image else None
if path_image is not None and path_image != path_base : # We are replacing the old page with a rasterized PDF of the new
# page
log . debug ( "Replace" )
old_objgen = pdf_base . pages [ page_num - 1 ] . objgen
with pikepdf . open ( image ) as pdf_image :
replacements += 1
image_page = pdf_image . pages [ 0 ]
pdf_base . pages [ page_num - 1 ] = image_page
# We ' re adding a new page , which will get a new objgen number pair ,
# so we need to update any references to it . qpdf did not like
# my attempt to update the old object in place , but that is an
# option to consider
pagerefs [ old_objgen ] = pdf_base . pages [ page_num - 1 ] . objgen
replacing = True
autorotate_correction = context . get_rotation ( page_num - 1 )
if replacing :
content_rotation = autorotate_correction
text_rotation = autorotate_correction
text_misaligned = ( text_rotation - content_rotation ) % 360
log . debug ( '%r' , [ text_rotation , autorotate_correction , text_misaligned , content_rotation ] , )
if text and font : # Graft the text layer onto this page , whether new or old
strip_old = context . get_options ( ) . redo_ocr
_weave_layers_graft ( pdf_base = pdf_base , page_num = page_num , text = text , font = font , font_key = font_key , rotation = text_misaligned , procset = procset , strip_old_text = strip_old , log = log , )
# Correct the rotation if applicable
pdf_base . pages [ page_num - 1 ] . Rotate = ( content_rotation - autorotate_correction ) % 360
if replacements % MAX_REPLACE_PAGES == 0 : # Periodically save and reload the Pdf object . This will keep a
# lid on our memory usage for very large files . Attach the font to
# page 1 even if page 1 doesn ' t use it , so we have a way to get it
# back .
# TODO refactor this to outside the loop
page0 = pdf_base . pages [ 0 ]
_update_page_resources ( page = page0 , font = font , font_key = font_key , procset = procset )
interim = output_file + f'_working{page_num}.pdf'
pdf_base . save ( interim )
pdf_base . close ( )
pdf_base = pikepdf . open ( interim )
procset = pdf_base . pages [ 0 ] . Resources . ProcSet
font , font_key = None , None
# Reacquire this information
_fix_toc ( pdf_base , pagerefs , log )
pdf_base . save ( output_file )
pdf_base . close ( )
|
def get_coiledcoil_region(self, cc_number=0, cutoff=7.0, min_kihs=2):
    """Assembly containing only assigned regions (i.e. regions with contiguous KnobsIntoHoles)."""
    g = self.filter_graph(self.graph, cutoff=cutoff, min_kihs=min_kihs)
    ccs = sorted(networkx.connected_component_subgraphs(g, copy=True),
                 key=lambda x: len(x.nodes()), reverse=True)
    cc = ccs[cc_number]
    helices = [x for x in g.nodes() if x.number in cc.nodes()]
    assigned_regions = self.get_assigned_regions(helices=helices,
                                                 include_alt_states=False,
                                                 complementary_only=True)
    coiledcoil_monomers = [h.get_slice_from_res_id(*assigned_regions[h.number]) for h in helices]
    return Assembly(coiledcoil_monomers)
|
def create_alias ( FunctionName , Name , FunctionVersion , Description = "" , region = None , key = None , keyid = None , profile = None ) :
'''Given a valid config , create an alias to a function .
Returns { created : true } if the alias was created and returns
{ created : False } if the alias was not created .
CLI Example :
. . code - block : : bash
salt myminion boto _ lamba . create _ alias my _ function my _ alias $ LATEST " An alias "'''
|
try :
conn = _get_conn ( region = region , key = key , keyid = keyid , profile = profile )
alias = conn . create_alias ( FunctionName = FunctionName , Name = Name , FunctionVersion = FunctionVersion , Description = Description )
if alias :
log . info ( 'The newly created alias name is %s' , alias [ 'Name' ] )
return { 'created' : True , 'name' : alias [ 'Name' ] }
else :
log . warning ( 'Alias was not created' )
return { 'created' : False }
except ClientError as e :
return { 'created' : False , 'error' : __utils__ [ 'boto3.get_error' ] ( e ) }
|
def extend_columns(self, column_data, kind):
    """Extend column metadata.

    :param column_data: list of (rel_name, column_name) tuples
    :param kind: either 'tables' or 'views'
    :return:
    """
    # 'column_data' is a generator object. It can throw an exception while
    # being consumed. This could happen if the user has launched the app
    # without specifying a database name. This exception must be handled to
    # prevent crashing.
    try:
        column_data = [self.escaped_names(d, '"') for d in column_data]
    except Exception:
        column_data = []
    metadata = self.dbmetadata[kind]
    for relname, column in column_data:
        metadata[self.dbname][relname].append(column)
        self.all_completions.add(column)
|
def reshape(self, shape: tf.TensorShape) -> 'TensorFluent':
    '''Returns a TensorFluent for the reshape operation with given `shape`.

    Args:
        shape: The output's shape.

    Returns:
        A TensorFluent wrapping the reshape operation.
    '''
    t = tf.reshape(self.tensor, shape)
    scope = self.scope.as_list()
    batch = self.batch
    return TensorFluent(t, scope, batch=batch)
|
def step_worker(step, pipe, max_entities):
    """All messages follow the form: <message>, <data>

    Valid messages:
        run, <input_data>
        finalise, None
        next, None
        stop, None
    """
    state = None
    while True:
        message, input = pipe.recv()
        if message == 'run':
            state = step.run(input, max_entities)
        elif message == 'finalise':
            state = step.finalise(max_entities)
        elif message == 'next':
            try:
                data = state.next()
                sys.stderr.write(' {}\n'.format(step.name))
                sys.stderr.write('  * {}\n'.format(', '.join(key.name for key in data)))
                sys.stderr.write('  * {}\n'.format(', '.join(str(value) for value in data.values())))
                pipe.send(('data', {'step': step, 'data': data}))
            except StopIteration:
                pipe.send(('stop', {'step': step}))
                state = None
        elif message == 'stop':
            break
|
def revnet_step ( name , x , hparams , reverse = True ) :
"""One step of glow generative flow .
Actnorm + invertible 1X1 conv + affine _ coupling .
Args :
name : used for variable scope .
x : input
hparams : coupling _ width is the only hparam that is being used in
this function .
reverse : forward or reverse pass .
Returns :
z : Output of one step of reversible flow ."""
|
with tf . variable_scope ( name , reuse = tf . AUTO_REUSE ) :
if hparams . coupling == "additive" :
coupling_layer = functools . partial ( additive_coupling , name = "additive" , reverse = reverse , mid_channels = hparams . coupling_width , activation = hparams . activation , dropout = hparams . coupling_dropout )
else :
coupling_layer = functools . partial ( affine_coupling , name = "affine" , reverse = reverse , mid_channels = hparams . coupling_width , activation = hparams . activation , dropout = hparams . coupling_dropout )
ops = [ functools . partial ( actnorm , name = "actnorm" , reverse = reverse ) , functools . partial ( invertible_1x1_conv , name = "invertible" , reverse = reverse ) , coupling_layer ]
if reverse :
ops = ops [ : : - 1 ]
objective = 0.0
for op in ops :
x , curr_obj = op ( x = x )
objective += curr_obj
return x , objective
|
def new_message_email ( sender , instance , signal , subject_prefix = _ ( u'New Message: %(subject)s' ) , template_name = "django_messages/new_message.html" , default_protocol = None , * args , ** kwargs ) :
"""This function sends an email and is called via Django ' s signal framework .
Optional arguments :
` ` template _ name ` ` : the template to use
` ` subject _ prefix ` ` : prefix for the email subject .
` ` default _ protocol ` ` : default protocol in site URL passed to template"""
|
if default_protocol is None :
default_protocol = getattr ( settings , 'DEFAULT_HTTP_PROTOCOL' , 'http' )
if 'created' in kwargs and kwargs [ 'created' ] :
try :
current_domain = Site . objects . get_current ( ) . domain
subject = subject_prefix % { 'subject' : instance . subject }
message = render_to_string ( template_name , { 'site_url' : '%s://%s' % ( default_protocol , current_domain ) , 'message' : instance , } )
if instance . recipient . email != "" :
send_mail ( subject , message , settings . DEFAULT_FROM_EMAIL , [ instance . recipient . email , ] )
except Exception as e : # print e
pass
|
def strictjoin ( L , keycols , nullvals = None , renaming = None , Names = None ) :
"""Combine two or more numpy ndarray with structured dtypes on common key
column ( s ) .
Merge a list ( or dictionary ) of numpy arrays , given by ` L ` , on key
columns listed in ` keycols ` .
The ` ` strictjoin ` ` assumes the following restrictions :
* each element of ` keycols ` must be a valid column name in every
array in ` L ` , and all of the same data - type .
* for each column ` col ` in ` keycols ` , and each array ` A ` in ` L ` , the
values in ` A [ col ] ` must be unique , e . g . no repeats of values .
( Actually , the uniqueness criterion need not hold for the first
tabarray in ` L ` , but must hold for all the subsequent ones . )
* the * non * - key - column column names in each of the arrays must be
disjoint from each other - - or disjoint after a renaming ( see below ) .
An error will be thrown if these conditions are not met .
For a wrapper that attempts to meet these restrictions , see
: func : ` tabular . spreadsheet . join ` .
If you don ' t provide a value of ` keycols ` , the algorithm will attempt to
infer which columns should be used by trying to find the largest set of
common column names that contain unique values in each array and have the
same data type . An error will be thrown if no such inference can be made .
* Renaming of overlapping columns *
If the non - keycol column names of the arrays overlap , ` ` join ` ` will
by default attempt to rename the columns by using a simple
convention :
* If ` L ` is a list , it will append the number in the list to the
key associated with the array .
* If ` L ` is a dictionary , the algorithm will append the string
representation of the key associated with an array to the
overlapping columns from that array .
You can override the default renaming scheme using the ` renaming `
parameter .
* Nullvalues for keycolumn differences *
If there are regions of the keycolumns that are not overlapping
between merged arrays , ` join ` will fill in the relevant entries
with null values chosen by default :
* ' 0 ' for integer columns
* ' 0.0 ' for float columns
* the empty character ( ' ' ) for string columns .
* * Parameters * *
* * L * * : list or dictionary
Numpy recarrays to merge . If ` L ` is a dictionary , the keys
name each numpy recarray , and the corresponding values are
the actual numpy recarrays .
* * keycols * * : list of strings
List of the names of the key columns along which to do the
merging .
* * nullvals * * : function , optional
A function that returns a null value for a numpy format
descriptor string , e . g . ` ` ' < i4 ' ` ` or ` ` ' | S5 ' ` ` .
See the default function for further documentation :
: func : ` tabular . spreadsheet . DEFAULT _ NULLVALUEFORMAT `
* * renaming * * : dictionary of dictionaries , optional
Dictionary mapping each input numpy recarray to a
dictionary mapping each original column name to its new
name following the convention above .
For example , the result returned by :
: func : ` tabular . spreadsheet . DEFAULT _ RENAMER `
* * Returns * *
* * result * * : numpy ndarray with structured dtype
Result of the join , e . g . the result of merging the input
numpy arrays defined in ` L ` on the key columns listed in
` keycols ` .
* * See Also : * *
: func : ` tabular . spreadsheet . join `"""
|
if isinstance ( L , dict ) :
Names = L . keys ( )
LL = L . values ( )
else :
if Names is None :
Names = range ( len ( L ) )
else :
assert len ( Names ) == len ( L )
LL = L
if isinstance ( keycols , str ) :
keycols = [ l . strip ( ) for l in keycols . split ( ',' ) ]
assert all ( [ set ( keycols ) <= set ( l . dtype . names ) for l in LL ] ) , ( 'keycols,' , str ( keycols ) , ', must be valid column names in all arrays being merged.' )
assert all ( [ isunique ( l [ keycols ] ) for l in LL [ 1 : ] ] ) , ( 'values in keycol columns,' , str ( keycols ) , ', must be unique in all arrays being merged.' )
if renaming is None :
renaming = { }
assert RenamingIsInCorrectFormat ( renaming , L , Names = Names ) , 'renaming is not in proper format ... '
L = dict ( [ ( k , ll . copy ( ) ) for ( k , ll ) in zip ( Names , LL ) ] )
LL = L . values ( )
for i in Names :
l = L [ i ]
l . dtype = np . dtype ( l . dtype . descr )
if i in renaming . keys ( ) :
for k in renaming [ i ] . keys ( ) :
if k not in keycols :
renamecol ( L [ i ] , k , renaming [ i ] [ k ] )
l . sort ( order = keycols )
commons = set ( Commons ( [ l . dtype . names for l in LL ] ) ) . difference ( keycols )
assert len ( commons ) == 0 , ( 'The following (non-keycol) column names ' 'appear in more than one array being merged:' , str ( commons ) )
Result = colstack ( [ ( L [ Names [ 0 ] ] [ keycols ] ) [ 0 : 0 ] ] + [ deletecols ( L [ k ] [ 0 : 0 ] , keycols ) for k in Names if deletecols ( L [ k ] [ 0 : 0 ] , keycols ) != None ] )
PL = powerlist ( Names )
ToGet = utils . listunion ( [ [ p for p in PL if len ( p ) == k ] for k in range ( 1 , len ( Names ) ) ] ) + [ PL [ - 1 ] ]
for I in ToGet [ : : - 1 ] :
Ref = L [ I [ 0 ] ] [ keycols ]
for j in I [ 1 : ] :
if len ( Ref ) > 0 :
Ref = Ref [ fast . recarrayisin ( Ref , L [ j ] [ keycols ] , weak = True ) ]
else :
break
if len ( Ref ) > 0 :
D = [ fast . recarrayisin ( L [ j ] [ keycols ] , Ref , weak = True ) for j in I ]
Ref0 = L [ I [ 0 ] ] [ keycols ] [ D [ 0 ] ]
Reps0 = np . append ( np . append ( [ - 1 ] , ( Ref0 [ 1 : ] != Ref0 [ : - 1 ] ) . nonzero ( ) [ 0 ] ) , [ len ( Ref0 ) - 1 ] )
Reps0 = Reps0 [ 1 : ] - Reps0 [ : - 1 ]
NewRows = colstack ( [ Ref0 ] + [ deletecols ( L [ j ] [ D [ i ] ] , keycols ) . repeat ( Reps0 ) if i > 0 else deletecols ( L [ j ] [ D [ i ] ] , keycols ) for ( i , j ) in enumerate ( I ) if deletecols ( L [ j ] [ D [ i ] ] , keycols ) != None ] )
for ( i , j ) in enumerate ( I ) :
L [ j ] = L [ j ] [ np . invert ( D [ i ] ) ]
Result = rowstack ( [ Result , NewRows ] , mode = 'nulls' , nullvals = nullvals )
return Result
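A minimal usage sketch for strictjoin , assuming two numpy structured arrays that share a unique key column and that the tabular spreadsheet helpers this function relies on are importable ; the array contents below are made up for illustration .
import numpy as np

# Hypothetical inputs: two structured arrays sharing the unique key column 'id'.
A = np.array([(1, 2.0), (2, 3.5)], dtype=[('id', '<i4'), ('x', '<f8')])
B = np.array([(1, 'a'), (2, 'b')], dtype=[('id', '<i4'), ('label', '|S1')])
# Merge on 'id'; the result carries the columns 'id', 'x' and 'label'.
merged = strictjoin([A, B], keycols=['id'])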
|
def from_fault_data ( cls , edges , mesh_spacing ) :
"""Create and return a fault surface using fault source data .
: param edges :
A list of at least two horizontal edges of the surface
as instances of : class : ` openquake . hazardlib . geo . line . Line ` . The
list should be in top - to - bottom order ( the shallowest edge first ) .
: param mesh _ spacing :
Distance between two subsequent points in a mesh , in km .
: returns :
An instance of : class : ` ComplexFaultSurface ` created using
that data .
: raises ValueError :
If requested mesh spacing is too big for the surface geometry
( doesn ' t allow to put a single mesh cell along length and / or
width ) .
Uses : meth : ` check _ fault _ data ` for checking parameters ."""
|
cls . check_fault_data ( edges , mesh_spacing )
surface_nodes = [ complex_fault_node ( edges ) ]
mean_length = numpy . mean ( [ edge . get_length ( ) for edge in edges ] )
num_hor_points = int ( round ( mean_length / mesh_spacing ) ) + 1
if num_hor_points <= 1 :
raise ValueError ( 'mesh spacing %.1f km is too big for mean length %.1f km' % ( mesh_spacing , mean_length ) )
edges = [ edge . resample_to_num_points ( num_hor_points ) . points for i , edge in enumerate ( edges ) ]
vert_edges = [ Line ( v_edge ) for v_edge in zip ( * edges ) ]
mean_width = numpy . mean ( [ v_edge . get_length ( ) for v_edge in vert_edges ] )
num_vert_points = int ( round ( mean_width / mesh_spacing ) ) + 1
if num_vert_points <= 1 :
raise ValueError ( 'mesh spacing %.1f km is too big for mean width %.1f km' % ( mesh_spacing , mean_width ) )
points = zip ( * [ v_edge . resample_to_num_points ( num_vert_points ) . points for v_edge in vert_edges ] )
mesh = RectangularMesh . from_points_list ( list ( points ) )
assert 1 not in mesh . shape
self = cls ( mesh )
self . surface_nodes = surface_nodes
return self
|
def poissonSpikeGenerator ( firingRate , nBins , nTrials ) :
"""Generates a Poisson spike train .
@ param firingRate ( int ) firing rate of sample of Poisson spike trains to be generated
@ param nBins ( int ) number of bins or timesteps for the Poisson spike train
@ param nTrials ( int ) number of trials ( or cells ) in the spike train
@ return poissonSpikeTrain ( array )"""
|
dt = 0.001
# we are simulating a ms as a single bin in a vector , ie 1sec = 1000bins
poissonSpikeTrain = np . zeros ( ( nTrials , nBins ) , dtype = "uint32" )
for i in range ( nTrials ) :
for j in range ( int ( nBins ) ) :
if random . random ( ) < firingRate * dt :
poissonSpikeTrain [ i , j ] = 1
return poissonSpikeTrain
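A short usage sketch : a 30 Hz rate over a one - second train ( 1000 bins of 1 ms ) for 10 trials .
train = poissonSpikeGenerator(firingRate=30, nBins=1000, nTrials=10)
print(train.shape)               # (10, 1000)
print(train.sum(axis=1).mean())  # roughly 30 spikes per trial on average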
|
def check_for_duplicate_comment ( self , new ) :
"""Check that a submitted comment isn ' t a duplicate . This might be caused
by someone posting a comment twice . If it is a dup , silently return the * previous * comment ."""
|
possible_duplicates = self . get_comment_model ( ) . _default_manager . using ( self . target_object . _state . db ) . filter ( content_type = new . content_type , object_pk = new . object_pk )
for old in possible_duplicates :
if old . post_date . date ( ) == new . post_date . date ( ) and old . text == new . text :
return old
return new
|
def setInputFormatText ( self , text ) :
"""Sets the input format text for this widget to the given value .
: param text | < str >"""
|
try :
self . _inputFormat = XLineEdit . InputFormat [ nativestring ( text ) ]
except KeyError :
pass
|
def add_lfn ( self , lfn ) :
"""Add an LFN table to a parsed LIGO _ LW XML document .
lfn = lfn to be added"""
|
if len ( self . table [ 'process' ] [ 'stream' ] ) > 1 :
msg = "cannot add lfn to table with more than one process"
raise LIGOLwParseError ( msg )
# get the process _ id from the process table
pid_col = self . table [ 'process' ] [ 'orderedcol' ] . index ( 'process_id' )
pid = self . table [ 'process' ] [ 'stream' ] [ 0 ] [ pid_col ]
try :
self . table [ 'lfn' ] [ 'stream' ] . append ( ( pid , lfn ) )
except KeyError :
self . table [ 'lfn' ] = { 'pos' : 0 , 'column' : { 'process_id' : 'ilwd:char' , 'name' : 'lstring' } , 'stream' : [ ( pid , lfn ) ] , 'query' : '' , 'orderedcol' : [ 'process_id' , 'name' ] }
|
def get_task_fs ( self , courseid , taskid ) :
""": param courseid : the course id of the course
: param taskid : the task id of the task
: raise InvalidNameException
: return : A FileSystemProvider to the folder containing the task files"""
|
if not id_checker ( courseid ) :
raise InvalidNameException ( "Course with invalid name: " + courseid )
if not id_checker ( taskid ) :
raise InvalidNameException ( "Task with invalid name: " + taskid )
return self . _filesystem . from_subfolder ( courseid ) . from_subfolder ( taskid )
|
def avail_images ( call = None ) :
'''Return a list of the images that are on the provider'''
|
if call == 'action' :
raise SaltCloudSystemExit ( 'The avail_images function must be called with ' '-f or --function, or with the --list-images option' )
items = query ( action = 'template' )
ret = { }
for item in items :
ret [ item . attrib [ 'name' ] ] = item . attrib
return ret
|
def getVersionFromArchiveId ( git_archive_id = '$Format:%ct %d$' ) :
"""Extract the tag if a source is from git archive .
When source is exported via ` git archive ` , the git _ archive _ id init value is modified
and placeholders are expanded to the " archived " revision :
% ct : committer date , UNIX timestamp
% d : ref names , like the - - decorate option of git - log
See man gitattributes ( 5 ) and git - log ( 1 ) ( PRETTY FORMATS ) for more details ."""
|
# mangle the magic string to make sure it is not replaced by git archive
if not git_archive_id . startswith ( '$For' 'mat:' ) : # source was modified by git archive , try to parse the version from
# the value of git _ archive _ id
match = re . search ( r'tag:\s*v([^,)]+)' , git_archive_id )
if match : # archived revision is tagged , use the tag
return gitDescribeToPep440 ( match . group ( 1 ) )
# archived revision is not tagged , use the commit date
tstamp = git_archive_id . strip ( ) . split ( ) [ 0 ]
d = datetime . datetime . utcfromtimestamp ( int ( tstamp ) )
return d . strftime ( '%Y.%m.%d' )
return None
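A sketch of the tag - extraction step on a hypothetical expanded placeholder ; the timestamp and ref names below are invented for illustration .
import re

archive_id = '1514764800  (HEAD -> master, tag: v1.2.3)'
match = re.search(r'tag:\s*v([^,)]+)', archive_id)
print(match.group(1))  # '1.2.3' -- the function above passes this to gitDescribeToPep440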
|
def objectprep ( self ) :
"""Creates fastq files from an in - progress Illumina MiSeq run or create an object and moves files appropriately"""
|
# Create . fastq files if necessary . Otherwise create the metadata object
if self . bcltofastq :
if self . customsamplesheet :
assert os . path . isfile ( self . customsamplesheet ) , 'Cannot find custom sample sheet as specified {}' . format ( self . customsamplesheet )
# Create the FASTQ files
self . samples = fastqCreator . CreateFastq ( self )
# Create a dictionary of the object
samples_dict = vars ( self . samples )
# Extract the required information from the dictionary
self . index = samples_dict [ 'index' ]
self . index_length = samples_dict [ 'indexlength' ]
self . forward = samples_dict [ 'forwardlength' ]
self . reverse = samples_dict [ 'reverselength' ]
self . forwardlength = samples_dict [ 'forward' ]
self . reverselength = samples_dict [ 'reverse' ]
self . header = samples_dict [ 'header' ]
else :
self . samples = createObject . ObjectCreation ( self )
|
def parse_tokens ( tokens ) :
'''Read tokens strings into ( is _ flag , value ) tuples :
For this value of ` tokens ` :
[ ' - f ' , ' pets . txt ' , ' - v ' , ' cut ' , ' - cz ' , ' - - lost ' , ' - - delete = sam ' , ' - - ' , ' lester ' , ' jack ' ]
` parse _ tokens ( tokens ) ` yields an iterable :
( True , ' f ' ) ,
( False , ' pets . txt ' ) ,
( True , ' v ' ) ,
( False , ' cut ' ) ,
( True , ' c ' ) ,
( True , ' z ' ) ,
( True , ' lost ' ) ,
( True , ' delete ' ) ,
( False , ' sam ' ) ,
( False , ' lester ' ) ,
( False , ' jack ' ) ,
Todo :
ensure that ' verbose ' in ' - - verbose - - a b c ' is treated as a boolean even if not marked as one .'''
|
# one pass max
tokens = iter ( tokens )
for token in tokens :
if token == '--' : # bleed out tokens without breaking , since tokens is an iterator
for token in tokens :
yield False , token
elif token . startswith ( '-' ) : # this handles both - - last = man . txt and - czf = file . tgz
# str . partition produces a 3 - tuple whether or not the separator is found
token , sep , value = token . partition ( '=' )
for flag in split_flag_token ( token ) :
yield True , flag
if sep : # we don ' t re - flatten the ' value ' from ' - - token = value '
yield False , value
else :
yield False , token
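A usage sketch that feeds the docstring's example token list through the generator ; it assumes split _ flag _ token is available in the same module .
tokens = ['-f', 'pets.txt', '-v', 'cut', '-cz', '--lost', '--delete=sam', '--', 'lester', 'jack']
for is_flag, value in parse_tokens(tokens):
    print(is_flag, value)   # (True, 'f'), (False, 'pets.txt'), (True, 'v'), ...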
|
def bait ( self , maskmiddle = 'f' , k = '19' ) :
"""Use bbduk to perform baiting
: param maskmiddle : keyword argument passed to bbduk ; treat the middle base of a kmer as a wildcard to increase
sensitivity in the presence of errors .
: param k : keyword argument for the length of kmers to use in the analyses"""
|
logging . info ( 'Performing kmer baiting of fastq files with {at} targets' . format ( at = self . analysistype ) )
# There seems to be some sort of issue with java incorrectly calculating the total system memory on certain
# computers . For now , calculate the memory , and feed it into the bbduk call
if self . kmer_size is None :
kmer = k
else :
kmer = self . kmer_size
with progressbar ( self . runmetadata ) as bar :
for sample in bar :
if sample . general . bestassemblyfile != 'NA' and sample [ self . analysistype ] . runanalysis : # Create the folder ( if necessary )
make_path ( sample [ self . analysistype ] . outputdir )
# Make the system call
if len ( sample . general . fastqfiles ) == 2 : # Create the command to run the baiting - paired inputs and a single , zipped output
sample [ self . analysistype ] . bbdukcmd = 'bbduk.sh -Xmx{mem} ref={ref} in1={in1} in2={in2} k={kmer} maskmiddle={mm} ' 'threads={c} outm={om}' . format ( mem = self . mem , ref = sample [ self . analysistype ] . baitfile , in1 = sample . general . trimmedcorrectedfastqfiles [ 0 ] , in2 = sample . general . trimmedcorrectedfastqfiles [ 1 ] , kmer = kmer , mm = maskmiddle , c = str ( self . cpus ) , om = sample [ self . analysistype ] . baitedfastq )
else :
sample [ self . analysistype ] . bbdukcmd = 'bbduk.sh -Xmx{mem} ref={ref} in={in1} k={kmer} maskmiddle={mm} ' 'threads={cpus} outm={outm}' . format ( mem = self . mem , ref = sample [ self . analysistype ] . baitfile , in1 = sample . general . trimmedcorrectedfastqfiles [ 0 ] , kmer = kmer , mm = maskmiddle , cpus = str ( self . cpus ) , outm = sample [ self . analysistype ] . baitedfastq )
# Run the system call ( if necessary )
if not os . path . isfile ( sample [ self . analysistype ] . baitedfastq ) :
out , err = run_subprocess ( sample [ self . analysistype ] . bbdukcmd )
write_to_logfile ( sample [ self . analysistype ] . bbdukcmd , sample [ self . analysistype ] . bbdukcmd , self . logfile , sample . general . logout , sample . general . logerr , sample [ self . analysistype ] . logout , sample [ self . analysistype ] . logerr )
write_to_logfile ( out , err , self . logfile , sample . general . logout , sample . general . logerr , sample [ self . analysistype ] . logout , sample [ self . analysistype ] . logerr )
|
def update_delisting ( self , num_iid , session ) :
'''taobao . item . update . delisting - delist an item ( take it off the shelves )
Delists a single item .
The input num _ iid must belong to the user of the current session'''
|
request = TOPRequest ( 'taobao.item.update.delisting' )
request [ 'num_iid' ] = num_iid
self . create ( self . execute ( request , session ) [ 'item' ] )
return self
|
def load_L3G_arduino ( filename , remove_begin_spurious = False , return_parser = False ) :
"Load gyro data collected by the arduino version of the L3G logging platform , and return the data ( in rad / s ) , a time vector , and the sample rate ( seconds )"
|
file_data = open ( filename , 'rb' ) . read ( )
parser = L3GArduinoParser ( )
parser . parse ( file_data [ 7 : ] )
# Skip first " GYROLOG " header in file
data = parser . data
if parser . actual_data_rate :
T = 1. / parser . actual_data_rate
print ( "Found measured data rate %.3f ms (%.3f Hz)" % ( 1000 * T , 1. / T ) )
else :
T = 1. / parser . data_rate
print ( "Using data rate provided by gyro (probably off by a few percent!) %.3f ms (%.3f Hz)" % ( 1000 * T , 1. / T ) )
N = parser . data . shape [ 1 ]
t = np . linspace ( 0 , T * N , num = data . shape [ 1 ] )
print ( t . shape , data . shape )
print ( "Loaded %d samples (%.2f seconds) with expected sample rate %.3f ms (%.3f Hz)" % ( N , t [ - 1 ] , T * 1000.0 , 1. / T ) )
try :
print ( "Actual sample rate is %.3f ms (%.3f Hz)" % ( 1000. / parser . actual_data_rate , parser . actual_data_rate , ) )
except TypeError :
pass
if remove_begin_spurious :
to_remove = int ( 0.3 / T )
# Remove first three tenth of second
data [ : , : to_remove ] = 0.0
if return_parser :
return np . deg2rad ( data ) , t , T , parser
else :
return np . deg2rad ( data ) , t , T
|
def _byte_encode ( self , token ) :
"""Encode a single token byte - wise into integer ids ."""
|
# Vocab ids for all bytes follow ids for the subwords
offset = len ( self . _subwords )
if token == "_" :
return [ len ( self . _subwords ) + ord ( " " ) ]
return [ i + offset for i in list ( bytearray ( tf . compat . as_bytes ( token ) ) ) ]
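A standalone sketch of the same byte - encoding idea , with a made - up three - entry subword vocabulary so the offsets are easy to follow .
subwords = ['the', 'ing', 'er']           # hypothetical subword vocabulary
offset = len(subwords)                    # byte ids start after the subword ids
ids = [offset + b for b in bytearray('cat'.encode('utf-8'))]
print(ids)  # [102, 100, 119] -- bytes for 'c', 'a', 't' shifted by the offset of 3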
|
def autobuild_doxygen ( tile ) :
"""Generate documentation for firmware in this module using doxygen"""
|
iotile = IOTile ( '.' )
doxydir = os . path . join ( 'build' , 'doc' )
doxyfile = os . path . join ( doxydir , 'doxygen.txt' )
outfile = os . path . join ( doxydir , '%s.timestamp' % tile . unique_id )
env = Environment ( ENV = os . environ , tools = [ ] )
env [ 'IOTILE' ] = iotile
# There is no / dev / null on Windows
if platform . system ( ) == 'Windows' :
action = 'doxygen %s > NUL' % doxyfile
else :
action = 'doxygen %s > /dev/null' % doxyfile
Alias ( 'doxygen' , doxydir )
env . Clean ( outfile , doxydir )
inputfile = doxygen_source_path ( )
env . Command ( doxyfile , inputfile , action = env . Action ( lambda target , source , env : generate_doxygen_file ( str ( target [ 0 ] ) , iotile ) , "Creating Doxygen Config File" ) )
env . Command ( outfile , doxyfile , action = env . Action ( action , "Building Firmware Documentation" ) )
|
def _mmInit ( self ) :
"""Create the minimum match dictionary of keys"""
|
# cache references to speed up loop a bit
mmkeys = { }
mmkeysGet = mmkeys . setdefault
minkeylength = self . minkeylength
for key in self . data . keys ( ) : # add abbreviations as short as minkeylength
# always add at least one entry ( even for key = " " )
lenkey = len ( key )
start = min ( minkeylength , lenkey )
for i in range ( start , lenkey + 1 ) :
mmkeysGet ( key [ 0 : i ] , [ ] ) . append ( key )
self . mmkeys = mmkeys
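A standalone sketch of the minimum - match idea outside the class , assuming a plain list of keys and a minimum abbreviation length of two .
def build_mmkeys(keys, minkeylength=2):
    # Map every prefix of length >= minkeylength to the full keys it could abbreviate.
    mmkeys = {}
    for key in keys:
        start = min(minkeylength, len(key))
        for i in range(start, len(key) + 1):
            mmkeys.setdefault(key[:i], []).append(key)
    return mmkeys

print(build_mmkeys(['verbose', 'version']))
# {'ve': ['verbose', 'version'], 'ver': ['verbose', 'version'], 'verb': ['verbose'], ...}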
|
def clip_upper ( self , threshold , axis = None , inplace = False ) :
"""Trim values above a given threshold .
. . deprecated : : 0.24.0
Use clip ( upper = threshold ) instead .
Elements above the ` threshold ` will be changed to match the
` threshold ` value ( s ) . Threshold can be a single value or an array ,
in the latter case it performs the truncation element - wise .
Parameters
threshold : numeric or array - like
Maximum value allowed . All values above threshold will be set to
this value .
* float : every value is compared to ` threshold ` .
* array - like : The shape of ` threshold ` should match the object
it ' s compared to . When ` self ` is a Series , ` threshold ` should be
the length . When ` self ` is a DataFrame , ` threshold ` should 2 - D
and the same shape as ` self ` for ` ` axis = None ` ` , or 1 - D and the
same length as the axis being compared .
axis : { 0 or ' index ' , 1 or ' columns ' } , default 0
Align object with ` threshold ` along the given axis .
inplace : bool , default False
Whether to perform the operation in place on the data .
. . versionadded : : 0.21.0
Returns
Series or DataFrame
Original data with values trimmed .
See Also
Series . clip : General purpose method to trim Series values to given
threshold ( s ) .
DataFrame . clip : General purpose method to trim DataFrame values to
given threshold ( s ) .
Examples
> > > s = pd . Series ( [ 1 , 2 , 3 , 4 , 5 ] )
> > > s
0 1
1 2
2 3
3 4
4 5
dtype : int64
> > > s . clip ( upper = 3)
0 1
1 2
2 3
3 3
4 3
dtype : int64
> > > elemwise _ thresholds = [ 5 , 4 , 3 , 2 , 1]
> > > elemwise _ thresholds
[5 , 4 , 3 , 2 , 1]
> > > s . clip ( upper = elemwise _ thresholds )
0 1
1 2
2 3
3 2
4 1
dtype : int64"""
|
warnings . warn ( 'clip_upper(threshold) is deprecated, ' 'use clip(upper=threshold) instead' , FutureWarning , stacklevel = 2 )
return self . _clip_with_one_bound ( threshold , method = self . le , axis = axis , inplace = inplace )
|
def merge_dicts ( dict1 , dict2 , deep_merge = True ) :
"""Merge dict2 into dict1."""
|
if deep_merge :
if isinstance ( dict1 , list ) and isinstance ( dict2 , list ) :
return dict1 + dict2
if not isinstance ( dict1 , dict ) or not isinstance ( dict2 , dict ) :
return dict2
for key in dict2 :
dict1 [ key ] = merge_dicts ( dict1 [ key ] , dict2 [ key ] ) if key in dict1 else dict2 [ key ]
# noqa pylint : disable = line - too - long
return dict1
dict3 = dict1 . copy ( )
dict3 . update ( dict2 )
return dict3
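A quick usage sketch showing the deep - merge behaviour on nested dicts and lists .
a = {'x': {'y': 1}, 'tags': ['a']}
b = {'x': {'z': 2}, 'tags': ['b']}
# With deep_merge=True (the default), nested dicts merge recursively and lists
# concatenate; note that dict1 is modified in place and returned.
print(merge_dicts(a, b))
# {'x': {'y': 1, 'z': 2}, 'tags': ['a', 'b']}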
|
def compute_volume ( sizes , centers , normals ) :
"""Compute the numerical volume of a convex mesh
: parameter array sizes : array of sizes of triangles
: parameter array centers : array of centers of triangles ( x , y , z )
: parameter array normals : array of normals of triangles ( will normalize if not already )
: return : the volume ( float )"""
|
# the volume of a slanted triangular cone is A _ triangle * ( r _ vec dot norm _ vec ) / 3.
# TODO : implement normalizing normals into meshing routines ( or at least have them supply normal _ mags to the mesh )
# TODO : remove this function - should now be returned by the meshing algorithm itself
# although wd method may currently use this
normal_mags = np . linalg . norm ( normals , axis = 1 )
# np . sqrt ( ( normals * * 2 ) . sum ( axis = 1 ) )
return np . sum ( sizes * ( ( centers * normals ) . sum ( axis = 1 ) / normal_mags ) / 3 )
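A worked check , assuming a unit cube centred at the origin whose six faces are each split into two triangles of area 0.5 ; the dot product of any face point with the outward unit normal is 0.5 , so the sum recovers a volume of 1.0 .
import numpy as np

normals = np.repeat(np.vstack([np.eye(3), -np.eye(3)]), 2, axis=0)  # 12 outward unit normals
sizes = np.full(12, 0.5)                                            # each triangle has area 0.5
centers = 0.5 * normals                                             # any point on the matching face works
print(compute_volume(sizes, centers, normals))                      # ~1.0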
|
def is_unit_or_unitstring ( value ) :
"""must be an astropy . unit"""
|
if is_unit ( value ) [ 0 ] :
return True , value
try :
unit = units . Unit ( value )
except :
return False , value
else :
return True , unit
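A hedged usage sketch , assuming astropy units are importable as ` units ` and that ` is _ unit ` returns False for plain strings .
print(is_unit_or_unitstring('km/s'))    # (True, Unit("km / s")) -- parsed into a unit
print(is_unit_or_unitstring('banana'))  # (False, 'banana') -- not parseable as a unit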
|
def all ( self , ** kwargs ) :
"""List all the members , included inherited ones .
Args :
all ( bool ) : If True , return all the items , without pagination
per _ page ( int ) : Number of items to retrieve per request
page ( int ) : ID of the page to return ( starts with page 1)
as _ list ( bool ) : If set to False and no pagination option is
defined , return a generator instead of a list
* * kwargs : Extra options to send to the server ( e . g . sudo )
Raises :
GitlabAuthenticationError : If authentication is not correct
GitlabListError : If the list could not be retrieved
Returns :
RESTObjectList : The list of members"""
|
path = '%s/all' % self . path
obj = self . gitlab . http_list ( path , ** kwargs )
return [ self . _obj_cls ( self , item ) for item in obj ]
|
def setCurrentNode ( self , node ) :
"""Sets the currently selected node in the scene . If multiple nodes are selected , then the last one selected \
will be considered the current one .
: param node | < XNode > | | None"""
|
self . blockSignals ( True )
self . clearSelection ( )
if node :
node . setSelected ( True )
self . blockSignals ( False )
|
def get_binaries ( ) :
"""Download and return paths of all platform - specific binaries"""
|
paths = [ ]
for arp in [ False , True ] :
paths . append ( get_binary ( arp = arp ) )
return paths
|
def display_all ( self ) :
"""Print detailed information ."""
|
print ( ( "total elapse %0.6f seconds, last elapse %0.6f seconds, " "took %s times measurement" ) % ( self . total_elapse , self . elapse , len ( self . records ) ) )
|
def uri_with ( uri , scheme = None , netloc = None , path = None , params = None , query = None , fragment = None ) :
"""Return a URI with the given part ( s ) replaced .
Parts are decoded / encoded ."""
|
old_scheme , old_netloc , old_path , old_params , old_query , old_fragment = urlparse ( uri )
path , _netloc = _normalize_win_path ( path )
return urlunparse ( ( scheme or old_scheme , netloc or old_netloc , path or old_path , params or old_params , query or old_query , fragment or old_fragment ) )
|
def create_pull_request ( self , git_pull_request_to_create , repository_id , project = None , supports_iterations = None ) :
"""CreatePullRequest .
[ Preview API ] Create a pull request .
: param : class : ` < GitPullRequest > < azure . devops . v5_1 . git . models . GitPullRequest > ` git _ pull _ request _ to _ create : The pull request to create .
: param str repository _ id : The repository ID of the pull request ' s target branch .
: param str project : Project ID or project name
: param bool supports _ iterations : If true , subsequent pushes to the pull request will be individually reviewable . Set this to false for large pull requests for performance reasons if this functionality is not needed .
: rtype : : class : ` < GitPullRequest > < azure . devops . v5_1 . git . models . GitPullRequest > `"""
|
route_values = { }
if project is not None :
route_values [ 'project' ] = self . _serialize . url ( 'project' , project , 'str' )
if repository_id is not None :
route_values [ 'repositoryId' ] = self . _serialize . url ( 'repository_id' , repository_id , 'str' )
query_parameters = { }
if supports_iterations is not None :
query_parameters [ 'supportsIterations' ] = self . _serialize . query ( 'supports_iterations' , supports_iterations , 'bool' )
content = self . _serialize . body ( git_pull_request_to_create , 'GitPullRequest' )
response = self . _send ( http_method = 'POST' , location_id = '9946fd70-0d40-406e-b686-b4744cbbcc37' , version = '5.1-preview.1' , route_values = route_values , query_parameters = query_parameters , content = content )
return self . _deserialize ( 'GitPullRequest' , response )
|
def add_formatter ( self , sqla_col_type , formatter , key_specific = None ) :
"""Add a formatter to the registry
if key _ specific is provided , this formatter will only be used for some
specific exports"""
|
self . add_item ( sqla_col_type , formatter , key_specific )
|
def _list_tenants ( self , admin ) :
"""Returns either a list of all tenants ( admin = True ) , or the tenant for
the currently - authenticated user ( admin = False ) ."""
|
resp , resp_body = self . method_get ( "tenants" , admin = admin )
if 200 <= resp . status_code < 300 :
tenants = resp_body . get ( "tenants" , [ ] )
return [ Tenant ( self , tenant ) for tenant in tenants ]
elif resp . status_code in ( 401 , 403 ) :
raise exc . AuthorizationFailure ( "You are not authorized to list " "tenants." )
else :
raise exc . TenantNotFound ( "Could not get a list of tenants." )
|
def on_source_directory_chooser_clicked ( self ) :
"""Autoconnect slot activated when tbSourceDir is clicked ."""
|
title = self . tr ( 'Set the source directory for script and scenario' )
self . choose_directory ( self . source_directory , title )
|
def copy ( self , memo = None , which = None ) :
"""Returns a ( deep ) copy of the current parameter handle .
All connections to parents of the copy will be cut .
: param dict memo : memo for deepcopy
: param Parameterized which : parameterized object which started the copy process [ default : self ]"""
|
# raise NotImplementedError , " Copy is not yet implemented , TODO : Observable hierarchy "
if memo is None :
memo = { }
import copy
# the next part makes sure that we do not include parents in any form :
parents = [ ]
if which is None :
which = self
which . traverse_parents ( parents . append )
# collect parents
for p in parents :
if not id ( p ) in memo :
memo [ id ( p ) ] = None
# set all parents to be None , so they will not be copied
if not id ( self . gradient ) in memo :
memo [ id ( self . gradient ) ] = None
# reset the gradient
if not id ( self . _fixes_ ) in memo :
memo [ id ( self . _fixes_ ) ] = None
# fixes have to be reset , as this is now highest parent
copy = copy . deepcopy ( self , memo )
# and start the copy
copy . _parent_index_ = None
copy . _trigger_params_changed ( )
return copy
|
def warning ( self , msg , * args , ** kwargs ) -> Task : # type : ignore
"""Log msg with severity ' WARNING ' .
To pass exception information , use the keyword argument exc _ info with
a true value , e . g .
await logger . warning ( " Houston , we have a bit of a problem " , exc _ info = 1)"""
|
return self . _make_log_task ( logging . WARNING , msg , args , ** kwargs )
|
def _redirect ( self , args ) :
"""asks the client to use a different server
This method redirects the client to another server , based on
the requested virtual host and / or capabilities .
RULE :
When getting the Connection . Redirect method , the client
SHOULD reconnect to the host specified , and if that host
is not present , to any of the hosts specified in the
known - hosts list .
PARAMETERS :
host : shortstr
server to connect to
Specifies the server to connect to . This is an IP
address or a DNS name , optionally followed by a colon
and a port number . If no port number is specified , the
client should use the default port number for the
protocol .
known _ hosts : shortstr"""
|
host = args . read_shortstr ( )
self . known_hosts = args . read_shortstr ( )
AMQP_LOGGER . debug ( 'Redirected to [%s], known_hosts [%s]' % ( host , self . known_hosts ) )
return host
|
def do_add_x10_device ( self , args ) :
"""Add an X10 device to the IM .
Usage :
add _ x10 _ device housecode unitcode type
Arguments :
housecode : Device housecode ( A - P )
unitcode : Device unitcode ( 1 - 16)
type : Device type
Current device types are :
- OnOff
- Dimmable
- Sensor
Example :
add _ x10 _ device M 12 OnOff"""
|
params = args . split ( )
housecode = None
unitcode = None
dev_type = None
try :
housecode = params [ 0 ]
unitcode = int ( params [ 1 ] )
if unitcode not in range ( 1 , 17 ) :
raise ValueError
dev_type = params [ 2 ]
except IndexError :
pass
except ValueError :
_LOGGING . error ( 'X10 unit code must be an integer 1 - 16' )
unitcode = None
if housecode and unitcode and dev_type :
device = self . tools . add_x10_device ( housecode , unitcode , dev_type )
if not device :
_LOGGING . error ( 'Device not added. Please check the ' 'information you provided.' )
self . do_help ( 'add_x10_device' )
else :
_LOGGING . error ( 'Device housecode, unitcode and type are ' 'required.' )
self . do_help ( 'add_x10_device' )
|
def init ( self , with_soft = True ) :
"""The method for the SAT oracle initialization . Since the oracle is
is used non - incrementally , it is reinitialized at every iteration
of the MaxSAT algorithm ( see : func : ` reinit ` ) . An input parameter
` ` with _ soft ` ` ( ` ` False ` ` by default ) regulates whether or not the
formula ' s soft clauses are copied to the oracle .
: param with _ soft : copy formula ' s soft clauses to the oracle or not
: type with _ soft : bool"""
|
self . oracle = Solver ( name = self . solver , bootstrap_with = self . hard , use_timer = True )
# self . atm1 is not empty only in case of minicard
for am in self . atm1 :
self . oracle . add_atmost ( * am )
if with_soft :
for cl , cpy in zip ( self . soft , self . scpy ) :
if cpy :
self . oracle . add_clause ( cl )
|
def get_current_user ( self ) :
"""Override get _ current _ user for Google AppEngine
Checks for oauth capable request first , if this fails fall back to standard users API"""
|
from google . appengine . api import users
if _IS_DEVELOPMENT_SERVER :
return users . get_current_user ( )
else :
from google . appengine . api import oauth
try :
user = oauth . get_current_user ( )
except oauth . OAuthRequestError :
user = users . get_current_user ( )
return user
|
def _nest_variable ( v , check_records = False ) :
"""Nest a variable when moving from scattered back to consolidated .
check _ records - - avoid re - nesting a record input if it comes from a previous
step and is already nested , don ' t need to re - array ."""
|
if ( check_records and is_cwl_record ( v ) and len ( v [ "id" ] . split ( "/" ) ) > 1 and v . get ( "type" , { } ) . get ( "type" ) == "array" ) :
return v
else :
v = copy . deepcopy ( v )
v [ "type" ] = { "type" : "array" , "items" : v [ "type" ] }
return v
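A small sketch of the nesting behaviour on a hypothetical scattered CWL variable .
v = {'id': 'step/reads', 'type': 'File'}
# With check_records left at False, the variable's type is wrapped into an array.
print(_nest_variable(v))
# {'id': 'step/reads', 'type': {'type': 'array', 'items': 'File'}}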
|
def copy ( self ) :
"""Copy this object into a new object of the same type .
The returned object will not have a parent object ."""
|
copyClass = self . copyClass
if copyClass is None :
copyClass = self . __class__
copied = copyClass ( )
copied . copyData ( self )
return copied
|
def _set_splits ( self , split_dict ) :
"""Split setter ( private method ) ."""
|
# Update the dictionary representation .
# Use from / to proto for a clean copy
self . _splits = split_dict . copy ( )
# Update the proto
del self . as_proto . splits [ : ] # Clear previous
for split_info in split_dict . to_proto ( ) :
self . as_proto . splits . add ( ) . CopyFrom ( split_info )
|
def conf_budget ( self , budget ) :
"""Set limit on the number of conflicts ."""
|
if self . minisat :
pysolvers . minisatgh_cbudget ( self . minisat , budget )
|