def save_volt(elecs, volt, filename):
    """Save the values in volt-format."""
    # bring data in shape
    content = np.column_stack((elecs, volt, np.zeros(len(volt))))
    # save datapoints
    with open(filename, 'w') as fid:
        fid.write('{0}\n'.format(content.shape[0]))
    with open(filename, 'ab') as fid:
        np.savetxt(fid, np.array(content), fmt='%i %i %f %f')
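# Hypothetical usage sketch for save_volt (not from the original source): it
# assumes `elecs` is an (N, 2) integer array of electrode pairs and `volt` an
# (N,) float array, matching the '%i %i %f %f' format string above.
import numpy as np

elecs = np.array([[1, 2], [2, 3], [3, 4]])
volt = np.array([0.12, 0.34, 0.56])
save_volt(elecs, volt, 'measurements.dat')
# The resulting file starts with the number of rows, followed by one
# "elec_a elec_b voltage 0.0" line per data point.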
def set_umr_namelist(self):
    """Set UMR excluded modules name list"""
    arguments, valid = QInputDialog.getText(
        self, _('UMR'),
        _("Set the list of excluded modules as "
          "this: <i>numpy, scipy</i>"),
        QLineEdit.Normal,
        ", ".join(self.get_option('umr/namelist')))
    if valid:
        arguments = to_text_string(arguments)
        if arguments:
            namelist = arguments.replace(' ', '').split(',')
            fixed_namelist = []
            non_ascii_namelist = []
            for module_name in namelist:
                if PY2:
                    if all(ord(c) < 128 for c in module_name):
                        if programs.is_module_installed(module_name):
                            fixed_namelist.append(module_name)
                    else:
                        QMessageBox.warning(
                            self, _('Warning'),
                            _("You are working with Python 2, this means that "
                              "you can not import a module that contains non-"
                              "ascii characters."),
                            QMessageBox.Ok)
                        non_ascii_namelist.append(module_name)
                elif programs.is_module_installed(module_name):
                    fixed_namelist.append(module_name)
            invalid = ", ".join(set(namelist) - set(fixed_namelist)
                                - set(non_ascii_namelist))
            if invalid:
                QMessageBox.warning(
                    self, _('UMR'),
                    _("The following modules are not "
                      "installed on your machine:\n%s") % invalid,
                    QMessageBox.Ok)
            QMessageBox.information(
                self, _('UMR'),
                _("Please note that these changes will "
                  "be applied only to new Python/IPython "
                  "consoles"),
                QMessageBox.Ok)
        else:
            fixed_namelist = []
        self.set_option('umr/namelist', fixed_namelist)
def handle_json_GET_routepatterns(self, params):
    """Given a route_id generate a list of patterns of the route. For each
    pattern include some basic information and a few sample trips."""
    schedule = self.server.schedule
    route = schedule.GetRoute(params.get('route', None))
    if not route:
        self.send_error(404)
        return
    time = int(params.get('time', 0))
    date = params.get('date', "")
    sample_size = 3  # For each pattern return the start time for this many trips
    pattern_id_trip_dict = route.GetPatternIdTripDict()
    patterns = []
    for pattern_id, trips in pattern_id_trip_dict.items():
        time_stops = trips[0].GetTimeStops()
        if not time_stops:
            continue
        has_non_zero_trip_type = False
        # Iterating over a copy so we can remove from trips inside the loop
        trips_with_service = []
        for trip in trips:
            service_id = trip.service_id
            service_period = schedule.GetServicePeriod(service_id)
            if date and not service_period.IsActiveOn(date):
                continue
            trips_with_service.append(trip)
            if trip['trip_type'] and trip['trip_type'] != '0':
                has_non_zero_trip_type = True
        # We're only interested in the trips that do run on the specified date
        trips = trips_with_service
        name = u'%s to %s, %d stops' % (time_stops[0][2].stop_name,
                                        time_stops[-1][2].stop_name,
                                        len(time_stops))
        transitfeed.SortListOfTripByTime(trips)
        num_trips = len(trips)
        if num_trips <= sample_size:
            start_sample_index = 0
            num_after_sample = 0
        else:
            # Will return sample_size trips that start after the 'time' param.
            # Linear search because I couldn't find a built-in way to do a
            # binary search with a custom key.
            start_sample_index = len(trips)
            for i, trip in enumerate(trips):
                if trip.GetStartTime() >= time:
                    start_sample_index = i
                    break
            num_after_sample = num_trips - (start_sample_index + sample_size)
            if num_after_sample < 0:
                # Less than sample_size trips start after 'time' so return all
                # the last sample_size trips.
                num_after_sample = 0
                start_sample_index = num_trips - sample_size
        sample = []
        for t in trips[start_sample_index:start_sample_index + sample_size]:
            sample.append((t.GetStartTime(), t.trip_id))
        patterns.append((name, pattern_id, start_sample_index, sample,
                         num_after_sample, (0, 1)[has_non_zero_trip_type]))
    patterns.sort()
    return patterns
def add_metrics(self, metrics: Iterable[float]) -> None:
    """Helper to add multiple metrics at once."""
    for metric in metrics:
        self.add_metric(metric)
def get_params(self, deep=False):
    """Get parameters."""
    params = super(XGBModel, self).get_params(deep=deep)
    if params['missing'] is np.nan:
        params['missing'] = None  # sklearn doesn't handle nan. see #4725
    if not params.get('eval_metric', True):
        del params['eval_metric']  # don't give as None param to Booster
    return params
def download_reference_files(job, inputs, samples):
    """Downloads shared files that are used by all samples for alignment, or generates them if they were not provided.

    :param JobFunctionWrappingJob job: passed automatically by Toil
    :param Namespace inputs: Input arguments (see main)
    :param list[list[str, list[str, str]]] samples: Samples in the format [UUID, [URL1, URL2]]"""
    # Create dictionary to store FileStoreIDs of shared input files
    shared_ids = {}
    urls = [('amb', inputs.amb), ('ann', inputs.ann), ('bwt', inputs.bwt),
            ('pac', inputs.pac), ('sa', inputs.sa)]
    # Alt file is optional and can only be provided, not generated
    if inputs.alt:
        urls.append(('alt', inputs.alt))
    # Download reference (human genomes are typically ~3G)
    download_ref = job.wrapJobFn(download_url_job, inputs.ref, disk='3G')
    job.addChild(download_ref)
    shared_ids['ref'] = download_ref.rv()
    # If FAI is provided, download it. Otherwise, generate it
    if inputs.fai:
        shared_ids['fai'] = job.addChildJobFn(download_url_job, inputs.fai).rv()
    else:
        faidx = job.wrapJobFn(run_samtools_faidx, download_ref.rv())
        shared_ids['fai'] = download_ref.addChild(faidx).rv()
    # If all BWA index files are provided, download them. Otherwise, generate them
    if all(x[1] for x in urls):
        for name, url in urls:
            shared_ids[name] = job.addChildJobFn(download_url_job, url).rv()
    else:
        job.fileStore.logToMaster('BWA index files not provided, creating now')
        bwa_index = job.wrapJobFn(run_bwa_index, download_ref.rv())
        download_ref.addChild(bwa_index)
        for x, name in enumerate(['amb', 'ann', 'bwt', 'pac', 'sa']):
            shared_ids[name] = bwa_index.rv(x)
    # map_job distributes one sample in samples to the download_sample_and_align function
    job.addFollowOnJobFn(map_job, download_sample_and_align, samples, inputs, shared_ids)
def prepend_bcbiopath():
    """Prepend paths in the BCBIOPATH environment variable (if any) to PATH.

    Uses either a pre-set global environmental variable (BCBIOPATH) or the
    local anaconda directory."""
    if os.environ.get('BCBIOPATH'):
        os.environ['PATH'] = _prepend(os.environ.get('PATH', ''),
                                      os.environ.get('BCBIOPATH', None))
    else:
        os.environ['PATH'] = _prepend(os.environ.get('PATH', ''),
                                      utils.get_bcbio_bin())
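# The `_prepend` helper is not shown above. A minimal sketch of what such a
# helper might look like (hypothetical, not the project's actual code): it
# puts the new entries in front of PATH without duplicating existing ones.
import os

def _prepend(path, extra):
    """Return `path` with the entries of `extra` prepended, skipping duplicates."""
    if not extra:
        return path
    existing = path.split(os.pathsep) if path else []
    new = [p for p in extra.split(os.pathsep) if p and p not in existing]
    return os.pathsep.join(new + existing)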
def markdownify(markdown_content):
    """Render the markdown content to HTML.

    Basic:
    >>> from martor.utils import markdownify
    >>> content = "![awesome](http://i.imgur.com/hvguiSn.jpg)"
    >>> markdownify(content)
    '<p><img alt="awesome" src="http://i.imgur.com/hvguiSn.jpg" /></p>'"""
    try:
        return markdown.markdown(
            markdown_content,
            safe_mode=MARTOR_MARKDOWN_SAFE_MODE,
            extensions=MARTOR_MARKDOWN_EXTENSIONS,
            extension_configs=MARTOR_MARKDOWN_EXTENSION_CONFIGS)
    except Exception:
        raise VersionNotCompatible("The markdown isn't compatible, please reinstall "
                                   "your python markdown into Markdown>=3.0")
async def createAnswer(self):
    """Create an SDP answer to an offer received from a remote peer during
    the offer/answer negotiation of a WebRTC connection.

    :rtype: :class:`RTCSessionDescription`"""
    # check state is valid
    self.__assertNotClosed()
    if self.signalingState not in ['have-remote-offer', 'have-local-pranswer']:
        raise InvalidStateError(
            'Cannot create answer in signaling state "%s"' % self.signalingState)
    # create description
    ntp_seconds = clock.current_ntp_time() >> 32
    description = sdp.SessionDescription()
    description.origin = '- %d %d IN IP4 0.0.0.0' % (ntp_seconds, ntp_seconds)
    description.msid_semantic.append(sdp.GroupDescription(semantic='WMS', items=['*']))
    description.type = 'answer'
    for remote_m in self.__remoteDescription().media:
        if remote_m.kind in ['audio', 'video']:
            transceiver = self.__getTransceiverByMid(remote_m.rtp.muxId)
            description.media.append(create_media_description_for_transceiver(
                transceiver,
                cname=self.__cname,
                direction=and_direction(transceiver.direction, transceiver._offerDirection),
                mid=transceiver.mid))
        else:
            description.media.append(create_media_description_for_sctp(
                self.__sctp, legacy=self._sctpLegacySdp, mid=self.__sctp.mid))
    bundle = sdp.GroupDescription(semantic='BUNDLE', items=[])
    for media in description.media:
        bundle.items.append(media.rtp.muxId)
    description.group.append(bundle)
    return wrap_session_description(description)
def intersects(self, other):
    '''Returns True iff this record's reference positions overlap
    the other record's reference positions (and are on the same chromosome)'''
    return (self.CHROM == other.CHROM
            and self.POS <= other.ref_end_pos()
            and other.POS <= self.ref_end_pos())
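# Worked example (hedged: _FakeRecord is a minimal stand-in, not the original
# record class). Two closed ranges [POS, ref_end_pos()] overlap exactly when
# each one starts no later than the other one ends, which is the test above.
class _FakeRecord:
    def __init__(self, chrom, pos, end):
        self.CHROM, self.POS, self._end = chrom, pos, end

    def ref_end_pos(self):
        return self._end

a = _FakeRecord('chr1', 100, 200)
b = _FakeRecord('chr1', 150, 250)
c = _FakeRecord('chr1', 201, 300)
print(intersects(a, b))  # True: 100-200 overlaps 150-250
print(intersects(a, c))  # False: c starts after a ends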
def restart(ctx, **kwargs):
    """restart a vaping process"""
    update_context(ctx, kwargs)
    daemon = mk_daemon(ctx)
    daemon.stop()
    daemon.start()
def _match_display_text(self, element_key, string, string_match_type, match):
    """Matches a display text value"""
    if string is None or string_match_type is None:
        raise NullArgument()
    match_value = self._get_string_match_value(string, string_match_type)
    self._add_match(element_key + '.text', match_value, match)
def export(self, private_keys=True):
    """Exports a RFC 7517 keyset using the standard JSON format

    :param private_keys (bool): Whether to export private keys.
        Defaults to True."""
    exp_dict = dict()
    for k, v in iteritems(self):
        if k == 'keys':
            keys = list()
            for jwk in v:
                keys.append(json_decode(jwk.export(private_keys)))
            v = keys
        exp_dict[k] = v
    return json_encode(exp_dict)
def list(showgroups):
    """Show list of Anchore data feeds."""
    ecode = 0
    try:
        result = {}
        subscribed = {}
        available = {}
        unavailable = {}
        current_user_data = contexts['anchore_auth']['user_info']
        feedmeta = anchore_feeds.load_anchore_feedmeta()
        for feed in feedmeta.keys():
            if feedmeta[feed]['subscribed']:
                subscribed[feed] = {}
                subscribed[feed]['description'] = feedmeta[feed]['description']
                if showgroups:
                    subscribed[feed]['groups'] = feedmeta[feed]['groups'].keys()
            else:
                if current_user_data:
                    tier = int(current_user_data['tier'])
                else:
                    tier = 0
                if int(feedmeta[feed]['access_tier']) > tier:
                    collection = unavailable
                else:
                    collection = available
                collection[feed] = {}
                collection[feed]['description'] = feedmeta[feed]['description']
                if showgroups and collection == available:
                    collection[feed]['groups'] = feedmeta[feed]['groups'].keys()
        if available:
            result['Available'] = available
        if subscribed:
            result['Subscribed'] = subscribed
        if unavailable:
            result['Unavailable/Insufficient Access Tier'] = unavailable
        anchore_print(result, do_formatting=True)
    except Exception as err:
        anchore_print_err('operation failed')
        ecode = 1
    sys.exit(ecode)
def from_interval_shorthand(self, startnote, shorthand, up=True):
    """Empty the container and add the note described in the startnote and
    shorthand.

    See core.intervals for the recognized format.

    Examples:
    >>> nc = NoteContainer()
    >>> nc.from_interval_shorthand('C', '5')
    ['C-4', 'G-4']
    >>> nc.from_interval_shorthand('C', '5', False)
    ['F-3', 'C-4']"""
    self.empty()
    if type(startnote) == str:
        startnote = Note(startnote)
    n = Note(startnote.name, startnote.octave, startnote.dynamics)
    n.transpose(shorthand, up)
    self.add_notes([startnote, n])
    return self
def get_note_names(self):
    """Return a list with all the note names in the current container.

    Every name will only be mentioned once."""
    res = []
    for n in self.notes:
        if n.name not in res:
            res.append(n.name)
    return res
def get_and_set(self, value):
    '''Atomically sets the value to `value` and returns the old value.

    :param value: The value to set.'''
    with self._reference.get_lock():
        oldval = self._reference.value
        self._reference.value = value
        return oldval
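# Usage sketch (hedged): `self._reference` behaves like a multiprocessing.Value,
# so a minimal stand-in wrapper is enough to exercise get_and_set. The AtomicInt
# class below is hypothetical and only for illustration.
import multiprocessing

class AtomicInt:
    def __init__(self, initial=0):
        self._reference = multiprocessing.Value('i', initial)

    get_and_set = get_and_set  # reuse the function above as a method

counter = AtomicInt(5)
old = counter.get_and_set(42)
print(old, counter._reference.value)  # 5 42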
def stop(self):
    """Stop the sensor."""
    # check that everything is running
    if not self._running:
        logging.warning('Realsense not running. Aborting stop.')
        return False
    self._pipe.stop()
    self._running = False
    return True
def commit ( self , session , mutations , transaction_id = None , single_use_transaction = None , retry = google . api_core . gapic_v1 . method . DEFAULT , timeout = google . api_core . gapic_v1 . method . DEFAULT , metadata = None , ) :
"""Commits a transaction . The request includes the mutations to be applied
to rows in the database .
` ` Commit ` ` might return an ` ` ABORTED ` ` error . This can occur at any
time ; commonly , the cause is conflicts with concurrent transactions .
However , it can also happen for a variety of other reasons . If
` ` Commit ` ` returns ` ` ABORTED ` ` , the caller should re - attempt the
transaction from the beginning , re - using the same session .
Example :
> > > from google . cloud import spanner _ v1
> > > client = spanner _ v1 . SpannerClient ( )
> > > session = client . session _ path ( ' [ PROJECT ] ' , ' [ INSTANCE ] ' , ' [ DATABASE ] ' , ' [ SESSION ] ' )
> > > # TODO : Initialize ` mutations ` :
> > > mutations = [ ]
> > > response = client . commit ( session , mutations )
Args :
session ( str ) : Required . The session in which the transaction to be committed is running .
mutations ( list [ Union [ dict , ~ google . cloud . spanner _ v1 . types . Mutation ] ] ) : The mutations to be executed when this transaction commits . All
mutations are applied atomically , in the order they appear in
this list .
If a dict is provided , it must be of the same form as the protobuf
message : class : ` ~ google . cloud . spanner _ v1 . types . Mutation `
transaction _ id ( bytes ) : Commit a previously - started transaction .
single _ use _ transaction ( Union [ dict , ~ google . cloud . spanner _ v1 . types . TransactionOptions ] ) : Execute mutations in a temporary transaction . Note that unlike commit of
a previously - started transaction , commit with a temporary transaction is
non - idempotent . That is , if the ` ` CommitRequest ` ` is sent to Cloud
Spanner more than once ( for instance , due to retries in the application ,
or in the transport library ) , it is possible that the mutations are
executed more than once . If this is undesirable , use
` ` BeginTransaction ` ` and ` ` Commit ` ` instead .
If a dict is provided , it must be of the same form as the protobuf
message : class : ` ~ google . cloud . spanner _ v1 . types . TransactionOptions `
retry ( Optional [ google . api _ core . retry . Retry ] ) : A retry object used
to retry requests . If ` ` None ` ` is specified , requests will not
be retried .
timeout ( Optional [ float ] ) : The amount of time , in seconds , to wait
for the request to complete . Note that if ` ` retry ` ` is
specified , the timeout applies to each individual attempt .
metadata ( Optional [ Sequence [ Tuple [ str , str ] ] ] ) : Additional metadata
that is provided to the method .
Returns :
A : class : ` ~ google . cloud . spanner _ v1 . types . CommitResponse ` instance .
Raises :
google . api _ core . exceptions . GoogleAPICallError : If the request
failed for any reason .
google . api _ core . exceptions . RetryError : If the request failed due
to a retryable error and retry attempts failed .
ValueError : If the parameters are invalid ."""
    # Wrap the transport method to add retry and timeout logic.
    if "commit" not in self._inner_api_calls:
        self._inner_api_calls["commit"] = google.api_core.gapic_v1.method.wrap_method(
            self.transport.commit,
            default_retry=self._method_configs["Commit"].retry,
            default_timeout=self._method_configs["Commit"].timeout,
            client_info=self._client_info,
        )
    # Sanity check: We have some fields which are mutually exclusive;
    # raise ValueError if more than one is sent.
    google.api_core.protobuf_helpers.check_oneof(
        transaction_id=transaction_id,
        single_use_transaction=single_use_transaction,
    )
    request = spanner_pb2.CommitRequest(
        session=session,
        mutations=mutations,
        transaction_id=transaction_id,
        single_use_transaction=single_use_transaction,
    )
    if metadata is None:
        metadata = []
    metadata = list(metadata)
    try:
        routing_header = [("session", session)]
    except AttributeError:
        pass
    else:
        routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(routing_header)
        metadata.append(routing_metadata)
    return self._inner_api_calls["commit"](request, retry=retry, timeout=timeout, metadata=metadata)
|
def create_context(self, message_queue, task_id):
    """Create data needed by upload_project_run (DukeDS connection info).

    :param message_queue: Queue: queue background process can send messages to us on
    :param task_id: int: id of this command's task so message will be routed correctly"""
    params = (self.settings.dest_directory, self.file_url.json_data,
              self.seek_amt, self.bytes_to_read)
    return DownloadContext(self.settings, params, message_queue, task_id)
def train(self, images):
    r"""Train a standard intensity space and an associated transformation model.

    Note that the passed images should be masked to contain only the foreground.

    Parameters
    ----------
    images : sequence of array_likes
        A number of images.

    Returns
    -------
    IntensityRangeStandardization : IntensityRangeStandardization
        This instance of IntensityRangeStandardization"""
    self.__stdrange = self.__compute_stdrange(images)
    lim = []
    for idx, i in enumerate(images):
        ci = numpy.array(numpy.percentile(i, self.__cutoffp))
        li = numpy.array(numpy.percentile(i, self.__landmarkp))
        ipf = interp1d(ci, self.__stdrange)
        lim.append(ipf(li))
        # treat single intensity accumulation error
        if not len(numpy.unique(numpy.concatenate((ci, li)))) == len(ci) + len(li):
            raise SingleIntensityAccumulationError(
                'Image no.{} shows an unusual single-intensity accumulation that '
                'leads to a situation where two percentile values are equal. This '
                'situation is usually caused, when the background has not been '
                'removed from the image. Another possibility would be to reduce '
                'the number of landmark percentiles landmarkp or to change their '
                'distribution.'.format(idx))
    self.__model = [self.__stdrange[0]] + list(numpy.mean(lim, 0)) + [self.__stdrange[1]]
    self.__sc_umins = [self.__stdrange[0]] + list(numpy.min(lim, 0)) + [self.__stdrange[1]]
    self.__sc_umaxs = [self.__stdrange[0]] + list(numpy.max(lim, 0)) + [self.__stdrange[1]]
    return self
def inherit_handlers(self, excluded_handlers):
    # type: (Iterable[str]) -> None
    """Merges the inherited configuration with the current ones

    :param excluded_handlers: Excluded handlers"""
    if not excluded_handlers:
        excluded_handlers = tuple()
    for handler, configuration in self.__inherited_configuration.items():
        if handler in excluded_handlers:
            # Excluded handler
            continue
        elif handler not in self.__handlers:
            # Fully inherited configuration
            self.__handlers[handler] = configuration
        # Merge configuration...
        elif isinstance(configuration, dict):
            # Dictionary
            self.__handlers.setdefault(handler, {}).update(configuration)
        elif isinstance(configuration, list):
            # List
            handler_conf = self.__handlers.setdefault(handler, [])
            for item in configuration:
                if item not in handler_conf:
                    handler_conf.append(item)
    # Clear the inherited configuration dictionary
    self.__inherited_configuration.clear()
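# Standalone illustration (hedged: uses plain dicts and lists, not the class
# above) of the merge rules implemented by inherit_handlers: inherited dicts
# are merged key-by-key, inherited lists are appended without duplicates, and
# handlers absent from the current configuration are taken over wholesale.
current = {'log': {'level': 'INFO'}, 'tags': ['a']}
inherited = {'log': {'file': 'out.log'}, 'tags': ['a', 'b'], 'metrics': {'port': 9090}}

for handler, configuration in inherited.items():
    if handler not in current:
        current[handler] = configuration
    elif isinstance(configuration, dict):
        current.setdefault(handler, {}).update(configuration)
    elif isinstance(configuration, list):
        conf = current.setdefault(handler, [])
        for item in configuration:
            if item not in conf:
                conf.append(item)

print(current)
# {'log': {'level': 'INFO', 'file': 'out.log'}, 'tags': ['a', 'b'], 'metrics': {'port': 9090}}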
def show_settings(self):
    """Open the Settings window, after updating the values in the GUI."""
    self.notes.config.put_values()
    self.overview.config.put_values()
    self.settings.config.put_values()
    self.spectrum.config.put_values()
    self.traces.config.put_values()
    self.video.config.put_values()
    self.settings.show()
def files_size(fs0, fs1, files):
    """Gets the file size of the given files."""
    for file_meta in files['deleted_files']:
        file_meta['size'] = fs0.stat(file_meta['path'])['size']
    for file_meta in files['created_files'] + files['modified_files']:
        file_meta['size'] = fs1.stat(file_meta['path'])['size']
    return files
def first_timestamp(self, event_key=None):
    """Obtain the first timestamp.

    Args:
      event_key: the type key of the sought events (e.g., constants.NAN_KEY).
        If None, includes all event type keys.

    Returns:
      First (earliest) timestamp of all the events of the given type (or all
      event types if event_key is None)."""
    if event_key is None:
        timestamps = [self._trackers[key].first_timestamp for key in self._trackers]
        return min(timestamp for timestamp in timestamps if timestamp >= 0)
    else:
        return self._trackers[event_key].first_timestamp
def update(self, **kwargs):
    """Update a service's configuration. Similar to the ``docker service
    update`` command.

    Takes the same parameters as :py:meth:`~ServiceCollection.create`.

    Raises:
        :py:class:`docker.errors.APIError`
            If the server returns an error."""
    # Image is required, so if it hasn't been set, use current image
    if 'image' not in kwargs:
        spec = self.attrs['Spec']['TaskTemplate']['ContainerSpec']
        kwargs['image'] = spec['Image']
    if kwargs.get('force_update') is True:
        task_template = self.attrs['Spec']['TaskTemplate']
        current_value = int(task_template.get('ForceUpdate', 0))
        kwargs['force_update'] = current_value + 1
    create_kwargs = _get_create_service_kwargs('update', kwargs)
    return self.client.api.update_service(self.id, self.version, **create_kwargs)
def wosParser ( isifile ) :
"""This is a function that is used to create [ RecordCollections ] ( . . / classes / RecordCollection . html # metaknowledge . RecordCollection ) from files .
* * wosParser * * ( ) reads the file given by the path isifile , checks that the header is correct then reads until it reaches EF . All WOS records it encounters are parsed with [ recordParser ( ) ] ( # metaknowledge . WOS . recordWOS . recordParser ) and converted into [ Records ] ( . . / classes / Record . html # metaknowledge . Record ) . A list of these ` Records ` is returned .
` BadWOSFile ` is raised if an issue is found with the file .
# Parameters
_ isifile _ : ` str `
> The path to the target file
# Returns
` List [ Record ] `
> All the ` Records ` found in _ isifile _"""
|
plst = set ( )
error = None
try :
with open ( isifile , 'r' , encoding = 'utf-8-sig' ) as openfile :
f = enumerate ( openfile , start = 0 )
while "VR 1.0" not in f . __next__ ( ) [ 1 ] :
pass
notEnd = True
while notEnd :
line = f . __next__ ( )
if line [ 1 ] == '' :
error = BadWOSFile ( "'{}' does not have an 'EF', lines 1 to {} were checked" . format ( isifile , line [ 0 ] + 1 ) )
elif line [ 1 ] . isspace ( ) :
continue
elif 'EF' in line [ 1 ] [ : 2 ] :
notEnd = False
continue
else :
try :
plst . add ( WOSRecord ( itertools . chain ( [ line ] , f ) , sFile = isifile , sLine = line [ 0 ] ) )
except BadWOSFile as e :
try :
s = f . __next__ ( ) [ 1 ]
while s [ : 2 ] != 'ER' :
s = f . __next__ ( ) [ 1 ]
except :
error = BadWOSFile ( "The file {} was not terminated correctly, causing the following error:\n{}" . format ( isifile , str ( e ) ) )
try :
f . __next__ ( )
except StopIteration :
pass
else :
error = BadWOSFile ( "EF not at end of " + isifile )
except UnicodeDecodeError :
try :
error = BadWOSFile ( "'{}' has a unicode issue on line: {}." . format ( isifile , f . __next__ ( ) [ 0 ] ) )
except : # Fallback needed incase f . _ _ next _ _ ( ) causes issues
error = BadWOSFile ( "'{}' has a unicode issue. Probably when being opened or possibly on the first line" . format ( isifile ) )
except StopIteration :
error = BadWOSFile ( "The file '{}' ends before EF was found" . format ( isifile ) )
except KeyboardInterrupt as e :
error = e
finally :
if isinstance ( error , KeyboardInterrupt ) :
raise error
return plst , error
|
def romanize(number):
    """Convert `number` to a Roman numeral."""
    roman = []
    for numeral, value in NUMERALS:
        times, number = divmod(number, value)
        roman.append(times * numeral)
    return ''.join(roman)
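# NUMERALS is not shown above. A plausible definition (an assumption, not the
# project's actual table) is the usual value-ordered list including the
# subtractive forms, which is what the divmod loop above relies on:
NUMERALS = [
    ('M', 1000), ('CM', 900), ('D', 500), ('CD', 400),
    ('C', 100), ('XC', 90), ('L', 50), ('XL', 40),
    ('X', 10), ('IX', 9), ('V', 5), ('IV', 4), ('I', 1),
]

print(romanize(1994))  # 'MCMXCIV'
print(romanize(2024))  # 'MMXXIV'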
def normalize_excludes(rootpath, excludes):
    """Normalize the excluded directory list."""
    return [path.normpath(path.abspath(exclude)) for exclude in excludes]
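# Quick usage sketch (assumes `from os import path`, which is how the function
# body is written; note that `rootpath` is accepted but unused in this version):
from os import path

print(normalize_excludes('/project', ['docs/', './build/../dist']))
# e.g. ['/current/working/dir/docs', '/current/working/dir/dist']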
def _get_LMv1_response(password, server_challenge):
    """[MS-NLMP] v28.0 2016-07-14
    2.2.2.3 LM_RESPONSE
    The LM_RESPONSE structure defines the NTLM v1 authentication LmChallengeResponse
    in the AUTHENTICATE_MESSAGE. This response is used only when NTLM v1
    authentication is configured.

    :param password: The password of the user we are trying to authenticate with
    :param server_challenge: A random 8-byte response generated by the server in the CHALLENGE_MESSAGE
    :return response: LmChallengeResponse to the server challenge"""
    lm_hash = comphash._lmowfv1(password)
    response = ComputeResponse._calc_resp(lm_hash, server_challenge)
    return response
def delete_state_changes(self, state_changes_to_delete: List[int]) -> None:
    """Delete state changes.

    Args:
        state_changes_to_delete: List of ids to delete."""
    with self.write_lock, self.conn:
        self.conn.executemany(
            'DELETE FROM state_events WHERE identifier = ?',
            state_changes_to_delete,
        )
def read_chunk_body(self):
    '''Read a fragment of a single chunk.

    Call :meth:`read_chunk_header` first.

    Returns:
        tuple: 2-item tuple with the content data and raw data.
        First item is empty bytes string when chunk is fully read.

    Coroutine.'''
    # chunk_size = self._chunk_size
    bytes_left = self._bytes_left
    # _logger.debug(__('Getting chunk size={0}, remain={1}.',
    #                  chunk_size, bytes_left))
    if bytes_left > 0:
        size = min(bytes_left, self._read_size)
        data = yield from self._connection.read(size)
        self._bytes_left -= len(data)
        return (data, data)
    elif bytes_left < 0:
        raise ProtocolError('Chunked-transfer overrun.')
    elif bytes_left:
        raise NetworkError('Connection closed.')
    newline_data = yield from self._connection.readline()
    if len(newline_data) > 2:
        # Should be either CRLF or LF.
        # This could be our problem or the server's problem.
        raise ProtocolError('Error reading newline after chunk.')
    self._chunk_size = self._bytes_left = None
    return (b'', newline_data)
def readline(self):
    """Read one line from the pseudoterminal, and return it as unicode.

    Can block if there is nothing to read. Raises :exc:`EOFError` if the
    terminal was closed."""
    try:
        s = self.fileobj.readline()
    except (OSError, IOError) as err:
        if err.args[0] == errno.EIO:
            # Linux-style EOF
            self.flag_eof = True
            raise EOFError('End Of File (EOF). Exception style platform.')
        raise
    if s == b'':
        # BSD-style EOF (also appears to work on recent Solaris (OpenIndiana))
        self.flag_eof = True
        raise EOFError('End Of File (EOF). Empty string style platform.')
    return s
def copy_public_attrs(source_obj, dest_obj):
    """Shallow copies all public attributes from source_obj to dest_obj.

    Overwrites them if they already exist."""
    for name, value in inspect.getmembers(source_obj):
        if not any(name.startswith(x) for x in ["_", "func", "im"]):
            setattr(dest_obj, name, value)
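# Usage sketch (the classes are hypothetical and only illustrate the behaviour):
# attributes starting with "_", "func" or "im" are skipped, everything else is
# shallow-copied onto the destination object.
import inspect

class Source:
    def __init__(self):
        self.name = 'demo'
        self._secret = 'hidden'

class Dest:
    pass

src, dst = Source(), Dest()
copy_public_attrs(src, dst)
print(dst.name)                 # 'demo'
print(hasattr(dst, '_secret'))  # False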
def list_role_secrets(self, role_name, mount_point='approle'):
    """LIST /auth/<mount_point>/role/<role name>/secret-id

    :param role_name: Name of the AppRole.
    :type role_name: str | unicode
    :param mount_point: The "path" the AppRole auth backend was mounted on. Vault currently defaults to "approle".
    :type mount_point: str | unicode
    :return: The JSON response of the request.
    :rtype: dict"""
    url = '/v1/auth/{mount_point}/role/{name}/secret-id'.format(
        mount_point=mount_point, name=role_name)
    return self._adapter.list(url).json()
def _update_time(self):
    """Increments the timestep counter by one.

    Furthermore ``self.time['days_elapsed']`` and
    ``self.time['num_steps_per_year']`` are updated.

    The function is called by the time stepping methods."""
    self.time['steps'] += 1
    # time in days since beginning
    self.time['days_elapsed'] += self.time['timestep'] / const.seconds_per_day
    if self.time['day_of_year_index'] >= self.time['num_steps_per_year'] - 1:
        self._do_new_calendar_year()
    else:
        self.time['day_of_year_index'] += 1
def sort_values(self, return_indexer=False, ascending=True):
    """Return sorted copy of Index."""
    if return_indexer:
        _as = self.argsort()
        if not ascending:
            _as = _as[::-1]
        sorted_index = self.take(_as)
        return sorted_index, _as
    else:
        sorted_values = np.sort(self._ndarray_values)
        attribs = self._get_attributes_dict()
        freq = attribs['freq']
        if freq is not None and not is_period_dtype(self):
            if freq.n > 0 and not ascending:
                freq = freq * -1
            elif freq.n < 0 and ascending:
                freq = freq * -1
            attribs['freq'] = freq
        if not ascending:
            sorted_values = sorted_values[::-1]
        return self._simple_new(sorted_values, **attribs)
def mongoimport ( json , database , ip = 'localhost' , port = 27017 , user = None , password = None , delim = '_' , delim1 = None , delim2 = None , delim_occurance = 1 , delim1_occurance = 1 , delim2_occurance = 1 ) :
'''Performs mongoimport on one or more json files .
Args :
json : Can be one of several things :
- path to a single JSON file
- an iterable ( list or tuple ) of one or more JSON file paths
- path to a directory containing one or more JSON files
database ( str ) : Name of the database into which the JSON files
will be imported
ip ( str ) : IP address of the MongoDB server . Default is ` ` localhost ` ` .
port ( int ) : Port of the MongoDB database . Default is ` ` 27017 ` ` .
user ( str ) : Username for the MongoDB database , if authentication is enabled .
Default is ` ` None ` ` , which results in attempting connection without
authentication .
password ( str ) : Password for the MongoDB database , if authentication is enabled .
Default is ` ` None ` ` , which results in attempting connection without
authentication .
delim ( str ) : Delimiter , when generating collection names using a single delimiter .
Default is ` ` _ ` `
delim _ occurance ( int ) : Occurrence at which to split filename when using a
single delimiter . Default is ` ` 1 ` `
delim1 ( str ) : Left delimiter when splitting with two delimiters . Default is None .
delim1 _ occurance ( int ) : Occurrence of ` ` delim1 ` ` at which to split filename .
Default is ` ` 1 ` `
delim2 ( str ) : Right delimiter when splitting with two delimiters . Default is None .
delim2 _ occurance ( int ) : Occurrence of ` ` delim2 ` ` at which to split filename .
Default is ` ` 1 ` `'''
|
logger = log . get_logger ( 'mongodb' )
_print_mongoimport_info ( logger )
if type ( json ) in ( list , tuple ) :
pass
elif os . path . isdir ( json ) :
from abtools . utils . pipeline import list_files
json = list_files ( json )
else :
json = [ json , ]
jsons = sorted ( [ os . path . expanduser ( j ) for j in json if j . endswith ( '.json' ) ] )
collections = _get_import_collections ( jsons , delim , delim_occurance , delim1 , delim1_occurance , delim2 , delim2_occurance )
logger . info ( 'Found {} files to import' . format ( len ( jsons ) ) )
logger . info ( '' )
for i , ( json_file , collection ) in enumerate ( zip ( jsons , collections ) ) :
logger . info ( '[ {} ] {} --> {}' . format ( i + 1 , os . path . basename ( json_file ) , collection ) )
# logger . info ( " Performing mongoimport on { } . " . format ( os . path . basename ( json _ file ) ) )
# logger . info ( " Importing the file into collection { } . " . format ( collection ) )
if all ( [ user , password ] ) :
host = '--host {} --port {} -username {} -password {}' . format ( ip , port , user , password )
else :
host = '--host {} --port {}' . format ( ip , port )
mongo_cmd = "mongoimport {} --db {} --collection {} --file {}" . format ( host , database , collection , json_file )
mongo = sp . Popen ( mongo_cmd , shell = True , stdout = sp . PIPE , stderr = sp . PIPE )
stdout , stderr = mongo . communicate ( )
|
def _filter_meta_data ( self , source , soup , data , url = None ) :
"""This method filters the web page content for meta tags that match patterns given in the ` ` FILTER _ MAPS ` `
: param source : The key of the meta dictionary in ` ` FILTER _ MAPS [ ' meta ' ] ` `
: type source : string
: param soup : BeautifulSoup instance to find meta tags
: type soup : instance
: param data : The response dictionary to manipulate
: type data : ( dict )"""
|
meta = FILTER_MAPS [ 'meta' ] [ source ]
meta_map = meta [ 'map' ]
html = soup . find_all ( 'meta' , { meta [ 'key' ] : meta [ 'pattern' ] } )
image = { }
video = { }
for line in html :
prop = line . get ( meta [ 'key' ] )
value = line . get ( 'content' )
_prop = meta_map . get ( prop )
if prop in meta_map and _prop and not data . get ( _prop ) : # this could be bad in cases where any values that the property
# is mapped up to ( i . e . " src " , " type " , etc ) are found in ` ` data ` `
# TODO : Figure out a smoother way to prevent conflicts ^ ^ ^ ^ ^
image_prop = meta [ 'image_key' ]
video_prop = meta [ 'video_key' ]
if prop . startswith ( ( image_prop , video_prop ) ) and prop . endswith ( ( 'width' , 'height' ) ) :
if prop . endswith ( ( 'width' , 'height' ) ) :
value = convert_to_int ( value )
if meta_map [ prop ] == 'locale' :
locale = normalize_locale ( value )
if locale :
data [ 'locale' ] = locale
if prop == 'keywords' :
if isinstance ( value , str ) :
value = [ v . strip ( ) for v in value . split ( ',' ) ]
else :
value = [ ]
if image_prop and prop . startswith ( image_prop ) and value : # og : image URLs can be relative
if prop == 'og:image' and url :
value = urljoin ( url , value )
image [ meta_map [ prop ] ] = value
elif video_prop and prop . startswith ( video_prop ) and value :
video [ meta_map [ prop ] ] = value
else :
data [ meta_map [ prop ] ] = value
if image :
image [ 'type' ] = image_prop
data [ 'images' ] . append ( image )
if video :
data [ 'videos' ] . append ( video )
|
def remove_datasource ( jboss_config , name , profile = None ) :
'''Remove an existing datasource from the running jboss instance .
jboss _ config
Configuration dictionary with properties specified above .
name
Datasource name
profile
The profile ( JBoss domain mode only )
CLI Example :
. . code - block : : bash
salt ' * ' jboss7 . remove _ datasource ' { " cli _ path " : " integration . modules . sysmod . SysModuleTest . test _ valid _ docs " , " controller " : " 10.11.12.13:9999 " , " cli _ user " : " jbossadm " , " cli _ password " : " jbossadm " } ' my _ datasource _ name'''
|
log . debug ( "======================== MODULE FUNCTION: jboss7.remove_datasource, name=%s, profile=%s" , name , profile )
operation = '/subsystem=datasources/data-source={name}:remove' . format ( name = name )
if profile is not None :
operation = '/profile="{profile}"' . format ( profile = profile ) + operation
return __salt__ [ 'jboss7_cli.run_operation' ] ( jboss_config , operation , fail_on_error = False )
|
def stop_instance ( self , instance_id ) :
"""Stops the instance gracefully .
: param str instance _ id : instance identifier"""
|
self . _init_az_api ( )
cluster_name , node_name = instance_id
self . _init_inventory ( cluster_name )
for name , api_version in [ # we must delete resources in a specific order : e . g . ,
# a public IP address cannot be deleted if it ' s still
# in use by a NIC . . .
( node_name , '2018-06-01' ) , ( node_name + '-nic' , '2018-10-01' ) , ( node_name + '-public-ip' , '2018-10-01' ) , ( node_name + '-disk' , '2018-09-30' ) , ( self . _make_storage_account_name ( cluster_name , node_name ) , '2018-07-01' ) , ] :
rsc_id = self . _inventory [ name ]
log . debug ( "Deleting resource %s (`%s`) ..." , name , rsc_id )
oper = self . _resource_client . resources . delete_by_id ( rsc_id , api_version )
oper . wait ( )
del self . _inventory [ name ]
self . _vm_details . pop ( node_name , None )
# if this was the last VM to be deleted , clean up leftover resource group
with self . __lock :
if len ( self . _inventory ) == 2 :
log . debug ( "Cleaning up leftover resource group ..." )
oper = self . _resource_client . resource_groups . delete ( cluster_name )
oper . wait ( )
self . _inventory = { }
|
def convert2wavenumber(self):
    """Convert from wavelengths to wavenumber.

    Units:
        Wavelength: micro meters (1e-6 m)
        Wavenumber: cm-1"""
    self.wavenumber = 1. / (1e-4 * self.wavelength[::-1])
    self.irradiance = (self.irradiance[::-1] *
                       self.wavelength[::-1] * self.wavelength[::-1] * 0.1)
    self.wavelength = None
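# Worked check of the conversion above (hedged: assumes wavelength is given in
# micrometres and irradiance per micrometre, as the 1e-4 and 0.1 factors
# suggest). A wavelength of 0.5 µm maps to 1 / (1e-4 * 0.5) = 20000 cm^-1; the
# irradiance is rescaled by the Jacobian |dλ/dν| ∝ λ², and the [::-1] reversals
# keep the arrays ordered by increasing wavenumber.
wavelength_um = 0.5
wavenumber_cm = 1.0 / (1e-4 * wavelength_um)
print(wavenumber_cm)  # 20000.0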
def update_question ( self , question , publisher_name , extension_name , question_id ) :
"""UpdateQuestion .
[ Preview API ] Updates an existing question for an extension .
: param : class : ` < Question > < azure . devops . v5_1 . gallery . models . Question > ` question : Updated question to be set for the extension .
: param str publisher _ name : Name of the publisher who published the extension .
: param str extension _ name : Name of the extension .
: param long question _ id : Identifier of the question to be updated for the extension .
: rtype : : class : ` < Question > < azure . devops . v5_1 . gallery . models . Question > `"""
|
route_values = { }
if publisher_name is not None :
route_values [ 'publisherName' ] = self . _serialize . url ( 'publisher_name' , publisher_name , 'str' )
if extension_name is not None :
route_values [ 'extensionName' ] = self . _serialize . url ( 'extension_name' , extension_name , 'str' )
if question_id is not None :
route_values [ 'questionId' ] = self . _serialize . url ( 'question_id' , question_id , 'long' )
content = self . _serialize . body ( question , 'Question' )
response = self . _send ( http_method = 'PATCH' , location_id = '6d1d9741-eca8-4701-a3a5-235afc82dfa4' , version = '5.1-preview.1' , route_values = route_values , content = content )
return self . _deserialize ( 'Question' , response )
|
def _set_ssh_server_port ( self , v , load = False ) :
"""Setter method for ssh _ server _ port , mapped from YANG variable / ssh _ sa / ssh / server / ssh _ server _ port ( uint32)
If this variable is read - only ( config : false ) in the
source YANG file , then _ set _ ssh _ server _ port is considered as a private
method . Backends looking to populate this variable should
do so via calling thisObj . _ set _ ssh _ server _ port ( ) directly ."""
|
if hasattr ( v , "_utype" ) :
v = v . _utype ( v )
try :
t = YANGDynClass ( v , base = RestrictedClassType ( base_type = RestrictedClassType ( base_type = long , restriction_dict = { 'range' : [ '0..4294967295' ] } , int_size = 32 ) , restriction_dict = { 'range' : [ u'22' , u'1024..49151' ] } ) , is_leaf = True , yang_name = "ssh-server-port" , rest_name = "port" , parent = self , path_helper = self . _path_helper , extmethods = self . _extmethods , register_paths = True , extensions = { u'tailf-common' : { u'info' : u'The designated SSH server port' , u'cli-full-command' : None , u'alt-name' : u'port' , u'callpoint' : u'ssh_server_port_cp' } } , namespace = 'urn:brocade.com:mgmt:brocade-sec-services' , defining_module = 'brocade-sec-services' , yang_type = 'uint32' , is_config = True )
except ( TypeError , ValueError ) :
raise ValueError ( { 'error-string' : """ssh_server_port must be of a type compatible with uint32""" , 'defined-type' : "uint32" , 'generated-type' : """YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'22', u'1024..49151']}), is_leaf=True, yang_name="ssh-server-port", rest_name="port", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'The designated SSH server port', u'cli-full-command': None, u'alt-name': u'port', u'callpoint': u'ssh_server_port_cp'}}, namespace='urn:brocade.com:mgmt:brocade-sec-services', defining_module='brocade-sec-services', yang_type='uint32', is_config=True)""" , } )
self . __ssh_server_port = t
if hasattr ( self , '_set' ) :
self . _set ( )
|
def qt_message_handler(msg_type, msg_log_context, msg_string):
    """Qt warning messages are intercepted by this handler.

    On some operating systems, warning messages might be displayed
    even if the actual message does not apply. This filter adds a
    blacklist for messages that are being printed for no apparent
    reason. Anything else will get printed in the internal console.
    In DEV mode, all messages are printed."""
    BLACKLIST = [
        'QMainWidget::resizeDocks: all sizes need to be larger than 0',
    ]
    if DEV or msg_string not in BLACKLIST:
        print(msg_string)
def line ( self , x = None , y = None , ** kwds ) :
"""Plot DataFrame columns as lines .
This function is useful to plot lines using DataFrame ' s values
as coordinates .
Parameters
x : int or str , optional
Columns to use for the horizontal axis .
Either the location or the label of the columns to be used .
By default , it will use the DataFrame indices .
y : int , str , or list of them , optional
The values to be plotted .
Either the location or the label of the columns to be used .
By default , it will use the remaining DataFrame numeric columns .
* * kwds
Keyword arguments to pass on to : meth : ` DataFrame . plot ` .
Returns
: class : ` matplotlib . axes . Axes ` or : class : ` numpy . ndarray `
Return an ndarray when ` ` subplots = True ` ` .
See Also
matplotlib . pyplot . plot : Plot y versus x as lines and / or markers .
Examples
. . plot : :
: context : close - figs
The following example shows the populations for some animals
over the years .
> > > df = pd . DataFrame ( {
. . . ' pig ' : [ 20 , 18 , 489 , 675 , 1776 ] ,
. . . ' horse ' : [ 4 , 25 , 281 , 600 , 1900]
. . . } , index = [ 1990 , 1997 , 2003 , 2009 , 2014 ] )
> > > lines = df . plot . line ( )
. . plot : :
: context : close - figs
An example with subplots , so an array of axes is returned .
> > > axes = df . plot . line ( subplots = True )
> > > type ( axes )
< class ' numpy . ndarray ' >
. . plot : :
: context : close - figs
The following example shows the relationship between both
populations .
> > > lines = df . plot . line ( x = ' pig ' , y = ' horse ' )"""
|
return self ( kind = 'line' , x = x , y = y , ** kwds )
|
def parse_metrics(self, f):
    """Parse the metrics.tsv file from RNA-SeQC"""
    headers = None
    for l in f['f'].splitlines():
        s = l.strip().split("\t")
        if headers is None:
            headers = s
        else:
            s_name = s[headers.index('Sample')]
            data = dict()
            for idx, h in enumerate(headers):
                try:
                    data[h] = float(s[idx])
                except ValueError:
                    data[h] = s[idx]
            self.rna_seqc_metrics[s_name] = data
def set(self, id, value):
    """Write data for the given id.

    :param id: the id to write to
    :param value: the data to write, which can be a ``dict`` object"""
    session = json_dumps(value)
    self.collection.replace_one(
        {"wechat_id": id},
        {"wechat_id": id, "session": session},
        upsert=True)
def find_min(a, b):
    """This function determines the smaller of two numbers.

    Examples:
    >>> find_min(10, 20)
    10
    >>> find_min(19, 15)
    15
    >>> find_min(-10, -20)
    -20

    :param a: First number
    :param b: Second number
    :return: Returns the minimum of a and b."""
    return a if a < b else b
def display(method=EraseMethod.ALL_MOVE):
    """Clear the screen or part of the screen, and possibly moves the cursor
    to the "home" position (1, 1). See `method` argument below.

    Esc[<method>J

    Arguments:
        method: One of these possible values:
                    EraseMethod.END or 0:
                        Clear from cursor to the end of the screen.
                    EraseMethod.START or 1:
                        Clear from cursor to the start of the screen.
                    EraseMethod.ALL_MOVE or 2:
                        Clear all, and move home.
                    EraseMethod.ALL_ERASE or 3:
                        Clear all, and erase scrollback buffer.
                    EraseMethod.ALL_MOVE_ERASE or 4:
                        Like doing 2 and 3 in succession.
                        This is a feature of Colr. It is not standard.
                Default: EraseMethod.ALL_MOVE (2)"""
    accepted_methods = ('0', '1', '2', '3', '4')
    methodstr = str(method)
    if methodstr not in accepted_methods:
        raise ValueError('Invalid method, expected {}. Got: {!r}'.format(
            ', '.join(accepted_methods), method))
    if methodstr == '4':
        methods = (2, 3)
    else:
        methods = (method, )
    return EscapeCode(''.join(str(EscapeCode('{}J'.format(m))) for m in methods))
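# Usage sketch (hedged: EraseMethod and EscapeCode come from the surrounding
# Colr module and are assumed here). The default call should assemble the
# standard ANSI "erase display" sequence, ESC [ 2 J; printing it clears the
# terminal and moves the cursor home.
code = display()          # EraseMethod.ALL_MOVE -> '2J'
print(repr(str(code)))    # expected to contain '[2J'
print(code, end='')       # actually clears the screen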
def nobs(self):
    """get the number of observations

    Returns:
        nobs : int
            the number of observations"""
    self.control_data.nobs = self.observation_data.shape[0]
    return self.control_data.nobs
def entitlement(self, token):
    """Client applications can use a specific endpoint to obtain a special
    security token called a requesting party token (RPT). This token
    consists of all the entitlements (or permissions) for a user as a
    result of the evaluation of the permissions and authorization policies
    associated with the resources being requested. With an RPT, client
    applications can gain access to protected resources at the resource
    server.

    http://www.keycloak.org/docs/latest/authorization_services/index.html#_service_entitlement_api

    :rtype: dict"""
    headers = {"Authorization": "Bearer %s" % token}
    url = self._realm.client.get_full_url(
        PATH_ENTITLEMENT.format(self._realm.realm_name, self._client_id))
    return self._realm.client.get(url, headers=headers)
def touch(self, expiration):
    """Updates the current document's expiration value.

    :param expiration: Expiration in seconds for the document to be removed by
        couchbase server, defaults to 0 - will never expire.
    :type expiration: int
    :returns: Response from CouchbaseClient.
    :rtype: unicode
    :raises: :exc:'cbwrapper.errors.DoesNotExist' or
        :exc:'couchbase.exception.TemporaryFailError'"""
    if not self.cas_value or not self.doc_id:
        raise self.DoesNotExist(self)
    return self.bucket.touch(self.doc_id, expiration)
def getSymmetricallyEncryptedVal(val, secretKey: Union[str, bytes] = None) -> Tuple[str, str]:
    """Encrypt the provided value with symmetric encryption

    :param val: the value to encrypt
    :param secretKey: Optional key, if provided should be either in hex or bytes
    :return: Tuple of the encrypted value and secret key encoded in hex"""
    if isinstance(val, str):
        val = val.encode("utf-8")
    if secretKey:
        if isHex(secretKey):
            secretKey = bytes(bytearray.fromhex(secretKey))
        elif not isinstance(secretKey, bytes):
            error("Secret key must be either in hex or bytes")
        box = libnacl.secret.SecretBox(secretKey)
    else:
        box = libnacl.secret.SecretBox()
    return box.encrypt(val).hex(), box.sk.hex()
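# Round-trip sketch (hedged: relies on libnacl's SecretBox API as used above;
# the decrypt step below is an assumption about how a caller would reverse it).
import libnacl.secret

ciphertext_hex, key_hex = getSymmetricallyEncryptedVal("secret message")
box = libnacl.secret.SecretBox(bytes(bytearray.fromhex(key_hex)))
plaintext = box.decrypt(bytes(bytearray.fromhex(ciphertext_hex)))
print(plaintext)  # b'secret message'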
def revnet_range(rhp):
    """Hyperparameters for tuning revnet."""
    rhp.set_float('learning_rate', 0.05, 0.2, scale=rhp.LOG_SCALE)
    rhp.set_float('weight_decay', 1e-5, 1e-3, scale=rhp.LOG_SCALE)
    rhp.set_discrete('num_channels_init_block', [64, 128])
    return rhp
def subtract(self, years=0, months=0, weeks=0, days=0,
             hours=0, minutes=0, seconds=0, microseconds=0):
    """Remove duration from the instance.

    :param years: The number of years
    :type years: int
    :param months: The number of months
    :type months: int
    :param weeks: The number of weeks
    :type weeks: int
    :param days: The number of days
    :type days: int
    :param hours: The number of hours
    :type hours: int
    :param minutes: The number of minutes
    :type minutes: int
    :param seconds: The number of seconds
    :type seconds: int
    :param microseconds: The number of microseconds
    :type microseconds: int

    :rtype: DateTime"""
    return self.add(years=-years, months=-months, weeks=-weeks, days=-days,
                    hours=-hours, minutes=-minutes, seconds=-seconds,
                    microseconds=-microseconds)
def _set_pvlan_tag ( self , v , load = False ) :
"""Setter method for pvlan _ tag , mapped from YANG variable / interface / port _ channel / switchport / private _ vlan / trunk / pvlan _ tag ( container )
If this variable is read - only ( config : false ) in the
source YANG file , then _ set _ pvlan _ tag is considered as a private
method . Backends looking to populate this variable should
do so via calling thisObj . _ set _ pvlan _ tag ( ) directly .
YANG Description : This specifies vlan tagging characteristics for a
trunk port ."""
|
if hasattr ( v , "_utype" ) :
v = v . _utype ( v )
try :
t = YANGDynClass ( v , base = pvlan_tag . pvlan_tag , is_container = 'container' , presence = False , yang_name = "pvlan-tag" , rest_name = "tag" , parent = self , path_helper = self . _path_helper , extmethods = self . _extmethods , register_paths = True , extensions = { u'tailf-common' : { u'info' : u'Enable tagging' , u'alt-name' : u'tag' , u'cli-incomplete-no' : None , u'cli-incomplete-command' : None } } , namespace = 'urn:brocade.com:mgmt:brocade-interface' , defining_module = 'brocade-interface' , yang_type = 'container' , is_config = True )
except ( TypeError , ValueError ) :
raise ValueError ( { 'error-string' : """pvlan_tag must be of a type compatible with container""" , 'defined-type' : "container" , 'generated-type' : """YANGDynClass(base=pvlan_tag.pvlan_tag, is_container='container', presence=False, yang_name="pvlan-tag", rest_name="tag", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Enable tagging', u'alt-name': u'tag', u'cli-incomplete-no': None, u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='container', is_config=True)""" , } )
self . __pvlan_tag = t
if hasattr ( self , '_set' ) :
self . _set ( )
|
def authorize_security_group_egress ( DryRun = None , GroupId = None , SourceSecurityGroupName = None , SourceSecurityGroupOwnerId = None , IpProtocol = None , FromPort = None , ToPort = None , CidrIp = None , IpPermissions = None ) :
"""[ EC2 - VPC only ] Adds one or more egress rules to a security group for use with a VPC . Specifically , this action permits instances to send traffic to one or more destination IPv4 or IPv6 CIDR address ranges , or to one or more destination security groups for the same VPC . This action doesn ' t apply to security groups for use in EC2 - Classic . For more information , see Security Groups for Your VPC in the Amazon Virtual Private Cloud User Guide . For more information about security group limits , see Amazon VPC Limits .
Each rule consists of the protocol ( for example , TCP ) , plus either a CIDR range or a source group . For the TCP and UDP protocols , you must also specify the destination port or port range . For the ICMP protocol , you must also specify the ICMP type and code . You can use - 1 for the type or code to mean all types or all codes .
Rule changes are propagated to affected instances as quickly as possible . However , a small delay might occur .
See also : AWS API Documentation
: example : response = client . authorize _ security _ group _ egress (
DryRun = True | False ,
GroupId = ' string ' ,
SourceSecurityGroupName = ' string ' ,
SourceSecurityGroupOwnerId = ' string ' ,
IpProtocol = ' string ' ,
FromPort = 123,
ToPort = 123,
CidrIp = ' string ' ,
IpPermissions = [
' IpProtocol ' : ' string ' ,
' FromPort ' : 123,
' ToPort ' : 123,
' UserIdGroupPairs ' : [
' UserId ' : ' string ' ,
' GroupName ' : ' string ' ,
' GroupId ' : ' string ' ,
' VpcId ' : ' string ' ,
' VpcPeeringConnectionId ' : ' string ' ,
' PeeringStatus ' : ' string '
' IpRanges ' : [
' CidrIp ' : ' string '
' Ipv6Ranges ' : [
' CidrIpv6 ' : ' string '
' PrefixListIds ' : [
' PrefixListId ' : ' string '
: type DryRun : boolean
: param DryRun : Checks whether you have the required permissions for the action , without actually making the request , and provides an error response . If you have the required permissions , the error response is DryRunOperation . Otherwise , it is UnauthorizedOperation .
: type GroupId : string
: param GroupId : [ REQUIRED ]
The ID of the security group .
: type SourceSecurityGroupName : string
: param SourceSecurityGroupName : The name of a destination security group . To authorize outbound access to a destination security group , we recommend that you use a set of IP permissions instead .
: type SourceSecurityGroupOwnerId : string
: param SourceSecurityGroupOwnerId : The AWS account number for a destination security group . To authorize outbound access to a destination security group , we recommend that you use a set of IP permissions instead .
: type IpProtocol : string
: param IpProtocol : The IP protocol name or number . We recommend that you specify the protocol in a set of IP permissions instead .
: type FromPort : integer
: param FromPort : The start of port range for the TCP and UDP protocols , or an ICMP type number . We recommend that you specify the port range in a set of IP permissions instead .
: type ToPort : integer
: param ToPort : The end of port range for the TCP and UDP protocols , or an ICMP type number . We recommend that you specify the port range in a set of IP permissions instead .
: type CidrIp : string
: param CidrIp : The CIDR IPv4 address range . We recommend that you specify the CIDR range in a set of IP permissions instead .
: type IpPermissions : list
: param IpPermissions : A set of IP permissions . You can ' t specify a destination security group and a CIDR IP address range .
( dict ) - - Describes a security group rule .
IpProtocol ( string ) - - The IP protocol name ( tcp , udp , icmp ) or number ( see Protocol Numbers ) .
[ EC2 - VPC only ] Use - 1 to specify all protocols . When authorizing security group rules , specifying - 1 or a protocol number other than tcp , udp , icmp , or 58 ( ICMPv6 ) allows traffic on all ports , regardless of any port range you specify . For tcp , udp , and icmp , you must specify a port range . For 58 ( ICMPv6 ) , you can optionally specify a port range ; if you don ' t , traffic for all types and codes is allowed when authorizing rules .
FromPort ( integer ) - - The start of port range for the TCP and UDP protocols , or an ICMP / ICMPv6 type number . A value of - 1 indicates all ICMP / ICMPv6 types .
ToPort ( integer ) - - The end of port range for the TCP and UDP protocols , or an ICMP / ICMPv6 code . A value of - 1 indicates all ICMP / ICMPv6 codes for the specified ICMP type .
UserIdGroupPairs ( list ) - - One or more security group and AWS account ID pairs .
( dict ) - - Describes a security group and AWS account ID pair .
UserId ( string ) - - The ID of an AWS account . For a referenced security group in another VPC , the account ID of the referenced security group is returned .
[ EC2 - Classic ] Required when adding or removing rules that reference a security group in another AWS account .
GroupName ( string ) - - The name of the security group . In a request , use this parameter for a security group in EC2 - Classic or a default VPC only . For a security group in a nondefault VPC , use the security group ID .
GroupId ( string ) - - The ID of the security group .
VpcId ( string ) - - The ID of the VPC for the referenced security group , if applicable .
VpcPeeringConnectionId ( string ) - - The ID of the VPC peering connection , if applicable .
PeeringStatus ( string ) - - The status of a VPC peering connection , if applicable .
IpRanges ( list ) - - One or more IPv4 ranges .
( dict ) - - Describes an IPv4 range .
CidrIp ( string ) - - The IPv4 CIDR range . You can either specify a CIDR range or a source security group , not both . To specify a single IPv4 address , use the / 32 prefix .
Ipv6Ranges ( list ) - - [ EC2 - VPC only ] One or more IPv6 ranges .
( dict ) - - [ EC2 - VPC only ] Describes an IPv6 range .
CidrIpv6 ( string ) - - The IPv6 CIDR range . You can either specify a CIDR range or a source security group , not both . To specify a single IPv6 address , use the / 128 prefix .
PrefixListIds ( list ) - - ( Valid for AuthorizeSecurityGroupEgress , RevokeSecurityGroupEgress and DescribeSecurityGroups only ) One or more prefix list IDs for an AWS service . In an AuthorizeSecurityGroupEgress request , this is the AWS service that you want to access through a VPC endpoint from instances associated with the security group .
( dict ) - - The ID of the prefix .
PrefixListId ( string ) - - The ID of the prefix ."""
|
pass
|
def from_filename ( self , filename ) :
'''Build an IntentSchema from a file path
creates a new intent schema if the file does not exist , throws an error if the file
exists but cannot be loaded as a JSON'''
|
if os . path . exists ( filename ) :
with open ( filename ) as fp :
return IntentSchema ( json . load ( fp , object_pairs_hook = OrderedDict ) )
else :
print ( 'File does not exist' )
return IntentSchema ( )
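A minimal usage sketch of the same load-or-create pattern, kept standalone so it does not depend on the IntentSchema class; the file name below is hypothetical.

import json
import os
from collections import OrderedDict

def load_or_new(filename):
    # Load an ordered JSON document if the file exists, otherwise start empty.
    if os.path.exists(filename):
        with open(filename) as fp:
            return json.load(fp, object_pairs_hook=OrderedDict)
    return OrderedDict()

schema = load_or_new('interaction_model.json')  # hypothetical file name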
|
async def make_request ( self , service : str , method : str , path : str , body : bytes = None , query : str = None , headers : dict = None , correlation_id : str = None , content_type : str = None , timeout : int = 30 , ** kwargs ) -> webtypes . Response :
"""Method for actually making a request
: param service : service to make request too
: param method : HTTP method : GET / PUT / POST etc .
: param path : routing path .
Should support dots ` foo . 2 . bars ` or slashes ` foo / 2 / bars `
: param body : request body . Bytes - like object
: param query : query string . Example : ` foo = bar & cabbage = green `
: param headers : Dictionary of headers
: param correlation _ id :
: param content _ type : example : ` application / json `
: param timeout : time to wait for response in seconds before getting
an asyncio . TimeoutError
: param kwargs : Should accept * * kwargs for compatibility with
other possible options on other transports
( for example , http might need a ` port ` option )
: return :"""
| |
def format_time ( timestamp , precision = datetime . timedelta ( seconds = 1 ) ) :
'''Formats timedelta / datetime / seconds
> > > format _ time ( ' 1 ' )
'0:00:01'
> > > format _ time ( 1.234)
'0:00:01'
> > > format _ time ( 1)
'0:00:01'
> > > format _ time ( datetime . datetime ( 2000 , 1 , 2 , 3 , 4 , 5 , 6 ) )
'2000-01-02 03:04:05'
> > > format _ time ( datetime . date ( 2000 , 1 , 2 ) )
'2000-01-02'
> > > format _ time ( datetime . timedelta ( seconds = 3661 ) )
'1:01:01'
> > > format _ time ( None )
> > > format _ time ( format _ time ) # doctest : + ELLIPSIS
Traceback ( most recent call last ) :
TypeError : Unknown type . . .'''
|
precision_seconds = precision . total_seconds ( )
if isinstance ( timestamp , six . string_types + six . integer_types + ( float , ) ) :
try :
castfunc = six . integer_types [ - 1 ]
timestamp = datetime . timedelta ( seconds = castfunc ( timestamp ) )
except OverflowError : # pragma : no cover
timestamp = None
if isinstance ( timestamp , datetime . timedelta ) :
seconds = timestamp . total_seconds ( )
# Truncate the number to the given precision
seconds = seconds - ( seconds % precision_seconds )
return str ( datetime . timedelta ( seconds = seconds ) )
elif isinstance ( timestamp , datetime . datetime ) : # Python 2 doesn ' t have the timestamp method
if hasattr ( timestamp , 'timestamp' ) : # pragma : no cover
seconds = timestamp . timestamp ( )
else :
seconds = timedelta_to_seconds ( timestamp - epoch )
# Truncate the number to the given precision
seconds = seconds - ( seconds % precision_seconds )
try : # pragma : no cover
if six . PY3 :
dt = datetime . datetime . fromtimestamp ( seconds )
else :
dt = datetime . datetime . utcfromtimestamp ( seconds )
except ValueError : # pragma : no cover
dt = datetime . datetime . max
return str ( dt )
elif isinstance ( timestamp , datetime . date ) :
return str ( timestamp )
elif timestamp is None :
return '--:--:--'
else :
raise TypeError ( 'Unknown type %s: %r' % ( type ( timestamp ) , timestamp ) )
|
def allowed_image ( self , module_id ) :
"""Given a module id , determine whether the image is allowed to be built ."""
|
shutit_global . shutit_global_object . yield_to_draw ( )
self . log ( "In allowed_image: " + module_id , level = logging . DEBUG )
cfg = self . cfg
if self . build [ 'ignoreimage' ] :
self . log ( "ignoreimage == true, returning true" + module_id , level = logging . DEBUG )
return True
self . log ( str ( cfg [ module_id ] [ 'shutit.core.module.allowed_images' ] ) , level = logging . DEBUG )
if cfg [ module_id ] [ 'shutit.core.module.allowed_images' ] : # Try allowed images as regexps
for regexp in cfg [ module_id ] [ 'shutit.core.module.allowed_images' ] :
if not shutit_util . check_regexp ( regexp ) :
self . fail ( 'Illegal regexp found in allowed_images: ' + regexp )
# pragma : no cover
if re . match ( '^' + regexp + '$' , self . target [ 'docker_image' ] ) :
return True
return False
|
def send_to_websocket ( self , data ) :
"""Send ( data ) directly to the websocket ."""
|
data = json . dumps ( data )
self . websocket . send ( data )
|
def get_all_users ( self , nid = None ) :
"""Get a listing of data for each user in a network ` nid `
: type nid : str
: param nid : This is the ID of the network to get users
from . This is optional and only to override the existing
` network _ id ` entered when the class was created
: returns : Python object containing returned data , a list
of dicts containing user data ."""
|
r = self . request ( method = "network.get_all_users" , nid = nid )
return self . _handle_error ( r , "Could not get users." )
|
def configureLogger ( logFolder , logFile ) :
'''Start the logger instance and configure it'''
|
# Set debug level
logLevel = 'DEBUG'
logger = logging . getLogger ( )
logger . setLevel ( logLevel )
# Format
formatter = logging . Formatter ( '%(asctime)s - %(levelname)s | %(name)s -> %(message)s' , '%Y-%m-%d %H:%M:%S' )
# Remove default handler to keep only clean one
for hdlr in logger . handlers :
logger . removeHandler ( hdlr )
# Create missing folder if needed
if not os . path . exists ( logFolder ) :
os . makedirs ( logFolder , 0o700 )
# CREATE CONSOLE HANDLER
# Create console handler
consoleh = logging . StreamHandler ( )
consoleh . setLevel ( logLevel )
consoleh . setFormatter ( formatter )
# Set our custom handler
logger . addHandler ( consoleh )
# CREATE FILE HANDLER
fileh = logging . FileHandler ( logFile , 'a' )
fileh . setLevel ( logLevel )
fileh . setFormatter ( formatter )
# Set our custom handler
logger . addHandler ( fileh )
|
def fixup_msg ( lvl , msg ) :
"""Fixup for this ERROR to a WARNING because it has a reasonable fallback .
WARNING : ROOT . TGClient . TGClient ] can ' t open display " localhost : 10.0 " , switching to batch mode . . .
In case you run from a remote ssh session , reconnect with ssh - Y"""
|
if "switching to batch mode..." in msg and lvl == logging . ERROR :
return logging . WARNING , msg
return lvl , msg
|
def diagnostic_send ( self , diagFl1 , diagFl2 , diagFl3 , diagSh1 , diagSh2 , diagSh3 , force_mavlink1 = False ) :
'''Configurable diagnostic messages .
diagFl1 : Diagnostic float 1 ( float )
diagFl2 : Diagnostic float 2 ( float )
diagFl3 : Diagnostic float 3 ( float )
diagSh1 : Diagnostic short 1 ( int16 _ t )
diagSh2 : Diagnostic short 2 ( int16 _ t )
diagSh3 : Diagnostic short 3 ( int16 _ t )'''
|
return self . send ( self . diagnostic_encode ( diagFl1 , diagFl2 , diagFl3 , diagSh1 , diagSh2 , diagSh3 ) , force_mavlink1 = force_mavlink1 )
|
def regular_subset ( spikes , n_spikes_max = None , offset = 0 ) :
"""Prune the current selection to get at most n _ spikes _ max spikes ."""
|
assert spikes is not None
# Nothing to do if the selection already satisfies n _ spikes _ max .
if n_spikes_max is None or len ( spikes ) <= n_spikes_max : # pragma : no cover
return spikes
step = math . ceil ( np . clip ( 1. / n_spikes_max * len ( spikes ) , 1 , len ( spikes ) ) )
step = int ( step )
# Note : randomly - changing selections are confusing . . .
my_spikes = spikes [ offset : : step ] [ : n_spikes_max ]
assert len ( my_spikes ) <= len ( spikes )
assert len ( my_spikes ) <= n_spikes_max
return my_spikes
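A standalone sketch of the same strided-subsampling idea, assuming NumPy is available; the array size and limits below are illustrative.

import math
import numpy as np

spikes = np.arange(1000)                     # 1000 spike ids
n_spikes_max, offset = 100, 0
step = int(math.ceil(np.clip(len(spikes) / n_spikes_max, 1, len(spikes))))
subset = spikes[offset::step][:n_spikes_max]
assert len(subset) <= n_spikes_max           # at most n_spikes_max, evenly spaced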
|
def calls ( ctx , obj , limit ) :
"""List call / short positions of an account or an asset"""
|
if obj . upper ( ) == obj : # Asset
from bitshares . asset import Asset
asset = Asset ( obj , full = True )
calls = asset . get_call_orders ( limit )
t = [ [ "acount" , "debt" , "collateral" , "call price" , "ratio" ] ]
for call in calls :
t . append ( [ str ( call [ "account" ] [ "name" ] ) , str ( call [ "debt" ] ) , str ( call [ "collateral" ] ) , str ( call [ "call_price" ] ) , "%.2f" % ( call [ "ratio" ] ) , ] )
print_table ( t )
else : # Account
from bitshares . dex import Dex
dex = Dex ( bitshares_instance = ctx . bitshares )
calls = dex . list_debt_positions ( account = obj )
t = [ [ "debt" , "collateral" , "call price" , "ratio" ] ]
for symbol in calls :
t . append ( [ str ( calls [ symbol ] [ "debt" ] ) , str ( calls [ symbol ] [ "collateral" ] ) , str ( calls [ symbol ] [ "call_price" ] ) , "%.2f" % ( calls [ symbol ] [ "ratio" ] ) , ] )
print_table ( t )
|
def dirsize_get ( l_filesWithoutPath , ** kwargs ) :
"""Sample callback that determines a directory size ."""
|
str_path = ""
for k , v in kwargs . items ( ) :
if k == 'path' :
str_path = v
d_ret = { }
l_size = [ ]
size = 0
for f in l_filesWithoutPath :
str_f = '%s/%s' % ( str_path , f )
if not os . path . islink ( str_f ) :
try :
size += os . path . getsize ( str_f )
except OSError :
pass
str_size = pftree . sizeof_fmt ( size )
return { 'status' : True , 'diskUsage_raw' : size , 'diskUsage_human' : str_size }
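A simpler standalone variant of the same idea for reference: sum the sizes of regular files directly under a directory while skipping symlinks. The helper name is made up for this sketch.

import os

def dir_size_bytes(path):
    # Sum sizes of regular (non-symlink) files directly under `path`.
    total = 0
    for name in os.listdir(path):
        full = os.path.join(path, name)
        if os.path.islink(full) or not os.path.isfile(full):
            continue
        try:
            total += os.path.getsize(full)
        except OSError:
            pass  # file vanished or is unreadable
    return total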
|
def _call_numpy ( self , x ) :
"""Return ` ` self ( x ) ` ` using numpy .
Parameters
x : ` numpy . ndarray `
Input array to be transformed
Returns
out : ` numpy . ndarray `
Result of the transform"""
|
if self . halfcomplex :
return np . fft . irfftn ( x , axes = self . axes )
else :
if self . sign == '+' :
return np . fft . ifftn ( x , axes = self . axes )
else :
return ( np . fft . fftn ( x , axes = self . axes ) / np . prod ( np . take ( self . domain . shape , self . axes ) ) )
|
def redis_from_url ( url ) :
"""Converts a redis URL used by celery into a ` redis . Redis ` object ."""
|
# Makes sure that we only try to import redis when we need
# to use it
import redis
url = url or ""
parsed_url = urlparse ( url )
if parsed_url . scheme != "redis" :
return None
kwargs = { }
match = PASS_HOST_PORT . match ( parsed_url . netloc )
if match . group ( 'password' ) is not None :
kwargs [ 'password' ] = match . group ( 'password' )
if match . group ( 'host' ) is not None :
kwargs [ 'host' ] = match . group ( 'host' )
if match . group ( 'port' ) is not None :
kwargs [ 'port' ] = int ( match . group ( 'port' ) )
if len ( parsed_url . path ) > 1 : # Removes " / " from the beginning
kwargs [ 'db' ] = int ( parsed_url . path [ 1 : ] )
return redis . StrictRedis ( ** kwargs )
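For reference, the standard library's urlparse already exposes the pieces the regex extracts above; a quick illustration with a made-up URL (Python 3 urllib shown here).

from urllib.parse import urlparse

parsed = urlparse("redis://:s3cret@localhost:6379/2")   # made-up URL
print(parsed.scheme)                  # 'redis'
print(parsed.hostname, parsed.port)   # 'localhost' 6379
print(parsed.password)                # 's3cret'
print(int(parsed.path[1:]))           # 2  -> database number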
|
def _compile_tag_re ( self ) :
"""Compile regex strings from device _ tag _ re option and return list of compiled regex / tag pairs"""
|
device_tag_list = [ ]
for regex_str , tags in iteritems ( self . _device_tag_re ) :
try :
device_tag_list . append ( [ re . compile ( regex_str , IGNORE_CASE ) , [ t . strip ( ) for t in tags . split ( ',' ) ] ] )
except TypeError :
self . log . warning ( '{} is not a valid regular expression and will be ignored' . format ( regex_str ) )
self . _device_tag_re = device_tag_list
|
def valid ( self , name ) :
"""Ensure a variable name is valid .
Note : Assumes variable names are ASCII , which isn ' t necessarily true in
Python 3.
Args :
name : A proposed variable name .
Returns :
A valid version of the name ."""
|
name = re . sub ( '[^0-9a-zA-Z_]' , '' , name )
if re . match ( '[0-9]' , name ) :
name = '_' + name
return name
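A quick demonstration of the sanitisation rules above on a couple of made-up names.

import re

def sanitize(name):
    # Keep only [0-9a-zA-Z_] and guard against a leading digit.
    name = re.sub('[^0-9a-zA-Z_]', '', name)
    if re.match('[0-9]', name):
        name = '_' + name
    return name

print(sanitize('2nd-value'))   # '_2ndvalue'
print(sanitize('ok name'))     # 'okname'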
|
def autoconf ( self ) :
"""Implements Munin Plugin Auto - Configuration Option .
@ return : True if plugin can be auto - configured , False otherwise ."""
|
fpminfo = PHPfpmInfo ( self . _host , self . _port , self . _user , self . _password , self . _monpath , self . _ssl )
return fpminfo is not None
|
def gen_client_id ( ) :
"""Generates random client ID
: return :"""
|
import random
gen_id = 'hbmqtt/'
for i in range ( 7 , 23 ) :
gen_id += chr ( random . randint ( 0 , 74 ) + 48 )
return gen_id
|
def remove_module_load ( state_dict ) :
"""create new OrderedDict that does not contain ` module . `"""
|
new_state_dict = OrderedDict ( )
for k , v in state_dict . items ( ) :
new_state_dict [ k [ 7 : ] ] = v
return new_state_dict
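The seven-character slice assumes every key carries the literal 'module.' prefix (as produced, for example, by a DataParallel-wrapped model); a tiny self-contained check:

from collections import OrderedDict

wrapped = OrderedDict([('module.fc.weight', 1), ('module.fc.bias', 2)])
unwrapped = OrderedDict((k[7:], v) for k, v in wrapped.items())   # len('module.') == 7
print(list(unwrapped))   # ['fc.weight', 'fc.bias']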
|
def CreateInstance ( r , mode , name , disk_template , disks , nics , ** kwargs ) :
"""Creates a new instance .
More details for parameters can be found in the RAPI documentation .
@ type mode : string
@ param mode : Instance creation mode
@ type name : string
@ param name : Hostname of the instance to create
@ type disk _ template : string
@ param disk _ template : Disk template for instance ( e . g . plain , diskless ,
file , or drbd )
@ type disks : list of dicts
@ param disks : List of disk definitions
@ type nics : list of dicts
@ param nics : List of NIC definitions
@ type dry _ run : bool
@ keyword dry _ run : whether to perform a dry run
@ type no _ install : bool
@ keyword no _ install : whether to create without installing OS ( true = don ' t install )
@ rtype : int
@ return : job id"""
|
if INST_CREATE_REQV1 not in r . features :
raise GanetiApiError ( "Cannot create Ganeti 2.1-style instances" )
query = { }
if kwargs . get ( "dry_run" ) :
query [ "dry-run" ] = 1
if kwargs . get ( "no_install" ) :
query [ "no-install" ] = 1
# Make a version 1 request .
body = { _REQ_DATA_VERSION_FIELD : 1 , "mode" : mode , "name" : name , "disk_template" : disk_template , "disks" : disks , "nics" : nics , }
conflicts = set ( kwargs . iterkeys ( ) ) & set ( body . iterkeys ( ) )
if conflicts :
raise GanetiApiError ( "Required fields can not be specified as" " keywords: %s" % ", " . join ( conflicts ) )
kwargs . pop ( "dry_run" , None )
body . update ( kwargs )
return r . request ( "post" , "/2/instances" , query = query , content = body )
|
def configure ( self , subscription_id , tenant_id , client_id = "" , client_secret = "" , environment = 'AzurePublicCloud' , mount_point = DEFAULT_MOUNT_POINT ) :
"""Configure the credentials required for the plugin to perform API calls to Azure .
These credentials will be used to query roles and create / delete service principals . Environment variables will
override any parameters set in the config .
Supported methods :
POST : / { mount _ point } / config . Produces : 204 ( empty body )
: param subscription _ id : The subscription id for the Azure Active Directory
: type subscription _ id : str | unicode
: param tenant _ id : The tenant id for the Azure Active Directory .
: type tenant _ id : str | unicode
: param client _ id : The OAuth2 client id to connect to Azure .
: type client _ id : str | unicode
: param client _ secret : The OAuth2 client secret to connect to Azure .
: type client _ secret : str | unicode
: param environment : The Azure environment . If not specified , Vault will use Azure Public Cloud .
: type environment : str | unicode
: param mount _ point : The OAuth2 client secret to connect to Azure .
: type mount _ point : str | unicode
: return : The response of the request .
: rtype : requests . Response"""
|
if environment not in VALID_ENVIRONMENTS :
error_msg = 'invalid environment argument provided "{arg}", supported environments: "{environments}"'
raise exceptions . ParamValidationError ( error_msg . format ( arg = environment , environments = ',' . join ( VALID_ENVIRONMENTS ) , ) )
params = { 'subscription_id' : subscription_id , 'tenant_id' : tenant_id , 'client_id' : client_id , 'client_secret' : client_secret , 'environment' : environment , }
api_path = '/v1/{mount_point}/config' . format ( mount_point = mount_point )
return self . _adapter . post ( url = api_path , json = params , )
|
def get_stp_mst_detail_output_msti_port_oper_bpdu_guard ( self , ** kwargs ) :
"""Auto Generated Code"""
|
config = ET . Element ( "config" )
get_stp_mst_detail = ET . Element ( "get_stp_mst_detail" )
config = get_stp_mst_detail
output = ET . SubElement ( get_stp_mst_detail , "output" )
msti = ET . SubElement ( output , "msti" )
instance_id_key = ET . SubElement ( msti , "instance-id" )
instance_id_key . text = kwargs . pop ( 'instance_id' )
port = ET . SubElement ( msti , "port" )
oper_bpdu_guard = ET . SubElement ( port , "oper-bpdu-guard" )
oper_bpdu_guard . text = kwargs . pop ( 'oper_bpdu_guard' )
callback = kwargs . pop ( 'callback' , self . _callback )
return callback ( config )
|
def _get_batch_representative ( items , key ) :
"""Retrieve a representative data item from a batch .
Handles standard bcbio cases ( a single data item ) and CWL cases with
batches that have a consistent variant file ."""
|
if isinstance ( items , dict ) :
return items , items
else :
vals = set ( [ ] )
out = [ ]
for data in items :
if key in data :
vals . add ( data [ key ] )
out . append ( data )
if len ( vals ) != 1 :
raise ValueError ( "Incorrect values for %s: %s" % ( key , list ( vals ) ) )
return out [ 0 ] , items
|
def write_can_msg ( self , channel , can_msg ) :
"""Transmits one ore more CAN messages through the specified CAN channel of the device .
: param int channel :
CAN channel , which is to be used ( : data : ` Channel . CHANNEL _ CH0 ` or : data : ` Channel . CHANNEL _ CH1 ` ) .
: param list ( CanMsg ) can _ msg : List of CAN message structure ( see structure : class : ` CanMsg ` ) .
: return : The number of successfully transmitted CAN messages .
: rtype : int"""
|
c_can_msg = ( CanMsg * len ( can_msg ) ) ( * can_msg )
c_count = DWORD ( len ( can_msg ) )
UcanWriteCanMsgEx ( self . _handle , channel , c_can_msg , c_count )
return c_count
|
def list_templates ( self , offset = 0 , count = 20 ) :
"""获取本账号内所有模板
详情请参考
https : / / open . weixin . qq . com / cgi - bin / showdocument ? action = dir _ list & id = open1500465446 _ j4CgR
: param offset : 用于分页 , 表示起始量 , 最小值为0
: type offset : int
: param count : 用于分页 , 表示拉取数量 , 最大值为20
: type count : int
: return : 模板列表
: rtype : list [ dict ]"""
|
return self . _post ( 'cgi-bin/wxopen/template/list' , data = { 'offset' : offset , 'count' : count , } , result_processor = lambda x : x [ 'list' ] , )
|
def delete ( cls , ** kwargs ) :
'''If a record matching the instance id exists in the database , delete it .'''
|
q = cls . _get_instance ( ** kwargs )
if q :
_action_and_commit ( q , session . delete )
|
def _comm ( self , thermostat = False , kp = 0.06 , ki = 0.0075 , kd = 0.01 , heater_segments = 8 , ext_sw_heater_drive = False , update_data_event = None ) :
"""Do not call this directly - call auto _ connect ( ) , which will spawn
comm ( ) for you .
This is the main communications loop to the roaster .
whenever a valid packet is received from the device , if an
update _ data _ event is available , it will be signalled .
Args :
thermostat ( bool ) : thermostat mode .
if set to True , turns on thermostat mode . In thermostat
mode , freshroastsr700 takes control of heat _ setting and does
software PID control to hit the demanded target _ temp .
ext _ sw _ heater _ drive ( bool ) : enable direct control over the internal
heat _ controller object . Defaults to False . When set to True , the
thermostat field is IGNORED , and assumed to be False . Direct
control over the software heater _ level means that the
PID controller cannot control the heater . Since thermostat and
ext _ sw _ heater _ drive cannot be allowed to both be True , this arg
is given precedence over the thermostat arg .
kp ( float ) : Kp value to use for PID control . Defaults to 0.06.
ki ( float ) : Ki value to use for PID control . Defaults to 0.0075.
kd ( float ) : Kd value to use for PID control . Defaults to 0.01.
heater _ segments ( int ) : the pseudo - control range for the internal
heat _ controller object . Defaults to 8.
update _ data _ event ( multiprocessing . Event ) : If set , allows the
comm _ process to signal to the parent process that new device data
is available .
Returns :
nothing"""
|
# since this process is started with daemon = True , it should exit
# when the owning process terminates . Therefore , safe to loop forever .
while not self . _teardown . value : # waiting for command to attempt connect
# print ( " waiting for command to attempt connect " )
while self . _attempting_connect . value == self . CA_NONE :
time . sleep ( 0.25 )
if self . _teardown . value :
break
# if we ' re tearing down , bail now .
if self . _teardown . value :
break
# we got the command to attempt to connect
# change state to ' attempting _ connect '
self . _connect_state . value = self . CS_ATTEMPTING_CONNECT
# attempt connection
if self . CA_AUTO == self . _attempting_connect . value : # this call will block until a connection is achieved
# it will also set _ connect _ state to CS _ CONNECTING
# if appropriate
if self . _auto_connect ( ) : # when we unblock , it is an indication of a successful
# connection
self . _connected . value = 1
self . _connect_state . value = self . CS_CONNECTED
else : # failure , normally due to a timeout
self . _connected . value = 0
self . _connect_state . value = self . CS_NOT_CONNECTED
# we failed to connect - start over from the top
# reset flag
self . _attempting_connect . value = self . CA_NONE
continue
elif self . CA_SINGLE_SHOT == self . _attempting_connect . value : # try once , now , if failure , start the big loop over
try :
self . _connect ( )
self . _connected . value = 1
self . _connect_state . value = self . CS_CONNECTED
except exceptions . RoasterLookupError :
self . _connected . value = 0
self . _connect_state . value = self . CS_NOT_CONNECTED
if self . _connect_state . value != self . CS_CONNECTED : # we failed to connect - start over from the top
# reset flag
self . _attempting_connect . value = self . CA_NONE
continue
else : # shouldn ' t be here
# reset flag
self . _attempting_connect . value = self . CA_NONE
continue
# We are connected !
# print ( " We are connected ! " )
# reset flag right away
self . _attempting_connect . value = self . CA_NONE
# Initialize PID controller if thermostat function was specified at
# init time
pidc = None
heater = None
if ( thermostat ) :
pidc = pid . PID ( kp , ki , kd , Output_max = heater_segments , Output_min = 0 )
if thermostat or ext_sw_heater_drive :
heater = heat_controller ( number_of_segments = heater_segments )
read_state = self . LOOKING_FOR_HEADER_1
r = [ ]
write_errors = 0
read_errors = 0
while not self . _disconnect . value :
start = datetime . datetime . now ( )
# write to device
if not self . _write_to_device ( ) :
logging . error ( 'comm - _write_to_device() failed!' )
write_errors += 1
if write_errors > 3 : # it ' s time to consider the device as being " gone "
logging . error ( 'comm - 3 successive write ' 'failures, disconnecting.' )
self . _disconnect . value = 1
continue
else : # reset write _ errors
write_errors = 0
# read from device
try :
while self . _ser . in_waiting :
_byte = self . _ser . read ( 1 )
read_state , r , err = ( self . _process_reponse_byte ( read_state , _byte , r , update_data_event ) )
except IOError : # typically happens when device is suddenly unplugged
logging . error ( 'comm - read from device failed!' )
read_errors += 1
if read_errors > 3 : # it ' s time to consider the device as being " gone "
logging . error ( 'comm - 3 successive read ' 'failures, disconnecting.' )
self . _disconnect . value = 1
continue
else :
read_errors = 0
# next , drive SW heater when using
# thermostat mode ( PID controller calcs )
# or in external sw heater drive mode ,
# when roasting .
if thermostat or ext_sw_heater_drive :
if 'roasting' == self . get_roaster_state ( ) :
if heater . about_to_rollover ( ) : # it ' s time to use the PID controller value
# and set new output level on heater !
if ext_sw_heater_drive : # read user - supplied value
heater . heat_level = self . _heater_level . value
else : # thermostat
output = pidc . update ( self . current_temp , self . target_temp )
heater . heat_level = output
# make this number visible to other processes . . .
self . _heater_level . value = heater . heat_level
# read bang - bang heater output array element & apply it
if heater . generate_bangbang_output ( ) : # ON
self . heat_setting = 3
else : # OFF
self . heat_setting = 0
else : # for all other states , heat _ level = OFF
heater . heat_level = 0
# make this number visible to other processes . . .
self . _heater_level . value = heater . heat_level
self . heat_setting = 0
# calculate sleep time to stick to 0.25sec period
comp_time = datetime . datetime . now ( ) - start
sleep_duration = 0.25 - comp_time . total_seconds ( )
if sleep_duration > 0 :
time . sleep ( sleep_duration )
self . _ser . close ( )
# reset disconnect flag
self . _disconnect . value = 0
# reset connection values
self . _connected . value = 0
self . _connect_state . value = self . CS_NOT_CONNECTED
|
def read_uint16 ( self , little_endian = True ) :
"""Read 2 byte as an unsigned integer value from the stream .
Args :
little _ endian ( bool ) : specify the endianness . ( Default ) Little endian .
Returns :
int :"""
|
if little_endian :
endian = "<"
else :
endian = ">"
return self . unpack ( '%sH' % endian , 2 )
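The format string boils down to struct's '<H' / '>H'; a standalone sketch with a fixed two-byte buffer:

import struct

raw = b'\x34\x12'
little = struct.unpack('<H', raw)[0]   # 0x1234 == 4660
big = struct.unpack('>H', raw)[0]      # 0x3412 == 13330
print(little, big)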
|
def to_ds9 ( self , free = 'box' , fixed = 'cross' , frame = 'fk5' , color = 'green' , header = True ) :
"""Returns a list of ds9 region definitions
Parameters
free : str
one of the supported ds9 point symbols , used for free sources , see here : http : / / ds9 . si . edu / doc / ref / region . html
fixed : str
as free but for fixed sources
frame : str
typically fk5 , more to be implemented
color : str
color used for symbols ( only ds9 compatible colors )
header : bool
if True , will prepend a global header line .
Returns
lines : list
list of regions ( and header if requested )"""
|
# todo : add support for extended sources ? !
allowed_symbols = [ 'circle' , 'box' , 'diamond' , 'cross' , 'x' , 'arrow' , 'boxcircle' ]
# adding some checks .
assert free in allowed_symbols , "symbol %s not supported" % free
assert fixed in allowed_symbols , "symbol %s not supported" % fixed
lines = [ ]
if header :
lines . append ( "global color=%s" % color )
for src in self . get_sources ( ) : # self . get _ sources will return both Source , but also IsoSource and MapCube , in which case the sources
# should be ignored ( since they are by construction all - sky and have no corresponding ds9 region string )
if not isinstance ( src , Source ) :
continue
# otherwise get ra , dec
ra , dec = src . radec
line = "%s; point( %1.5f, %1.5f) # point=%s text={%s} color=%s" % ( frame , ra , dec , free if src . is_free else fixed , src . name , color )
lines . append ( line )
return lines
|
def get_download_url ( self , instance , default = None ) :
"""Calculate the download url"""
|
download = default
# calculate the download url
download = "{url}/@@download/{fieldname}/{filename}" . format ( url = api . get_url ( instance ) , fieldname = self . get_field_name ( ) , filename = self . get_filename ( instance ) , )
return download
|
def do_dictsort ( value , case_sensitive = False , by = 'key' , reverse = False ) :
"""Sort a dict and yield ( key , value ) pairs . Because python dicts are
unsorted you may want to use this function to order them by either
key or value :
. . sourcecode : : jinja
{ % for item in mydict | dictsort % }
sort the dict by key , case insensitive
{ % for item in mydict | dictsort ( reverse = true ) % }
sort the dict by key , case insensitive , reverse order
{ % for item in mydict | dictsort ( true ) % }
sort the dict by key , case sensitive
{ % for item in mydict | dictsort ( false , ' value ' ) % }
sort the dict by value , case insensitive"""
|
if by == 'key' :
pos = 0
elif by == 'value' :
pos = 1
else :
raise FilterArgumentError ( 'You can only sort by either "key" or "value"' )
def sort_func ( item ) :
value = item [ pos ]
if not case_sensitive :
value = ignore_case ( value )
return value
return sorted ( value . items ( ) , key = sort_func , reverse = reverse )
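Outside of Jinja, the same ordering can be reproduced with plain sorted(); a small illustration with an example dict:

mydict = {'b': 3, 'A': 1, 'c': 2}

by_key = sorted(mydict.items(), key=lambda kv: kv[0].lower())
by_value = sorted(mydict.items(), key=lambda kv: kv[1], reverse=True)
print(by_key)    # [('A', 1), ('b', 3), ('c', 2)]
print(by_value)  # [('b', 3), ('c', 2), ('A', 1)]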
|
def getMusicAlbumList ( self , tagtype = 0 , startnum = 0 , pagingrow = 100 , dummy = 51467 ) :
"""Get music album list .
: param tagtype : ?
: return : ` ` metadata ` ` or ` ` False ` `
: metadata :
- u ' album ' : u ' Greatest Hits Coldplay ' ,
- u ' artist ' : u ' Coldplay ' ,
- u ' href ' : u ' / Coldplay - Clocks . mp3 ' ,
- u ' musiccount ' : 1,
- u ' resourceno ' : 12459548378,
- u ' tagtype ' : 1,
- u ' thumbnailpath ' : u ' N ' ,
- u ' totalpath ' : u ' / '"""
|
data = { 'tagtype' : tagtype , 'startnum' : startnum , 'pagingrow' : pagingrow , 'userid' : self . user_id , 'useridx' : self . useridx , 'dummy' : dummy , }
s , metadata = self . POST ( 'getMusicAlbumList' , data )
if s is True :
return metadata
else :
return False
|
def delete ( self ) :
"""Del or Backspace pressed . Delete selection"""
|
with self . _qpart :
for cursor in self . cursors ( ) :
if cursor . hasSelection ( ) :
cursor . deleteChar ( )
|
def lemmatize ( self , input_text , return_raw = False , return_string = False ) :
"""Take incoming string or list of tokens . Lookup done against a
key - value list of lemmata - headword . If a string , tokenize with
` ` PunktLanguageVars ( ) ` ` . If a final period appears on a token , remove
it , then re - add once replacement done .
TODO : rm check for final period , change PunktLanguageVars ( ) to nltk _ tokenize _ words ( )"""
|
assert type ( input_text ) in [ list , str ] , logger . error ( 'Input must be a list or string.' )
if type ( input_text ) is str :
punkt = PunktLanguageVars ( )
tokens = punkt . word_tokenize ( input_text )
else :
tokens = input_text
lemmatized_tokens = [ ]
for token in tokens : # check for final period
final_period = False
if token [ - 1 ] == '.' :
final_period = True
token = token [ : - 1 ]
# look for token in lemma dict keys
if token . lower ( ) in self . lemmata . keys ( ) :
headword = self . lemmata [ token . lower ( ) ]
# re - add final period if rm ' d
if final_period :
headword += '.'
# append to return list
if not return_raw :
lemmatized_tokens . append ( headword )
else :
lemmatized_tokens . append ( token + '/' + headword )
# if token not found in lemma - headword list
else : # re - add final period if rm ' d
if final_period :
token += '.'
if not return_raw :
lemmatized_tokens . append ( token )
else :
lemmatized_tokens . append ( token + '/' + token )
if not return_string :
return lemmatized_tokens
elif return_string :
return ' ' . join ( lemmatized_tokens )
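The core of the method is a plain dictionary lookup; a minimal sketch with a made-up two-entry lexicon (real lemmata tables are much larger):

lemmata = {'puellae': 'puella', 'amant': 'amo'}   # made-up mini lexicon

def lookup_lemmas(tokens):
    # Replace each token with its headword when known, keep it unchanged otherwise.
    return [lemmata.get(tok.lower(), tok) for tok in tokens]

print(lookup_lemmas(['Puellae', 'amant', 'rosas']))   # ['puella', 'amo', 'rosas']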
|
def full_split ( text , regex ) :
"""Split the text by the regex , keeping all parts .
The parts should re - join back into the original text .
> > > list ( full _ split ( ' word ' , re . compile ( ' & . * ? ' ) ) )
[ ' word ' ]"""
|
while text :
m = regex . search ( text )
if not m :
yield text
break
left = text [ : m . start ( ) ]
middle = text [ m . start ( ) : m . end ( ) ]
right = text [ m . end ( ) : ]
if left :
yield left
if middle :
yield middle
text = right
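Assuming full_split from above is in scope, a short usage example showing that the pieces re-join to the original text:

import re

parts = list(full_split('a, b, c', re.compile(r',\s*')))
print(parts)           # ['a', ', ', 'b', ', ', 'c']
print(''.join(parts))  # 'a, b, c'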
|
def printed_out ( self , name ) :
"""Create a string representation of the action"""
|
opt = self . variables ( ) . optional_namestring ( )
req = self . variables ( ) . required_namestring ( )
out = ''
out += '| |\n'
out += '| |---{}({}{})\n' . format ( name , req , opt )
if self . description :
out += '| | {}\n' . format ( self . description )
return out
|
def find_config_files ( self ) :
"""Find as many configuration files as should be processed for this
platform , and return a list of filenames in the order in which they
should be parsed . The filenames returned are guaranteed to exist
( modulo nasty race conditions ) .
There are three possible config files : distutils . cfg in the
Distutils installation directory ( ie . where the top - level
Distutils _ _ inst _ _ . py file lives ) , a file in the user ' s home
directory named . pydistutils . cfg on Unix and pydistutils . cfg
on Windows / Mac ; and setup . cfg in the current directory .
The file in the user ' s home directory can be disabled with the
- - no - user - cfg option ."""
|
files = [ ]
check_environ ( )
# Where to look for the system - wide Distutils config file
sys_dir = os . path . dirname ( sys . modules [ 'distutils' ] . __file__ )
# Look for the system config file
sys_file = os . path . join ( sys_dir , "distutils.cfg" )
if os . path . isfile ( sys_file ) :
files . append ( sys_file )
# What to call the per - user config file
if os . name == 'posix' :
user_filename = ".pydistutils.cfg"
else :
user_filename = "pydistutils.cfg"
# And look for the user config file
if self . want_user_cfg :
user_file = os . path . join ( os . path . expanduser ( '~' ) , user_filename )
if os . path . isfile ( user_file ) :
files . append ( user_file )
# All platforms support local setup . cfg
local_file = "setup.cfg"
if os . path . isfile ( local_file ) :
files . append ( local_file )
local_dev_file = "setup-dev.cfg"
if os . path . isfile ( local_dev_file ) :
files . append ( local_dev_file )
if DEBUG :
self . announce ( "using config files: %s" % ', ' . join ( files ) )
return files
|
def prog ( text ) :
"""Decorator used to specify the program name for the console script
help message .
: param text : The text to use for the program name ."""
|
def decorator ( func ) :
adaptor = ScriptAdaptor . _get_adaptor ( func )
adaptor . prog = text
return func
return decorator
|
def add_values_to_run_xml ( self , run ) :
"""This function adds the result values to the XML representation of a run ."""
|
runElem = run . xml
for elem in list ( runElem ) :
runElem . remove ( elem )
self . add_column_to_xml ( runElem , 'status' , run . status )
self . add_column_to_xml ( runElem , 'cputime' , run . cputime )
self . add_column_to_xml ( runElem , 'walltime' , run . walltime )
self . add_column_to_xml ( runElem , '@category' , run . category )
# hidden
self . add_column_to_xml ( runElem , '' , run . values )
for column in run . columns :
self . add_column_to_xml ( runElem , column . title , column . value )
# Sort child elements by hidden and title attributes
runElem [ : ] = sorted ( runElem , key = lambda elem : ( elem . get ( 'hidden' , '' ) , elem . get ( 'title' ) ) )
|
def __is_video_app_launch_directive_present ( self ) : # type : ( ) - > bool
"""Checks if the video app launch directive is present or not .
: return : boolean to show if video app launch directive is
present or not .
: rtype : bool"""
|
if self . response . directives is None :
return False
for directive in self . response . directives :
if ( directive is not None and directive . object_type == "VideoApp.Launch" ) :
return True
return False
|
def get_declared_fields ( bases , attrs ) :
"""Find all fields and return them as a dictionary .
note : : this function is copied and modified
from django . forms . get _ declared _ fields"""
|
def is_field ( prop ) :
return isinstance ( prop , forms . Field ) or isinstance ( prop , BaseRepresentation )
fields = [ ( field_name , attrs . pop ( field_name ) ) for field_name , obj in attrs . items ( ) if is_field ( obj ) ]
# add fields from base classes :
for base in bases [ : : - 1 ] :
if hasattr ( base , 'base_fields' ) :
fields = base . base_fields . items ( ) + fields
return dict ( fields )
|
def _check_rename_constraints ( self , old_key , new_key ) :
"""Check the rename constraints , and return whether or not the rename
can proceed .
If the new key is already present , that is an error .
If the old key is absent , we debug log and return False , assuming it ' s
a temp table being renamed .
: param _ ReferenceKey old _ key : The existing key , to rename from .
: param _ ReferenceKey new _ key : The new key , to rename to .
: return bool : If the old relation exists for renaming .
: raises InternalError : If the new key is already present ."""
|
if new_key in self . relations :
dbt . exceptions . raise_cache_inconsistent ( 'in rename, new key {} already in cache: {}' . format ( new_key , list ( self . relations . keys ( ) ) ) )
if old_key not in self . relations :
logger . debug ( 'old key {} not found in self.relations, assuming temporary' . format ( old_key ) )
return False
return True
|