def send_raw_email(self, raw_message, source=None, destinations=None):
    """Sends an email message, with header and content specified by the
    client. The SendRawEmail action is useful for sending multipart MIME
    emails, with attachments or inline content. The raw text of the message
    must comply with Internet email standards; otherwise, the message
    cannot be sent.

    :type source: string
    :param source: The sender's email address. Amazon's docs say:
        If you specify the Source parameter, then bounce notifications and
        complaints will be sent to this email address. This takes precedence
        over any Return-Path header that you might include in the raw text of
        the message.

    :type raw_message: string
    :param raw_message: The raw text of the message. The client is
        responsible for ensuring the following:

        - Message must contain a header and a body, separated by a blank line.
        - All required header fields must be present.
        - Each part of a multipart MIME message must be formatted properly.
        - MIME content types must be among those supported by Amazon SES.
          Refer to the Amazon SES Developer Guide for more details.
        - Content must be base64-encoded, if MIME requires it.

    :type destinations: list of strings or string
    :param destinations: A list of destinations for the message.
    """
    params = {'RawMessage.Data': base64.b64encode(raw_message)}
    if source:
        params['Source'] = source
    if destinations:
        self._build_list_params(params, destinations, 'Destinations.member')
    return self._make_request('SendRawEmail', params)

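# Illustrative sketch (not from the original source): one way send_raw_email might be
# called, assuming a boto SES connection object named `ses_conn`. The MIME message is
# built with the standard-library email package; addresses are placeholders.
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText

def _example_send_raw_email(ses_conn):
    msg = MIMEMultipart()
    msg['Subject'] = 'Report'
    msg['From'] = 'sender@example.com'
    msg['To'] = 'recipient@example.com'
    msg.attach(MIMEText('See attached report.', 'plain'))
    # raw_message must contain headers and body; destinations may be a list of strings.
    return ses_conn.send_raw_email(
        raw_message=msg.as_string(),
        source='sender@example.com',
        destinations=['recipient@example.com'],
    )
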
def witness_tx(tx_ins, tx_outs, tx_witnesses, **kwargs):
    '''Construct a fully-signed segwit transaction.

    Args:
        tx_ins       list(TxIn instances): list of transaction inputs
        tx_outs      list(TxOut instances): list of transaction outputs
        tx_witnesses list(TxWitness instances): list of transaction witnesses
        **kwargs:
            version (int): transaction version number
            lock_time (hex): transaction locktime

    Returns:
        (Tx instance): signed transaction with witnesses
    '''
    # Parse legacy scripts AND witness scripts for OP_CLTV
    deser = [script_ser.deserialize(tx_in.redeem_script)
             for tx_in in tx_ins if tx_in is not None]
    for w in tx_witnesses:
        try:
            deser.append(script_ser.deserialize(w.stack[-1].item))
        except (NotImplementedError, ValueError):
            pass

    version = max([guess_version(d) for d in deser])
    if 'lock_time' in kwargs:
        lock_time = kwargs['lock_time']
    else:
        lock_time = max([guess_locktime(d) for d in deser])

    return tb.make_tx(
        version=version,
        tx_ins=tx_ins,
        tx_outs=tx_outs,
        lock_time=lock_time,
        tx_witnesses=tx_witnesses)

def t_octalValue(t):
    r'[+-]?0[0-9]+'
    # We must match [0-9], and then check the validity of the octal number.
    # If we match [0-7], the invalid octal number "08" would match
    # 'decimalValue' 0 and 'decimalValue' 8.
    if re.search(r'[8-9]', t.value) is not None:
        msg = _format("Invalid octal number {0!A}", t.value)
        t.lexer.last_msg = msg
        t.type = 'error'
        # Setting error causes the value to be automatically skipped
    else:
        t.value = int(t.value, 8)
    return t

def subdomains_init(blockstack_opts, working_dir, atlas_state):
    """Set up subdomain state.
    Returns a SubdomainIndex object that has been successfully connected to Atlas.
    """
    if not is_subdomains_enabled(blockstack_opts):
        return None

    subdomain_state = SubdomainIndex(blockstack_opts['subdomaindb_path'], blockstack_opts=blockstack_opts)
    atlas_node_add_callback(atlas_state, 'store_zonefile', subdomain_state.enqueue_zonefile)
    return subdomain_state

def get_under_hollow(self):
    """Return 'HCP' if an atom is present below the adsorbate in the
    subsurface layer and 'FCC' if not."""
    C0 = self.B[-1:] * (3, 3, 1)
    ads_pos = C0.positions[4]
    C = self.get_subsurface_layer() * (3, 3, 1)

    ret = 'FCC'
    if np.any([np.linalg.norm(ads_pos[:2] - ele.position[:2]) < 0.5 * cradii[ele.number]
               for ele in C]):
        ret = 'HCP'
    return ret

def get_error(self, error):
    """A helper function that gets standard information from the error."""
    error_type = type(error)
    if error.error_type == ET_CLIENT:
        error_type_name = 'Client'
    else:
        error_type_name = 'Server'
    return {
        'type': error_type_name,
        'name': error_type.__name__,
        'prefix': getattr(error_type, '__module__', ''),
        'message': unicode(error),
        'params': error.args,
    }

def dumps(self):
    """Return path command representation."""
    ret_str = self.path_type
    if self.options is not None:
        ret_str += self.options.dumps()
    return ret_str

def on_connect(self, connection):
    "Called when the stream connects"
    self._stream = connection._reader
    self._buffer = SocketBuffer(self._stream, self._read_size)
    if connection.decode_responses:
        self.encoding = connection.encoding

def cat_colors(N: int = 1, *, hue: str = None, luminosity: str = None,
               bgvalue: int = None, loop: bool = False,
               seed: str = "cat") -> Union[List[Any], colors.LinearSegmentedColormap]:
    """Return a colormap suitable for N categorical values, optimized to be
    both aesthetically pleasing and perceptually distinct.

    Args:
        N: The number of colors requested.
        hue: Controls the hue of the generated color. You can pass a string
            representing a color name: "red", "orange", "yellow", "green",
            "blue", "purple", "pink" and "monochrome" are currently supported.
            If you pass a hexadecimal color string such as "#00FFFF", its hue
            value will be used to generate colors.
        luminosity: Controls the luminosity of the generated color: "bright",
            "light" or "dark".
        bgvalue: If not None, then the corresponding index color will be set
            to light gray.
        loop: If True, loop the color alphabet instead of generating random colors.
        seed: If not None, use as the random seed (default: "cat").

    Returns:
        A set of colors in the requested format, either a list of values or a
        matplotlib LinearSegmentedColormap (when format="cmap").

        If N <= 25 and hue and luminosity are both None, a subset of the
        optimally perceptually distinct "color alphabet" is returned.
        Else, a pleasing set of random colors is returned.

        Colors are designed to be displayed on a white background.
    """
    c: List[str] = []
    if N <= 25 and hue is None and luminosity is None:
        c = _color_alphabet[:N]
    elif not loop:
        c = RandomColor(seed=seed).generate(count=N, hue=hue, luminosity=luminosity, format_="hex")
    else:
        n = N
        while n > 0:
            c += _color_alphabet[:n]
            n -= 25
    if bgvalue is not None:
        c[bgvalue] = "#aaaaaa"
    return colors.LinearSegmentedColormap.from_list("", c, N)

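# Illustrative sketch: requesting a 10-colour categorical colormap and using it with
# matplotlib. This assumes the module context above (_color_alphabet, RandomColor,
# matplotlib colors) is importable; with N <= 25 and no hue/luminosity the docstring
# says a subset of the "color alphabet" is used.
import numpy as np
import matplotlib.pyplot as plt

def _example_cat_colors():
    cmap = cat_colors(10, bgvalue=0)            # index 0 rendered as light gray
    data = np.random.randint(0, 10, size=(20, 20))
    plt.imshow(data, cmap=cmap)
    plt.show()
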
def ParseContactRow(self, parser_mediator, query, row, **unused_kwargs):
    """Parses a contact row from the database.

    Args:
        parser_mediator (ParserMediator): mediates interactions between parsers
            and other components, such as storage and dfvfs.
        query (str): query that created the row.
        row (sqlite3.Row): row resulting from query.
    """
    query_hash = hash(query)
    event_data = TangoAndroidContactEventData()

    first_name = self._GetRowValue(query_hash, row, 'first_name')
    try:
        decoded_text = base64_decode(first_name)
        event_data.first_name = codecs.decode(decoded_text, 'utf-8')
    except ValueError:
        event_data.first_name = first_name
        parser_mediator.ProduceExtractionWarning(
            'unable to parse first name: {0:s}'.format(first_name))

    last_name = self._GetRowValue(query_hash, row, 'last_name')
    try:
        decoded_text = base64_decode(last_name)
        event_data.last_name = codecs.decode(decoded_text, 'utf-8')
    except ValueError:
        event_data.last_name = last_name
        parser_mediator.ProduceExtractionWarning(
            'unable to parse last name: {0:s}'.format(last_name))

    event_data.birthday = self._GetRowValue(query_hash, row, 'birthday')
    event_data.gender = self._GetRowValue(query_hash, row, 'gender')

    status = self._GetRowValue(query_hash, row, 'status')
    try:
        decoded_text = base64_decode(status)
        event_data.status = codecs.decode(decoded_text, 'utf-8')
    except ValueError:
        event_data.status = status
        parser_mediator.ProduceExtractionWarning(
            'unable to parse status: {0:s}'.format(status))

    event_data.distance = self._GetRowValue(query_hash, row, 'distance')

    is_friend = self._GetRowValue(query_hash, row, 'friend')
    event_data.is_friend = False
    if is_friend:
        event_data.is_friend = True

    event_data.friend_request_type = self._GetRowValue(query_hash, row, 'friend_request_type')

    friend_request_message = self._GetRowValue(query_hash, row, 'friend_request_message')
    try:
        decoded_text = base64_decode(friend_request_message)
        event_data.friend_request_message = codecs.decode(decoded_text, 'utf-8')
    except ValueError:
        event_data.friend_request_message = friend_request_message
        parser_mediator.ProduceExtractionWarning(
            'unable to parse friend request message: {0:s}'.format(friend_request_message))

    timestamp = self._GetRowValue(query_hash, row, 'last_active_time')
    if timestamp:
        date_time = dfdatetime_java_time.JavaTime(timestamp=timestamp)
        event = time_events.DateTimeValuesEvent(date_time, definitions.TIME_DESCRIPTION_LAST_ACTIVE)
        parser_mediator.ProduceEventWithEventData(event, event_data)

    timestamp = self._GetRowValue(query_hash, row, 'last_access_time')
    if timestamp:
        date_time = dfdatetime_java_time.JavaTime(timestamp=timestamp)
        event = time_events.DateTimeValuesEvent(date_time, definitions.TIME_DESCRIPTION_LAST_ACCESS)
        parser_mediator.ProduceEventWithEventData(event, event_data)

    timestamp = self._GetRowValue(query_hash, row, 'friend_request_time')
    if timestamp:
        date_time = dfdatetime_java_time.JavaTime(timestamp=timestamp)
        event = time_events.DateTimeValuesEvent(date_time, definitions.TIME_DESCRIPTION_SENT)
        parser_mediator.ProduceEventWithEventData(event, event_data)

def aes_ecb_decrypt(self, key_handle, ciphertext):
    """AES ECB decrypt using a key handle.

    @warning: Please be aware of the known limitations of AES ECB mode before using it!

    @param key_handle: Key handle to use for AES ECB decryption
    @param ciphertext: Data to decrypt
    @type key_handle: integer or string
    @type ciphertext: string

    @returns: Plaintext
    @rtype: string

    @see: L{pyhsm.aes_ecb_cmd.YHSM_Cmd_AES_ECB_Decrypt}
    """
    return pyhsm.aes_ecb_cmd.YHSM_Cmd_AES_ECB_Decrypt(self.stick, key_handle, ciphertext).execute()

def absent(name, protocol=None, service_address=None):
    '''Ensure the LVS service is absent.

    name
        The name of the LVS service

    protocol
        The service protocol

    service_address
        The LVS service address
    '''
    ret = {'name': name, 'changes': {}, 'result': True, 'comment': ''}

    # check if service exists and remove it
    service_check = __salt__['lvs.check_service'](protocol=protocol, service_address=service_address)
    if service_check is True:
        if __opts__['test']:
            ret['result'] = None
            ret['comment'] = 'LVS Service {0} is present and needs to be removed'.format(name)
            return ret
        service_delete = __salt__['lvs.delete_service'](protocol=protocol, service_address=service_address)
        if service_delete is True:
            ret['comment'] = 'LVS Service {0} has been removed'.format(name)
            ret['changes'][name] = 'Absent'
            return ret
        else:
            ret['comment'] = 'LVS Service {0} removal failed ({1})'.format(name, service_delete)
            ret['result'] = False
            return ret
    else:
        ret['comment'] = 'LVS Service {0} is not present, so it cannot be removed'.format(name)
        return ret

def summary_engine(**kwargs):
    """engine to extract summary data"""
    logger.debug("summary_engine")
    # farms = kwargs["farms"]
    farms = []
    experiments = kwargs["experiments"]

    for experiment in experiments:
        if experiment.selected_summaries is None:
            selected_summaries = [
                "discharge_capacity",
                "charge_capacity",
                "coulombic_efficiency",
                "cumulated_coulombic_efficiency",
                "ir_discharge",
                "ir_charge",
                "end_voltage_discharge",
                "end_voltage_charge",
            ]
        else:
            selected_summaries = experiment.selected_summaries

        farm = helper.join_summaries(experiment.summary_frames, selected_summaries)
        farms.append(farm)

    barn = "batch_dir"
    return farms, barn

def _GetPropertyValue(self, parser_mediator, properties, property_name):
    """Retrieves a property value.

    Args:
        parser_mediator (ParserMediator): mediates interactions between parsers
            and other components, such as storage and dfvfs.
        properties (dict[str, object]): properties.
        property_name (str): name of the property.

    Returns:
        str: property value.
    """
    property_value = properties.get(property_name, None)
    if isinstance(property_value, py2to3.BYTES_TYPE):
        try:
            # TODO: get encoding from XML metadata.
            property_value = property_value.decode('utf-8')
        except UnicodeDecodeError:
            parser_mediator.ProduceExtractionWarning(
                'unable to decode property: {0:s}'.format(property_name))
    return property_value

def _ancestors(collection):
    """Get the ancestors of the collection."""
    for index, c in enumerate(collection.path_to_root()):
        if index > 0 and c.dbquery is not None:
            # PEP 479: use return instead of raising StopIteration inside a generator
            return
        yield c.name

def is_nested_subset(superset_list, subset_list):
    """This function checks if a nested list is a subset of another nested list.

    Examples:
        >>> is_nested_subset([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14],
        ...                  [[12, 18, 23, 25, 45], [7, 11, 19, 24, 28], [1, 5, 8, 18, 15, 16]])
        False
        >>> is_nested_subset([[2, 3, 1], [4, 5], [6, 8]], [[4, 5], [6, 8]])
        True
        >>> is_nested_subset([['a', 'b'], ['e'], ['c', 'd']], [['g']])
        False

    Args:
        superset_list, list: The list to check if subset_list is part of.
        subset_list, list: List for checking presence in superset_list.

    Returns:
        bool: True if subset_list is a subset of the superset_list, else False.
    """
    presence = all(item in superset_list for item in subset_list)
    return presence

def _repack_archive(archive1, archive2, verbosity=0, interactive=True):
    """Repackage an archive to a different format."""
    format1, compression1 = get_archive_format(archive1)
    format2, compression2 = get_archive_format(archive2)
    if format1 == format2 and compression1 == compression2:
        # same format and compression allows to copy the file
        util.link_or_copy(archive1, archive2, verbosity=verbosity)
        return
    tmpdir = util.tmpdir()
    try:
        kwargs = dict(verbosity=verbosity, outdir=tmpdir)
        same_format = (format1 == format2 and compression1 and compression2)
        if same_format:
            # only decompress since the format is the same
            kwargs['format'] = compression1
        path = _extract_archive(archive1, **kwargs)
        archive = os.path.abspath(archive2)
        files = tuple(os.listdir(path))
        olddir = os.getcwd()
        os.chdir(path)
        try:
            kwargs = dict(verbosity=verbosity, interactive=interactive)
            if same_format:
                # only compress since the format is the same
                kwargs['format'] = compression2
            _create_archive(archive, files, **kwargs)
        finally:
            os.chdir(olddir)
    finally:
        shutil.rmtree(tmpdir, onerror=rmtree_log_error)

def update_product(AcceptLanguage=None, Id=None, Name=None, Owner=None, Description=None,
                   Distributor=None, SupportDescription=None, SupportEmail=None,
                   SupportUrl=None, AddTags=None, RemoveTags=None):
    """Updates an existing product.

    See also: AWS API Documentation

    :example: response = client.update_product(
        AcceptLanguage='string',
        Id='string',
        Name='string',
        Owner='string',
        Description='string',
        Distributor='string',
        SupportDescription='string',
        SupportEmail='string',
        SupportUrl='string',
        AddTags=[
            {
                'Key': 'string',
                'Value': 'string'
            },
        ],
        RemoveTags=[
            'string',
        ]
    )

    :type AcceptLanguage: string
    :param AcceptLanguage: The language code to use for this operation. Supported language codes are as follows:
        'en' (English)
        'jp' (Japanese)
        'zh' (Chinese)
        If no code is specified, 'en' is used as the default.

    :type Id: string
    :param Id: [REQUIRED]
        The identifier of the product for the update request.

    :type Name: string
    :param Name: The updated product name.

    :type Owner: string
    :param Owner: The updated owner of the product.

    :type Description: string
    :param Description: The updated text description of the product.

    :type Distributor: string
    :param Distributor: The updated distributor of the product.

    :type SupportDescription: string
    :param SupportDescription: The updated support description for the product.

    :type SupportEmail: string
    :param SupportEmail: The updated support email for the product.

    :type SupportUrl: string
    :param SupportUrl: The updated support URL for the product.

    :type AddTags: list
    :param AddTags: Tags to add to the existing list of tags associated with the product.
        (dict) -- Key/value pairs to associate with this provisioning. These tags are entirely discretionary and are propagated to the resources created in the provisioning.
            Key (string) -- [REQUIRED] The ProvisioningArtifactParameter.TagKey parameter from DescribeProvisioningParameters.
            Value (string) -- [REQUIRED] The desired value for this key.

    :type RemoveTags: list
    :param RemoveTags: Tags to remove from the existing list of tags associated with the product.
        (string) --

    :rtype: dict
    :return: {
        'ProductViewDetail': {
            'ProductViewSummary': {
                'Id': 'string',
                'ProductId': 'string',
                'Name': 'string',
                'Owner': 'string',
                'ShortDescription': 'string',
                'Type': 'CLOUD_FORMATION_TEMPLATE',
                'Distributor': 'string',
                'HasDefaultPath': True|False,
                'SupportEmail': 'string',
                'SupportDescription': 'string',
                'SupportUrl': 'string'
            },
            'Status': 'AVAILABLE'|'CREATING'|'FAILED',
            'ProductARN': 'string',
            'CreatedTime': datetime(2015, 1, 1)
        },
        'Tags': [
            {
                'Key': 'string',
                'Value': 'string'
            },
        ]
    }
    """
    pass

def _set_scroll_area(self, force=False):
    """Sets the scroll window based on the counter positions.

    Args:
        force (bool): Set the scroll area even if no change in height and
            position is detected
    """
    # Save scroll offset for resizing
    oldOffset = self.scroll_offset
    self.scroll_offset = newOffset = max(self.counters.values()) + 1

    if not self.enabled:
        return

    # Set exit handling only once
    if not self.process_exit:
        atexit.register(self._at_exit)
        if not self.no_resize and RESIZE_SUPPORTED:
            signal.signal(signal.SIGWINCH, self._resize_handler)
        self.process_exit = True

    if self.set_scroll:
        term = self.term
        newHeight = term.height
        scrollPosition = max(0, newHeight - newOffset)

        if force or newOffset > oldOffset or newHeight != self.height:
            self.height = newHeight

            # Add line feeds so we don't overwrite existing output
            if newOffset - oldOffset > 0:
                term.move_to(0, max(0, newHeight - oldOffset))
                self.stream.write('\n' * (newOffset - oldOffset))

            # Reset scroll area
            self.term.change_scroll(scrollPosition)

        # Always reset position
        term.move_to(0, scrollPosition)
        if self.companion_term:
            self.companion_term.move_to(0, scrollPosition)

def get_pushes(self, project, **params):
    """Gets pushes from project, filtered by parameters.

    By default this method will just return the latest 10 pushes (if they exist).

    :param project: project (repository name) to query data for
    :param params: keyword arguments to filter results
    """
    return self._get_json_list(self.PUSH_ENDPOINT, project, **params)

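# Illustrative sketch: querying recent pushes. `client` stands in for whatever client
# class defines get_pushes (e.g. a Treeherder client); the 'count' filter and the
# 'revision' field are assumptions about that API, used here only for illustration.
def _example_get_pushes(client):
    pushes = client.get_pushes('mozilla-central', count=5)
    for push in pushes:
        print(push.get('revision'))
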
def render_thread(self):
    """A render loop that pulls observations off the queue to render."""
    obs = True
    while obs:  # Send something falsy through the queue to shut down.
        obs = self._obs_queue.get()
        if obs:
            for alert in obs.observation.alerts:
                self._alerts[sc_pb.Alert.Name(alert)] = time.time()
            for err in obs.action_errors:
                if err.result != sc_err.Success:
                    self._alerts[sc_err.ActionResult.Name(err.result)] = time.time()
            self.prepare_actions(obs)
            if self._obs_queue.empty():
                # Only render the latest observation so we keep up with the game.
                self.render_obs(obs)
            if self._video_writer:
                self._video_writer.add(np.transpose(
                    pygame.surfarray.pixels3d(self._window), axes=(1, 0, 2)))
        self._obs_queue.task_done()

def get_host_system_failfast(self, name, verbose=False, host_system_term='HS'):
    """Get a HostSystem object; fail fast if the object isn't a valid reference."""
    if verbose:
        print("Finding HostSystem named %s..." % name)

    hs = self.get_host_system(name)

    if hs is None:
        print("Error: %s '%s' does not exist" % (host_system_term, name))
        sys.exit(1)

    if verbose:
        print("Found HostSystem: {0} Name: {1}".format(hs, hs.name))

    return hs

def backfill_unk_emb(self, E, filled_words):
    """Backfills an embedding matrix with the embedding for the unknown token.

    :param E: original embedding matrix of dimensions `(vocab_size, emb_dim)`.
    :param filled_words: these words will not be backfilled with unk.

    NOTE: this function is for internal use.
    """
    unk_emb = E[self[self._unk]]
    for i, word in enumerate(self):
        if word not in filled_words:
            E[i] = unk_emb

def factor_hatch(field_name, patterns, factors, start=0, end=None):
    '''Create a ``DataSpec`` dict that applies a client-side
    ``CategoricalPatternMapper`` transformation to a ``ColumnDataSource`` column.

    Args:
        field_name (str): a field name to configure ``DataSpec`` with
        patterns (seq[string]): a list of hatch patterns to use to map to
        factors (seq): a sequence of categorical factors corresponding to
            the palette
        start (int, optional): a start slice index to apply when the column
            data has factors with multiple levels. (default: 0)
        end (int, optional): an end slice index to apply when the column
            data has factors with multiple levels. (default: None)

    Returns:
        dict

    Added in version 1.1.1
    '''
    return field(field_name, CategoricalPatternMapper(patterns=patterns, factors=factors, start=start, end=end))

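# Illustrative sketch (assumes the Bokeh plotting API; the glyph property names used
# here are assumptions about that library, not part of the function above). Maps the
# 'fruits' column onto per-category hatch patterns.
from bokeh.models import ColumnDataSource
from bokeh.plotting import figure

def _example_factor_hatch():
    fruits = ['Apples', 'Pears', 'Nectarines']
    source = ColumnDataSource(data=dict(fruits=fruits, counts=[5, 3, 4]))
    p = figure(x_range=fruits)
    p.vbar(x='fruits', top='counts', width=0.9, source=source,
           hatch_pattern=factor_hatch('fruits', patterns=[' ', '/', 'x'], factors=fruits))
    return p
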
def get_instance(self, payload):
    """Build an instance of YesterdayInstance.

    :param dict payload: Payload response from the API

    :returns: twilio.rest.api.v2010.account.usage.record.yesterday.YesterdayInstance
    :rtype: twilio.rest.api.v2010.account.usage.record.yesterday.YesterdayInstance
    """
    return YesterdayInstance(self._version, payload, account_sid=self._solution['account_sid'])

def atlasdb_format_query(query, values):
    """Turn a query into a string for printing.
    Useful for debugging.
    """
    return "".join(["%s %s" % (frag, "'%s'" % val if type(val) in [str, unicode] else val)
                    for (frag, val) in zip(query.split("?"), values + ("",))])

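# Illustrative sketch: formatting a parameterized query for debug logging. A Python 2
# context is assumed, since the function above references `unicode`; the table and
# column names are placeholders.
def _example_format_query():
    query = "SELECT * FROM subdomains WHERE name = ? AND block_height > ?"
    values = ("foo.bar.id", 550000)
    # Prints the query with values spliced in, roughly:
    #   SELECT * FROM subdomains WHERE name =  'foo.bar.id' AND block_height >  550000
    print(atlasdb_format_query(query, values))
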
def _metaclass_lookup_attribute(self, name, context):
    """Search the given name in the implicit and the explicit metaclass."""
    attrs = set()
    implicit_meta = self.implicit_metaclass()
    metaclass = self.metaclass()
    for cls in {implicit_meta, metaclass}:
        if cls and cls != self and isinstance(cls, ClassDef):
            cls_attributes = self._get_attribute_from_metaclass(cls, name, context)
            attrs.update(set(cls_attributes))
    return attrs

def file_name(self, file_name):
    """Updates the file_name.

    Args:
        file_name:
    """
    if not self.can_update():
        self._tcex.handle_error(910, [self.type])

    self._data['fileName'] = file_name
    request = {'fileName': file_name}
    return self.tc_requests.update(self.api_type, self.api_sub_type, self.unique_id, request)

def assign_complex_to_samples(items):
    """Assign complex inputs like variants and align outputs to samples.

    Handles list inputs to record conversion where we have inputs from multiple
    locations and need to ensure they are properly assigned to samples in many
    environments.

    The unpleasant approach here is to use standard file naming to match
    with samples so this can work in environments where we don't download/stream
    the input files (for space/time savings).
    """
    extract_fns = {("variants", "samples"): _get_vcf_samples,
                   ("align_bam",): _get_bam_samples}
    complex = {k: {} for k in extract_fns.keys()}
    for data in items:
        for k in complex:
            v = tz.get_in(k, data)
            if v is not None:
                for s in extract_fns[k](v, items):
                    if s:
                        complex[k][s] = v
    out = []
    for data in items:
        for k in complex:
            newv = tz.get_in([k, dd.get_sample_name(data)], complex)
            if newv:
                data = tz.update_in(data, k, lambda x: newv)
        out.append(data)
    return out

def sg_ctc(tensor, opt):
    r"""Computes the CTC (Connectionist Temporal Classification) Loss between `tensor` and `target`.

    Args:
        tensor: A 3-D `float Tensor`.
        opt:
            target: A `Tensor` with the same length in the first dimension as the `tensor`. Labels. (Dense tensor)
            name: A `string`. A name to display in the tensor board web UI.

    Returns:
        A 1-D `Tensor` with the same length in the first dimension of the `tensor`.

    For example,

        tensor = [[[2., -1., 3.], [3., 1., -2.]], [[1., -1., 2.], [3., 1., -2.]]]
        target = [[2., 1.], [2., 3.]]
        tensor.sg_ctc(target=target) => [4.45940781 2.43091154]
    """
    assert opt.target is not None, 'target is mandatory.'

    # default sequence length
    shape = tf.shape(tensor)
    opt += tf.sg_opt(seq_len=tf.ones((shape[0],), dtype=tf.sg_intx) * shape[1], merge=True)

    # ctc loss
    out = tf.nn.ctc_loss(opt.target.sg_to_sparse(), tensor, opt.seq_len,
                         ctc_merge_repeated=opt.merge, time_major=False)
    out = tf.identity(out, 'ctc')

    # add summary
    tf.sg_summary_loss(out, name=opt.name)

    return out

def generate_VD_junction_transfer_matrices(self):
    """Compute the transfer matrices for the VD junction.

    Sets the attributes Tvd, Svd, Dvd, lTvd, and lDvd.
    """
    nt2num = {'A': 0, 'C': 1, 'G': 2, 'T': 3}

    # Compute Tvd
    Tvd = {}
    for aa in self.codons_dict.keys():
        current_Tvd = np.zeros((4, 4))
        for init_nt in 'ACGT':
            for codon in self.codons_dict[aa]:
                current_Tvd[nt2num[codon[2]], nt2num[init_nt]] += (
                    self.Rvd[nt2num[codon[2]], nt2num[codon[1]]]
                    * self.Rvd[nt2num[codon[1]], nt2num[codon[0]]]
                    * self.Rvd[nt2num[codon[0]], nt2num[init_nt]])
        Tvd[aa] = current_Tvd

    # Compute Svd
    Svd = {}
    for aa in self.codons_dict.keys():
        current_Svd = np.zeros((4, 4))
        for ins_nt in 'ACGT':
            if any([codon.startswith(ins_nt) for codon in self.codons_dict[aa]]):
                current_Svd[nt2num[ins_nt], :] = self.Rvd[nt2num[ins_nt], :]
        Svd[aa] = current_Svd

    # Compute Dvd
    Dvd = {}
    for aa in self.codons_dict.keys():
        current_Dvd = np.zeros((4, 4))
        for init_nt in 'ACGT':
            for codon in self.codons_dict[aa]:
                current_Dvd[nt2num[codon[2]], nt2num[init_nt]] += (
                    self.Rvd[nt2num[codon[1]], nt2num[codon[0]]]
                    * self.Rvd[nt2num[codon[0]], nt2num[init_nt]])
        Dvd[aa] = current_Dvd

    # Compute lTvd
    lTvd = {}
    for aa in self.codons_dict.keys():
        current_lTvd = np.zeros((4, 4))
        for codon in self.codons_dict[aa]:
            current_lTvd[nt2num[codon[2]], nt2num[codon[0]]] += (
                self.Rvd[nt2num[codon[2]], nt2num[codon[1]]]
                * self.first_nt_bias_insVD[nt2num[codon[1]]])
        lTvd[aa] = current_lTvd

    # Compute lDvd
    lDvd = {}
    for aa in self.codons_dict.keys():
        current_lDvd = np.zeros((4, 4))
        for codon in self.codons_dict[aa]:
            current_lDvd[nt2num[codon[2]], nt2num[codon[0]]] += self.first_nt_bias_insVD[nt2num[codon[1]]]
        lDvd[aa] = current_lDvd

    # Set the attributes
    self.Tvd = Tvd
    self.Svd = Svd
    self.Dvd = Dvd
    self.lTvd = lTvd
    self.lDvd = lDvd

def increase(self, infile):
    '''Increase: swap the byte sequence at an arbitrary position with an
    arbitrary byte sequence of a larger size.'''
    gf = infile[31:]
    index = gf.index(random.choice(gf))
    index_len = len(gf[index])
    large_size_index = random.choice([gf.index(g) for g in gf if len(g) > index_len])
    gf[index], gf[large_size_index] = gf[large_size_index], gf[index]
    return infile[:31] + gf

def first_n_items(array, n_desired):
    """Returns the first n_desired items of an array."""
    # Unfortunately, we can't just do array.flat[:n_desired] here because it
    # might not be a numpy.ndarray. Moreover, access to elements of the array
    # could be very expensive (e.g. if it's only available over DAP), so go out
    # of our way to get them in a single call to __getitem__ using only slices.
    if n_desired < 1:
        raise ValueError('must request at least one item')

    if array.size == 0:
        # work around for https://github.com/numpy/numpy/issues/5195
        return []

    if n_desired < array.size:
        indexer = _get_indexer_at_least_n_items(array.shape, n_desired, from_end=False)
        array = array[indexer]
    return np.asarray(array).flat[:n_desired]

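# Illustrative sketch: taking the first few items of a (possibly lazily indexed) array.
# With a plain numpy array this effectively slices the flattened data; the helper
# _get_indexer_at_least_n_items used above is assumed to be defined in this module.
import numpy as np

def _example_first_n_items():
    arr = np.arange(100).reshape(10, 10)
    # Expected: the first three flattened values, i.e. [0 1 2]
    print(first_n_items(arr, 3))
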
def read(self):
    """Reads the cache file as pickle file."""

    def warn(msg, elapsed_time, current_time):
        desc = self._cache_id_desc()
        self._warnings("{0} {1}: {2}s < {3}s", msg, desc, elapsed_time, current_time)

    file_time = get_time()
    out = self._out
    if out is None:
        if self.verbose:
            self._warnings("reading {0} from disk", self._cache_id_desc())
        with open(self._cache_file, 'rb') as f_in:
            out = None
            while True:
                t_out = f_in.read(CHUNK_SIZE)
                if not len(t_out):
                    break
                if out is not None:
                    out += t_out
                else:
                    out = t_out
        self._out = out

    (cache_id_obj, elapsed_time, res) = self._read(out)
    self.ensure_cache_id(cache_id_obj)

    real_time = get_time() - file_time
    if elapsed_time is not None and real_time > elapsed_time:
        warn("reading cache from disk takes longer than computing!", elapsed_time, real_time)
    elif self._start_time is not None and elapsed_time is not None:
        current_time = get_time() - self._start_time
        if elapsed_time < current_time:
            warn("reading cache takes longer than computing!", elapsed_time, current_time)

    self._last_access = get_time()
    return res

def register_extension(self, group, name, extension):
    """Register an extension.

    Args:
        group (str): The type of the extension
        name (str): A name for the extension
        extension (str or class): If this is a string, then it will be
            interpreted as a path to import and load. Otherwise it
            will be treated as the extension object itself.
    """
    if isinstance(extension, str):
        name, extension = self.load_extension(extension)[0]

    if group not in self._registered_extensions:
        self._registered_extensions[group] = []

    self._registered_extensions[group].append((name, extension))

def spline(x, y, n, yp1, ypn, y2):
    '''/* CALCULATE 2ND DERIVATIVES OF CUBIC SPLINE INTERP FUNCTION
    * ADAPTED FROM NUMERICAL RECIPES BY PRESS ET AL
    * X, Y: ARRAYS OF TABULATED FUNCTION IN ASCENDING ORDER BY X
    * N: SIZE OF ARRAYS X, Y
    * YP1, YPN: SPECIFIED DERIVATIVES AT X[0] AND X[N-1]; VALUES
    *           >= 1E30 SIGNAL SECOND DERIVATIVE ZERO
    * Y2: OUTPUT ARRAY OF SECOND DERIVATIVES */'''
    # Plain list instead of the original malloc; no out-of-memory check needed.
    u = [0.0] * n

    if yp1 > 0.99E30:  # pragma: no cover
        y2[0] = 0
        u[0] = 0
    else:
        y2[0] = -0.5
        u[0] = (3.0 / (x[1] - x[0])) * ((y[1] - y[0]) / (x[1] - x[0]) - yp1)

    for i in range(1, n - 1):
        sig = (x[i] - x[i - 1]) / (x[i + 1] - x[i - 1])
        p = sig * y2[i - 1] + 2.0
        y2[i] = (sig - 1.0) / p
        u[i] = (6.0 * ((y[i + 1] - y[i]) / (x[i + 1] - x[i])
                       - (y[i] - y[i - 1]) / (x[i] - x[i - 1]))
                / (x[i + 1] - x[i - 1]) - sig * u[i - 1]) / p

    if ypn > 0.99E30:  # pragma: no cover
        qn = 0
        un = 0
    else:
        qn = 0.5
        un = (3.0 / (x[n - 1] - x[n - 2])) * (ypn - (y[n - 1] - y[n - 2]) / (x[n - 1] - x[n - 2]))

    y2[n - 1] = (un - qn * u[n - 2]) / (qn * y2[n - 2] + 1.0)

    # Back-substitution, equivalent to the decrementing C for loop
    for k in range(n - 2, -1, -1):
        y2[k] = y2[k] * y2[k + 1] + u[k]

    # no need to free u here
    return

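# Illustrative sketch: computing second derivatives for a cubic spline over a small
# table of sin(x) values. The 1e30 sentinels request zero second derivatives at the
# ends (a "natural" spline), per the docstring above; y2 is filled in place.
import math

def _example_spline():
    xs = [0.0, 1.0, 2.0, 3.0, 4.0]
    ys = [math.sin(v) for v in xs]
    y2 = [0.0] * len(xs)
    spline(xs, ys, len(xs), 1e30, 1e30, y2)
    return y2
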
def setonce(decorator):
    """A descriptor modifier which allows __set__ to be called at most once."""
    def decorate(fn, *args, **kwargs):
        parent = decorator(fn, *args, **kwargs)
        # doc = _add_msg(getattr(parent, '__doc__', None), '*@setonce*')
        doc = getattr(parent, '__doc__', None)
        assert hasattr(parent, "__set__")  # don't use for non-data descriptors!
        return _setonce(fn.__name__, parent, doc)
    return decorate

def add_hostname_cn_ip(self, addresses):
    """Add an address to the SAN list for the hostname request.

    :param addresses: [] List of addresses to be added
    """
    for addr in addresses:
        if addr not in self.hostname_entry['addresses']:
            self.hostname_entry['addresses'].append(addr)

def plot_ic_hist(ic, ax=None):
    """Plots Spearman Rank Information Coefficient histogram for a given factor.

    Parameters
    ----------
    ic : pd.DataFrame
        DataFrame indexed by date, with IC for each forward return.
    ax : matplotlib.Axes, optional
        Axes upon which to plot.

    Returns
    -------
    ax : matplotlib.Axes
        The axes that were plotted on.
    """
    ic = ic.copy()

    num_plots = len(ic.columns)
    v_spaces = ((num_plots - 1) // 3) + 1

    if ax is None:
        f, ax = plt.subplots(v_spaces, 3, figsize=(18, v_spaces * 6))
        ax = ax.flatten()

    for a, (period_num, ic) in zip(ax, ic.iteritems()):
        sns.distplot(ic.replace(np.nan, 0.), norm_hist=True, ax=a)
        a.set(title="%s Period IC" % period_num, xlabel='IC')
        a.set_xlim([-1, 1])
        a.text(.05, .95, "Mean %.3f \n Std. %.3f" % (ic.mean(), ic.std()),
               fontsize=16,
               bbox={'facecolor': 'white', 'alpha': 1, 'pad': 5},
               transform=a.transAxes,
               verticalalignment='top')
        a.axvline(ic.mean(), color='w', linestyle='dashed', linewidth=2)

    if num_plots < len(ax):
        ax[-1].set_visible(False)

    return ax

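# Illustrative sketch: plotting the IC histogram for two forward-return periods using
# synthetic data (values here are random and purely for illustration; a pandas version
# that still provides DataFrame.iteritems is assumed, matching the function above).
import numpy as np
import pandas as pd

def _example_plot_ic_hist():
    dates = pd.date_range('2020-01-01', periods=250)
    ic = pd.DataFrame({'1D': np.random.uniform(-0.2, 0.3, 250),
                       '5D': np.random.uniform(-0.1, 0.2, 250)}, index=dates)
    return plot_ic_hist(ic)
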
def submit(self, q, context=None, task_name="casjobs", estimate=30):
    """Submit a job to CasJobs.

    ## Arguments

    * `q` (str): The SQL query.

    ## Keyword Arguments

    * `context` (str): Casjobs context used for this query.
    * `task_name` (str): The task name.
    * `estimate` (int): Estimate of the time this job will take (in minutes).

    ## Returns

    * `job_id` (int): The submission ID.
    """
    if not context:
        context = self.context
    params = {"qry": q, "context": context, "taskname": task_name, "estimate": estimate}
    r = self._send_request("SubmitJob", params=params)
    job_id = int(self._parse_single(r.text, "long"))
    return job_id

def _scale_mesh(self, scale):
    """TODO: add documentation"""
    pos_ks = ['vertices', 'centers']
    # TODO: scale velocities???

    # handle scale
    self.update_columns_dict({k: self[k] * scale for k in pos_ks})

    self.update_columns(areas=self.areas * (scale ** 2))
    self._volume *= scale ** 3

    if self._area is not None:
        # self._area is None for wd meshes
        self._area += scale ** 2

def decorate(cls, app, *args, run_middleware=False, with_context=False, **kwargs):
    """This is a decorator that can be used to apply this plugin to a specific
    route/view on your app, rather than the whole app.

    :param app:
    :type app: Sanic | Blueprint
    :param args:
    :type args: tuple(Any)
    :param run_middleware:
    :type run_middleware: bool
    :param with_context:
    :type with_context: bool
    :param kwargs:
    :type kwargs: dict(Any)
    :return: the decorated route/view
    :rtype: fn
    """
    from spf.framework import SanicPluginsFramework
    spf = SanicPluginsFramework(app)  # get the singleton from the app
    try:
        assoc = spf.register_plugin(cls, skip_reg=True)
    except ValueError as e:
        # this is normal, if this plugin has been registered previously
        assert e.args and len(e.args) > 1
        assoc = e.args[1]
    (plugin, reg) = assoc
    inst = spf.get_plugin(plugin)  # plugin may not actually be registered
    # registered might be True, False or None at this point
    regd = True if inst else None
    if regd is True:
        # middleware will be run on this route anyway, because the plugin
        # is registered on the app. Turn it off on the route-level.
        run_middleware = False
    req_middleware = deque()
    resp_middleware = deque()
    if run_middleware:
        for i, m in enumerate(plugin._middlewares):
            attach_to = m.kwargs.pop('attach_to', 'request')
            priority = m.kwargs.pop('priority', 5)
            with_context = m.kwargs.pop('with_context', False)
            mw_handle_fn = m.middleware
            if attach_to == 'response':
                relative = m.kwargs.pop('relative', 'post')
                if relative == "pre":
                    mw = (0, 0 - priority, 0 - i, mw_handle_fn, with_context, m.args, m.kwargs)
                else:  # relative == "post"
                    mw = (1, 0 - priority, 0 - i, mw_handle_fn, with_context, m.args, m.kwargs)
                resp_middleware.append(mw)
            else:  # attach_to == "request"
                relative = m.kwargs.pop('relative', 'pre')
                if relative == "post":
                    mw = (1, priority, i, mw_handle_fn, with_context, m.args, m.kwargs)
                else:  # relative == "pre"
                    mw = (0, priority, i, mw_handle_fn, with_context, m.args, m.kwargs)
                req_middleware.append(mw)
    req_middleware = tuple(sorted(req_middleware))
    resp_middleware = tuple(sorted(resp_middleware))

    def _decorator(f):
        nonlocal spf, plugin, regd, run_middleware, with_context
        nonlocal req_middleware, resp_middleware, args, kwargs

        async def wrapper(request, *a, **kw):
            nonlocal spf, plugin, regd, run_middleware, with_context
            nonlocal req_middleware, resp_middleware, f, args, kwargs
            # the plugin was not registered on the app, it might be now
            if regd is None:
                _inst = spf.get_plugin(plugin)
                regd = _inst is not None
            context = plugin.get_context_from_spf(spf)
            if run_middleware and not regd and len(req_middleware) > 0:
                for (_a, _p, _i, handler, with_context, args, kwargs) in req_middleware:
                    if with_context:
                        resp = handler(request, *args, context=context, **kwargs)
                    else:
                        resp = handler(request, *args, **kwargs)
                    if isawaitable(resp):
                        resp = await resp
                    if resp:
                        return
            response = await plugin.route_wrapper(
                f, request, context, a, kw, *args, with_context=with_context, **kwargs)
            if isawaitable(response):
                response = await response
            if run_middleware and not regd and len(resp_middleware) > 0:
                for (_a, _p, _i, handler, with_context, args, kwargs) in resp_middleware:
                    if with_context:
                        _resp = handler(request, response, *args, context=context, **kwargs)
                    else:
                        _resp = handler(request, response, *args, **kwargs)
                    if isawaitable(_resp):
                        _resp = await _resp
                    if _resp:
                        response = _resp
                        break
            return response
        return update_wrapper(wrapper, f)
    return _decorator

def password_attributes_character_restriction_upper(self, **kwargs):
    """Auto Generated Code"""
    config = ET.Element("config")
    password_attributes = ET.SubElement(config, "password-attributes", xmlns="urn:brocade.com:mgmt:brocade-aaa")
    character_restriction = ET.SubElement(password_attributes, "character-restriction")
    upper = ET.SubElement(character_restriction, "upper")
    upper.text = kwargs.pop('upper')

    callback = kwargs.pop('callback', self._callback)
    return callback(config)

def load_parcellation_coords(parcellation_name):
    """Loads coordinates of included parcellations.

    Parameters
    ----------
    parcellation_name : str
        options: 'gordon2014_333', 'power2012_264', 'shen2013_278'.

    Returns
    -------
    parc : array
        parcellation coordinates
    """
    path = tenetopath[0] + '/data/parcellation/' + parcellation_name + '.csv'
    parc = np.loadtxt(path, skiprows=1, delimiter=',', usecols=[1, 2, 3])
    return parc

def reconstruct_url(self, path_info=None, query_string=None, relative=False):
    """Reconstructs the request URL using the algorithm provided by PEP 3333."""
    environ = self.environ
    if relative:
        url = ''
    else:
        url = environ['wsgi.url_scheme'] + '://'

        if environ.get('HTTP_HOST'):
            url += environ['HTTP_HOST']
        else:
            url += environ['SERVER_NAME']

            if environ['wsgi.url_scheme'] == 'https':
                if environ['SERVER_PORT'] != '443':
                    url += ':' + environ['SERVER_PORT']
            else:
                if environ['SERVER_PORT'] != '80':
                    url += ':' + environ['SERVER_PORT']

    url += quote(environ.get('SCRIPT_NAME', ''))
    if path_info is None:
        url += quote(environ.get('PATH_INFO', ''))
    else:
        url += path_info
    if query_string is None:
        if environ.get('QUERY_STRING'):
            url += '?' + environ['QUERY_STRING']
    else:
        if query_string:
            if isinstance(query_string, str):
                url += '?' + query_string
            else:
                url += '?' + encode_query_string(query_string)
    return url

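# Illustrative sketch: the PEP 3333 reconstruction above applied to a minimal WSGI
# environ. `Request` is a hypothetical stand-in for whatever class defines
# reconstruct_url, and its constructor signature is an assumption.
def _example_reconstruct_url(Request):
    environ = {
        'wsgi.url_scheme': 'https',
        'HTTP_HOST': 'example.com',
        'SERVER_NAME': 'example.com',
        'SERVER_PORT': '443',
        'SCRIPT_NAME': '/app',
        'PATH_INFO': '/items',
        'QUERY_STRING': 'page=2',
    }
    request = Request(environ)
    # Expected, given the PEP 3333 rules above: 'https://example.com/app/items?page=2'
    return request.reconstruct_url()
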
def from_urlpath(cls, path, app=None):
    '''Alternative constructor which accepts a path as taken from URL and uses
    the given app or the current app config to get the real path.

    If class has attribute `generic` set to True, `directory_class` or
    `file_class` will be used as type.

    :param path: relative path as from URL
    :param app: optional, flask application
    :return: file object pointing to path
    :rtype: File
    '''
    app = app or current_app
    base = app.config['directory_base']
    path = urlpath_to_abspath(path, base)
    if not cls.generic:
        kls = cls
    elif os.path.isdir(path):
        kls = cls.directory_class
    else:
        kls = cls.file_class
    return kls(path=path, app=app)

def find_logs(self, user_name, first_date, start_time, last_date, end_time,
              action, functionality, parameter, pagination):
    """Search all logs, filtering by the given parameters.

    :param user_name: Filter by user_name
    :param first_date: Sets initial date for begin of the filter
    :param start_time: Sets initial time
    :param last_date: Sets final date
    :param end_time: Sets final time and ends the filter. That defines the searching gap
    :param action: Filter by action (Create, Update or Delete)
    :param functionality: Filter by class
    :param parameter: Filter by parameter
    :param pagination: Class with all data needed to paginate

    :return: Following dictionary:

        {'eventlog': {'id_usuario': <id_user>,
                      'hora_evento': <hora_evento>,
                      'acao': <acao>,
                      'funcionalidade': <funcionalidade>,
                      'parametro_anterior': <parametro_anterior>,
                      'parametro_atual': <parametro_atual>}
         'total': {<total_registros>}}

    :raise InvalidParameterError: Some parameter was invalid.
    :raise DataBaseError: Networkapi failed to access the database.
    :raise XMLError: Networkapi failed to generate the XML response.
    """
    if not isinstance(pagination, Pagination):
        raise InvalidParameterError(
            u"Invalid parameter: pagination must be a class of type 'Pagination'.")

    eventlog_map = dict()
    eventlog_map["start_record"] = pagination.start_record
    eventlog_map["end_record"] = pagination.end_record
    eventlog_map["asorting_cols"] = pagination.asorting_cols
    eventlog_map["searchable_columns"] = pagination.searchable_columns
    eventlog_map["custom_search"] = pagination.custom_search
    eventlog_map["usuario"] = user_name
    eventlog_map["data_inicial"] = first_date
    eventlog_map["hora_inicial"] = start_time
    eventlog_map["data_final"] = last_date
    eventlog_map["hora_final"] = end_time
    eventlog_map["acao"] = action
    eventlog_map["funcionalidade"] = functionality
    eventlog_map["parametro"] = parameter

    url = "eventlog/find/"
    code, xml = self.submit({'eventlog': eventlog_map}, 'POST', url)

    key = "eventlog"
    return get_list_map(self.response(code, xml, key), key)

def open_file(loc):
    """Handle .gz, .tar.gz or unzipped files."""
    loc = ensure_path(loc)
    if tarfile.is_tarfile(str(loc)):
        return tarfile.open(str(loc), "r:gz")
    elif loc.parts[-1].endswith("gz"):
        return (line.decode("utf8") for line in gzip.open(str(loc), "r"))
    elif loc.parts[-1].endswith("zip"):
        zip_file = zipfile.ZipFile(str(loc))
        names = zip_file.namelist()
        file_ = zip_file.open(names[0])
        return (line.decode("utf8") for line in file_)
    else:
        return loc.open("r", encoding="utf8")

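# Illustrative sketch: iterating over whatever open_file returns, regardless of whether
# the path points at plain text, a .gz file, a .zip archive, or a .tar.gz archive.
# For tarballs the return value is a TarFile; otherwise it is line-iterable.
def _example_open_file(path):
    handle = open_file(path)
    if hasattr(handle, 'getmembers'):          # TarFile case
        return [member.name for member in handle.getmembers()]
    return [line.strip() for line in handle]   # generator / file object case
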
def get_subdomain_DID_info(fqn, db_path=None, zonefiles_dir=None):
    """Get a subdomain's DID info.
    Return None if not found.
    """
    opts = get_blockstack_opts()
    if not is_subdomains_enabled(opts):
        log.warn("Subdomain support is disabled")
        return None

    if db_path is None:
        db_path = opts['subdomaindb_path']

    if zonefiles_dir is None:
        zonefiles_dir = opts['zonefiles']

    db = SubdomainDB(db_path, zonefiles_dir)
    try:
        subrec = db.get_subdomain_entry(fqn)
    except SubdomainNotFound:
        log.warn("No such subdomain: {}".format(fqn))
        return None

    try:
        return db.get_subdomain_DID_info(fqn)
    except SubdomainNotFound:
        return None

def setWidth(self, personID, width):
    """setWidth(string, double) -> None

    Sets the width in m for this person.
    """
    self._connection._sendDoubleCmd(tc.CMD_SET_PERSON_VARIABLE, tc.VAR_WIDTH, personID, width)

def add_arrow(self, x1, y1, x2, y2, **kws):
    """add arrow to plot"""
    self.panel.add_arrow(x1, y1, x2, y2, **kws)

def register(cls):
    """Register variable handling in YAML"""
    if not cls.IS_LOADED:
        cls.IS_LOADED = True
        yaml.add_constructor('!param', Parameter.parameter_constructor, Loader=yaml.SafeLoader)
        yaml.add_constructor('!env', EnvironmentVariable.parameter_constructor, Loader=yaml.SafeLoader)

def configure_sbi():
    """Configure an SBI using POSTed configuration."""
    # Need an ID for the subarray - guessing I just get
    # the list of inactive subarrays and use the first
    inactive_list = SubarrayList().inactive

    request_data = request.data
    LOG.debug('request is of type %s', type(request_data))
    try:
        sbi = Subarray(inactive_list[0])
        sbi.activate()
        sbi.configure_sbi(request_data)
    except jsonschema.exceptions.ValidationError as error:
        LOG.error('Error configuring SBI: %s', error)
        return dict(path=error.absolute_path.__str__(),
                    schema_path=error.schema_path.__str__(),
                    message=error.message)
    return dict(status="Accepted SBI: {}".format(sbi.id))

def refresh_items(self):
    """Refresh the items of the pattern.

    This method destroys the old items and creates and initializes
    the new items.

    It is overridden to NOT insert the children to the parent. The Fragment
    adapter handles this.
    """
    items = []
    if self.condition:
        for nodes, key, f_locals in self.pattern_nodes:
            with new_scope(key, f_locals):
                for node in nodes:
                    child = node(None)
                    if isinstance(child, list):
                        items.extend(child)
                    else:
                        items.append(child)

    for old in self.items:
        if not old.is_destroyed:
            old.destroy()

    #: Insert items into THIS node, NOT the PARENT
    # if len(items) > 0:
    #     self.parent.insert_children(self, items)

    self.items = items

def setEditorData(self, editor, index):
    """Updates the editor with the model data.

    :param editor | <QtGui.QWidget>
           index  | <QtGui.QModelIndex>
    """
    data = unwrapVariant(index.data())
    editor.setCurrentIndex(editor.findText(data))

def for_attempt(self, attempt):
    """:meth:`for_attempt` returns the duration for a specific attempt.

    This is useful if you have a large number of independent backoffs,
    but don't want to use unnecessary memory storing the backoff parameters
    per backoff. The first attempt should be 0.

    :meth:`for_attempt` is thread-safe if non-zero values for
    :attr:`factor`, :attr:`max_ms`, and :attr:`min_ms` are set before
    any calls to :meth:`for_attempt` are made.

    :param attempt: the attempt you want to return duration for
    :type attempt: float
    :return: duration in seconds
    :rtype: float
    """
    dur = float(self.min_ms * pow(self.factor, attempt))
    if self.jitter:
        dur = random.random() * (dur - self.min_ms) + self.min_ms
    if dur > self.max_ms:
        return to_seconds(self.max_ms)
    return to_seconds(dur)

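# Illustrative sketch: a retry loop driven by for_attempt. `backoff` stands in for an
# instance of the class that defines for_attempt (configured with min_ms, max_ms,
# factor, jitter); `do_request` is any callable that may raise IOError.
import time

def _example_retry(backoff, do_request, max_attempts=5):
    for attempt in range(max_attempts):
        try:
            return do_request()
        except IOError:
            # Exponentially growing (optionally jittered) delay, capped at max_ms.
            time.sleep(backoff.for_attempt(attempt))
    raise RuntimeError('request failed after %d attempts' % max_attempts)
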
def calcAspectRatioFromCorners(corners, in_plane=False):
    '''simple and better alg. than below

    in_plane -> whether object has no tilt, but only rotation and translation
    '''
    q = corners
    l0 = [q[0, 0], q[0, 1], q[1, 0], q[1, 1]]
    l1 = [q[0, 0], q[0, 1], q[-1, 0], q[-1, 1]]
    l2 = [q[2, 0], q[2, 1], q[3, 0], q[3, 1]]
    l3 = [q[2, 0], q[2, 1], q[1, 0], q[1, 1]]

    a1 = line.length(l0) / line.length(l1)
    a2 = line.length(l2) / line.length(l3)

    if in_plane:
        # take aspect ratio from the more rectangular corner
        if (abs(0.5 * np.pi - abs(line.angle2(l0, l1)))
                < abs(0.5 * np.pi - abs(line.angle2(l2, l3)))):
            return a1
        else:
            return a2

    return 0.5 * (a1 + a2)

def get_nearest_edges(G, X, Y, method=None, dist=0.0001):
    """Return the graph edges nearest to a list of points. Pass in points
    as separate vectors of X and Y coordinates. The 'kdtree' method
    is by far the fastest with large data sets, but only finds approximate
    nearest edges if working in unprojected coordinates like lat-lng (it
    precisely finds the nearest edge if working in projected coordinates).
    The 'balltree' method is second fastest with large data sets, but it
    is precise if working in unprojected coordinates like lat-lng.

    Parameters
    ----------
    G : networkx multidigraph
    X : list-like
        The vector of longitudes or x's for which we will find the nearest
        edge in the graph. For projected graphs, use the projected coordinates,
        usually in meters.
    Y : list-like
        The vector of latitudes or y's for which we will find the nearest
        edge in the graph. For projected graphs, use the projected coordinates,
        usually in meters.
    method : str {None, 'kdtree', 'balltree'}
        Which method to use for finding nearest edge to each point.
        If None, we manually find each edge one at a time using
        osmnx.utils.get_nearest_edge. If 'kdtree' we use
        scipy.spatial.cKDTree for very fast euclidean search. Recommended for
        projected graphs. If 'balltree', we use sklearn.neighbors.BallTree for
        fast haversine search. Recommended for unprojected graphs.
    dist : float
        spacing length along edges. Units are the same as the geom; Degrees for
        unprojected geometries and meters for projected geometries. The smaller
        the value, the more points are created.

    Returns
    -------
    ne : ndarray
        array of nearest edges represented by their startpoint and endpoint ids,
        u and v, the OSM ids of the nodes.

    Info
    ----
    The method creates equally distanced points along the edges of the network.
    Then, these points are used in a kdTree or BallTree search to identify which
    is nearest. Note that this method will not give the exact perpendicular point
    along the edge, but the smaller the *dist* parameter, the closer the solution
    will be.

    Code is adapted from an answer by JHuw from this original question:
    https://gis.stackexchange.com/questions/222315/geopandas-find-nearest-point-in-other-dataframe
    """
    start_time = time.time()

    if method is None:
        # calculate nearest edge one at a time for each point
        ne = [get_nearest_edge(G, (x, y)) for x, y in zip(X, Y)]
        ne = [(u, v) for _, u, v in ne]

    elif method == 'kdtree':
        # check if we were able to import scipy.spatial.cKDTree successfully
        if not cKDTree:
            raise ImportError('The scipy package must be installed to use this optional feature.')

        # transform graph into DataFrame
        edges = graph_to_gdfs(G, nodes=False, fill_edge_geometry=True)

        # transform edges into evenly spaced points
        edges['points'] = edges.apply(lambda x: redistribute_vertices(x.geometry, dist), axis=1)

        # develop edges data for each created points
        extended = edges['points'].apply([pd.Series]).stack().reset_index(level=1, drop=True).join(edges).reset_index()

        # Prepare btree arrays
        nbdata = np.array(list(zip(extended['Series'].apply(lambda x: x.x),
                                   extended['Series'].apply(lambda x: x.y))))

        # build a k-d tree for euclidean nearest node search
        btree = cKDTree(data=nbdata, compact_nodes=True, balanced_tree=True)

        # query the tree for nearest node to each point
        points = np.array([X, Y]).T
        dist, idx = btree.query(points, k=1)  # Returns ids of closest point
        eidx = extended.loc[idx, 'index']
        ne = edges.loc[eidx, ['u', 'v']]

    elif method == 'balltree':
        # check if we were able to import sklearn.neighbors.BallTree successfully
        if not BallTree:
            raise ImportError('The scikit-learn package must be installed to use this optional feature.')

        # transform graph into DataFrame
        edges = graph_to_gdfs(G, nodes=False, fill_edge_geometry=True)

        # transform edges into evenly spaced points
        edges['points'] = edges.apply(lambda x: redistribute_vertices(x.geometry, dist), axis=1)

        # develop edges data for each created points
        extended = edges['points'].apply([pd.Series]).stack().reset_index(level=1, drop=True).join(edges).reset_index()

        # haversine requires data in form of [lat, lng] and inputs/outputs in units of radians
        nodes = pd.DataFrame({'x': extended['Series'].apply(lambda x: x.x),
                              'y': extended['Series'].apply(lambda x: x.y)})
        nodes_rad = np.deg2rad(nodes[['y', 'x']].values.astype(np.float))
        points = np.array([Y, X]).T
        points_rad = np.deg2rad(points)

        # build a ball tree for haversine nearest node search
        tree = BallTree(nodes_rad, metric='haversine')

        # query the tree for nearest node to each point
        idx = tree.query(points_rad, k=1, return_distance=False)
        eidx = extended.loc[idx[:, 0], 'index']
        ne = edges.loc[eidx, ['u', 'v']]

    else:
        raise ValueError('You must pass a valid method name, or None.')

    log('Found nearest edges to {:,} points in {:,.2f} seconds'.format(len(X), time.time() - start_time))

    return np.array(ne)

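# Illustrative sketch: finding the nearest edges for a handful of lat-lng points on an
# unprojected street graph. `G` stands in for a graph built elsewhere (for example by
# the surrounding osmnx package; that context is an assumption). Coordinates below are
# arbitrary example values in Manhattan.
def _example_get_nearest_edges(G):
    lngs = [-73.99, -73.98]
    lats = [40.73, 40.74]
    # 'balltree' is recommended above for unprojected (lat-lng) graphs.
    edges = get_nearest_edges(G, lngs, lats, method='balltree', dist=0.0001)
    return edges  # ndarray of (u, v) node id pairs
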
def list_nodes_full(mask='mask[id, hostname, primaryIpAddress, \
        primaryBackendIpAddress, processorPhysicalCoreAmount, memoryCount]',
                    call=None):
    '''Return a list of the VMs that are on the provider'''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The list_nodes_full function must be called with -f or --function.')

    ret = {}
    conn = get_conn(service='SoftLayer_Account')
    response = conn.getHardware(mask=mask)

    for node in response:
        ret[node['hostname']] = node
    __utils__['cloud.cache_node_list'](ret, __active_provider_name__.split(':')[0], __opts__)
    return ret

def _get_indexes_in_altered_table(self, diff):
    """
    :param diff: The table diff
    :type diff: orator.dbal.table_diff.TableDiff

    :rtype: dict
    """
    indexes = diff.from_table.get_indexes()
    column_names = self._get_column_names_in_altered_table(diff)

    for key, index in OrderedDict([(k, v) for k, v in indexes.items()]).items():
        for old_index_name, renamed_index in diff.renamed_indexes.items():
            if key.lower() == old_index_name.lower():
                del indexes[key]

        changed = False
        index_columns = []
        for column_name in index.get_columns():
            normalized_column_name = column_name.lower()
            if normalized_column_name not in column_names:
                del indexes[key]
                break
            else:
                index_columns.append(column_names[normalized_column_name])

                if column_name != column_names[normalized_column_name]:
                    changed = True

        if changed:
            indexes[key] = Index(index.get_name(), index_columns,
                                 index.is_unique(), index.is_primary(),
                                 index.get_flags())

    for index in diff.removed_indexes.values():
        index_name = index.get_name().lower()
        if index_name and index_name in indexes:
            del indexes[index_name]

    changed_indexes = (list(diff.changed_indexes.values())
                       + list(diff.added_indexes.values())
                       + list(diff.renamed_indexes.values()))
    for index in changed_indexes:
        index_name = index.get_name().lower()
        if index_name:
            indexes[index_name] = index
        else:
            indexes[len(indexes)] = index

    return indexes

def has_child ( cls , child_type , query ) :
'''http : / / www . elasticsearch . org / guide / reference / query - dsl / has - child - query . html
The has _ child query accepts a query and the child type to run against , and results in parent documents that have child docs matching the query .
> child _ query = ElasticQuery ( ) . term ( tag = ' something ' )
> query = ElasticQuery ( ) . has _ child ( ' blog _ tag ' , child _ query )'''
|
instance = cls ( has_child = { 'type' : child_type , 'query' : query } )
return instance
|
def stream_directory ( directory , recursive = False , patterns = '**' , chunk_size = default_chunk_size ) :
"""Gets a buffered generator for streaming directories .
Returns a buffered generator which encodes a directory as
: mimetype : ` multipart / form - data ` with the corresponding headers .
Parameters
directory : str
The filepath of the directory to stream
recursive : bool
Stream all content within the directory recursively ?
patterns : str | list
Single * glob * pattern or list of * glob * patterns and compiled
regular expressions to match the names of the filepaths to keep
chunk _ size : int
Maximum size of each stream chunk"""
|
stream = DirectoryStream ( directory , recursive = recursive , patterns = patterns , chunk_size = chunk_size )
return stream . body ( ) , stream . headers
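A hedged usage sketch (hypothetical directory and endpoint; assumes the requests package): the returned generator and headers can be handed straight to an HTTP client that accepts streamed multipart bodies.

import requests

body, headers = stream_directory('photos', recursive=True, patterns='**/*.jpg')

# headers carry the multipart boundary; the generator yields the encoded chunks
response = requests.post('http://127.0.0.1:5001/api/v0/add',  # hypothetical endpoint
                         data=body, headers=headers)
print(response.status_code)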
|
def data_filler_company ( self , number_of_rows , cursor , conn ) :
'''creates and fills the table with company data'''
|
companies_data = [ ]
try :
for i in range ( 0 , number_of_rows ) :
companies_data . append ( ( rnd_id_generator ( self ) , self . faker . company ( ) , self . faker . date ( pattern = "%Y-%m-%d" ) , self . faker . company_email ( ) , self . faker . safe_email ( ) , self . faker . city ( ) ) )
companies_payload = ( "INSERT INTO company " "(id, name, sdate, email, domain, city) " "VALUES (%s, %s, %s, %s, %s, %s)" )
cursor . executemany ( companies_payload , companies_data )
conn . commit ( )
logger . warning ( 'companies Commits are successful after write job!' , extra = extra_information )
except Exception as e :
logger . error ( e , extra = extra_information )
|
def print_at ( self , text , x , y , colour = 7 , attr = 0 , bg = 0 , transparent = False ) :
"""Print the text at the specified location using the specified colour and attributes .
: param text : The ( single line ) text to be printed .
: param x : The column ( x coord ) for the start of the text .
: param y : The line ( y coord ) for the start of the text .
: param colour : The colour of the text to be displayed .
: param attr : The cell attribute of the text to be displayed .
: param bg : The background colour of the text to be displayed .
: param transparent : Whether to print spaces or not , thus giving a
transparent effect .
The colours and attributes are the COLOUR _ xxx and A _ yyy constants
defined in the Screen class ."""
|
# Convert to the logically visible window that our double - buffer provides
y -= self . _start_line
# Trim text to the buffer vertically . Don ' t trim horizontally as we don ' t know whether any
# of these characters are dual - width yet . Handle it on the fly below . . .
if y < 0 or y >= self . _buffer_height or x > self . width :
return
if len ( text ) > 0 :
j = 0
for i , c in enumerate ( text ) : # Handle under - run and overrun of double - width glyphs now .
# Note that wcwidth uses significant resources , so only call when we have a
# unicode aware application . The rest of the time assume ASCII .
width = wcwidth ( c ) if self . _unicode_aware and ord ( c ) >= 256 else 1
if x + i + j < 0 :
x += ( width - 1 )
continue
if x + i + j + width > self . width :
return
# Now handle the update .
if c != " " or not transparent : # Fix up orphaned double - width glyphs that we ' ve just bisected .
if x + i + j - 1 >= 0 and self . _buffer . get ( x + i + j - 1 , y ) [ 4 ] == 2 :
self . _buffer . set ( x + i + j - 1 , y , ( ord ( "x" ) , 0 , 0 , 0 , 1 ) )
self . _buffer . set ( x + i + j , y , ( ord ( c ) , colour , attr , bg , width ) )
if width == 2 :
j += 1
if x + i + j < self . width :
self . _buffer . set ( x + i + j , y , ( ord ( c ) , colour , attr , bg , 0 ) )
# Now fix up any glyphs we may have bisected the other way .
if x + i + j + 1 < self . width and self . _buffer . get ( x + i + j + 1 , y ) [ 4 ] == 0 :
self . _buffer . set ( x + i + j + 1 , y , ( ord ( "x" ) , 0 , 0 , 0 , 1 ) )
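A small usage sketch, assuming an asciimatics-style Screen instance is available as screen (the refresh call and the numeric colour value belong to that wider API, not to this method alone):

# green text at column 10, row 5 of the logical buffer
screen.print_at('Hello, world!', 10, 5, colour=2)

# transparent=True leaves existing cells untouched wherever the text has spaces
screen.print_at('***       ***', 10, 5, transparent=True)

# nothing appears until the double buffer is flushed
screen.refresh()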
|
def request_sensor_sampling_clear ( self , req ) :
"""Set all sampling strategies for this client to none .
Returns
success : { ' ok ' , ' fail ' }
Whether sending the list of devices succeeded .
Examples
? sensor - sampling - clear
! sensor - sampling - clear ok"""
|
f = Future ( )
@ gen . coroutine
def _clear_strategies ( ) :
self . clear_strategies ( req . client_connection )
raise gen . Return ( ( 'ok' , ) )
self . ioloop . add_callback ( lambda : chain_future ( _clear_strategies ( ) , f ) )
return f
|
def train_position_scales ( self , layout , layers ) :
"""Compute ranges for the x and y scales"""
|
_layout = layout . layout
panel_scales_x = layout . panel_scales_x
panel_scales_y = layout . panel_scales_y
# loop over each layer , training x and y scales in turn
for layer in layers :
data = layer . data
match_id = match ( data [ 'PANEL' ] , _layout [ 'PANEL' ] )
if panel_scales_x :
x_vars = list ( set ( panel_scales_x [ 0 ] . aesthetics ) & set ( data . columns ) )
# the scale index for each data point
SCALE_X = _layout [ 'SCALE_X' ] . iloc [ match_id ] . tolist ( )
panel_scales_x . train ( data , x_vars , SCALE_X )
if panel_scales_y :
y_vars = list ( set ( panel_scales_y [ 0 ] . aesthetics ) & set ( data . columns ) )
# the scale index for each data point
SCALE_Y = _layout [ 'SCALE_Y' ] . iloc [ match_id ] . tolist ( )
panel_scales_y . train ( data , y_vars , SCALE_Y )
return self
|
def run ( self ) :
"""Start the consumer"""
|
if self . profile_file :
LOGGER . info ( 'Profiling to %s' , self . profile_file )
profile . runctx ( 'self._run()' , globals ( ) , locals ( ) , self . profile_file )
else :
self . _run ( )
LOGGER . debug ( 'Exiting %s (%i, %i)' , self . name , os . getpid ( ) , os . getppid ( ) )
|
def handleError ( self , test , err ) :
"""Baseclass override . Called when a test raises an exception .
If the test isn ' t going to be rerun again , then report the error
to the nose test result .
: param test :
The test that has raised an error
: type test :
: class : ` nose . case . Test `
: param err :
Information about the test failure ( from sys . exc _ info ( ) )
: type err :
` tuple ` of ` class ` , : class : ` Exception ` , ` traceback `
: return :
True , if the test will be rerun ; False , if nose should handle it .
: rtype :
` bool `"""
|
# pylint : disable = invalid - name
want_error = self . _handle_test_error_or_failure ( test , err )
if not want_error and id ( test ) in self . _tests_that_reran :
self . _nose_result . addError ( test , err )
return want_error or None
|
def maybe_start_recording ( tokens , index ) :
"""Return a new _ InlineRSTRecorder when its time to record ."""
|
if tokens [ index ] . type == TokenType . BeginInlineRST :
return _InlineRSTRecorder ( index )
|
def get_filters_values ( self ) :
"""Get different filters values as dicts ."""
|
# DATASETS - -
# badges
self . _DST_BADGES = requests . get ( self . base_url + "datasets/badges/" ) . json ( )
# licences
self . _DST_LICENSES = { l . get ( "id" ) : l . get ( "title" ) for l in requests . get ( self . base_url + "datasets/licenses" ) . json ( ) }
# frequencies
self . _DST_FREQUENCIES = { f . get ( "id" ) : f . get ( "label" ) for f in requests . get ( self . base_url + "datasets/frequencies" ) . json ( ) }
# ORGANIZATIONS - -
# badges
self . _ORG_BADGES = requests . get ( self . base_url + "organizations/badges/" ) . json ( )
# # licences
# self . _ DST _ LICENSES = { l . get ( " id " ) : l . get ( " title " )
# for l in requests . get ( self . base _ url + " datasets / licenses " ) . json ( ) }
# # frequencies
# self . _ DST _ FREQUENCIES = { f . get ( " id " ) : f . get ( " label " )
# for f in requests . get ( self . base _ url + " datasets / frequencies " ) . json ( ) }
# SPATIAL - -
# granularities
self . _GRANULARITIES = { g . get ( "id" ) : g . get ( "name" ) for g in requests . get ( self . base_url + "spatial/granularities" ) . json ( ) }
# levels
self . _LEVELS = { g . get ( "id" ) : g . get ( "name" ) for g in requests . get ( self . base_url + "spatial/levels" ) . json ( ) }
# MISC - -
# facets
self . _FACETS = ( "all" , "badge" , "featured" , "format" , "geozone" , "granularity" , "license" , "owner" , "organization" , "reuses" , "tag" , "temporal_coverage" , )
# reuses
self . _REUSES = ( "none" , "few" , "quite" , "many" )
|
def post_collection ( self , session , data , api_type ) :
"""Create a new Resource .
: param session : SQLAlchemy session
: param data : Request JSON Data
: param params : Keyword arguments"""
|
model = self . _fetch_model ( api_type )
self . _check_json_data ( data )
orm_desc_keys = model . __mapper__ . all_orm_descriptors . keys ( )
if 'type' not in data [ 'data' ] . keys ( ) :
raise MissingTypeError ( )
if data [ 'data' ] [ 'type' ] != model . __jsonapi_type__ :
raise InvalidTypeForEndpointError ( model . __jsonapi_type__ , data [ 'data' ] [ 'type' ] )
resource = model ( )
check_permission ( resource , None , Permissions . CREATE )
data [ 'data' ] . setdefault ( 'relationships' , { } )
data [ 'data' ] . setdefault ( 'attributes' , { } )
data_keys = set ( map ( ( lambda x : resource . __jsonapi_map_to_py__ . get ( x , MissingKey ( x ) ) ) , data [ 'data' ] . get ( 'relationships' , { } ) . keys ( ) ) )
model_keys = set ( resource . __mapper__ . relationships . keys ( ) )
if not data_keys <= model_keys :
data_keys = set ( [ key . elem if isinstance ( key , MissingKey ) else key for key in data_keys ] )
# pragma : no cover
raise BadRequestError ( '{} not relationships for {}' . format ( ', ' . join ( [ repr ( key ) for key in list ( data_keys - model_keys ) ] ) , model . __jsonapi_type__ ) )
attrs_to_ignore = { '__mapper__' , 'id' }
setters = [ ]
try :
if 'id' in data [ 'data' ] . keys ( ) :
resource . id = data [ 'data' ] [ 'id' ]
for key , relationship in resource . __mapper__ . relationships . items ( ) :
attrs_to_ignore |= set ( relationship . local_columns ) | { key }
api_key = resource . __jsonapi_map_to_api__ [ key ]
if 'relationships' not in data [ 'data' ] . keys ( ) or api_key not in data [ 'data' ] [ 'relationships' ] . keys ( ) :
continue
data_rel = data [ 'data' ] [ 'relationships' ] [ api_key ]
if 'data' not in data_rel . keys ( ) :
raise BadRequestError ( 'Missing data key in relationship {}' . format ( key ) )
data_rel = data_rel [ 'data' ]
remote_side = relationship . back_populates
if relationship . direction == MANYTOONE :
setter = get_rel_desc ( resource , key , RelationshipActions . SET )
if data_rel is None :
setters . append ( [ setter , None ] )
else :
if not isinstance ( data_rel , dict ) :
raise BadRequestError ( '{} must be a hash' . format ( key ) )
if not { 'type' , 'id' } == set ( data_rel . keys ( ) ) :
raise BadRequestError ( '{} must have type and id keys' . format ( key ) )
to_relate = self . _fetch_resource ( session , data_rel [ 'type' ] , data_rel [ 'id' ] , Permissions . EDIT )
rem = to_relate . __mapper__ . relationships [ remote_side ]
if rem . direction == MANYTOONE :
check_permission ( to_relate , remote_side , Permissions . EDIT )
else :
check_permission ( to_relate , remote_side , Permissions . CREATE )
setters . append ( [ setter , to_relate ] )
else :
setter = get_rel_desc ( resource , key , RelationshipActions . APPEND )
if not isinstance ( data_rel , list ) :
raise BadRequestError ( '{} must be an array' . format ( key ) )
for item in data_rel :
if 'type' not in item . keys ( ) or 'id' not in item . keys ( ) :
raise BadRequestError ( '{} must have type and id keys' . format ( key ) )
# pragma : no cover
to_relate = self . _fetch_resource ( session , item [ 'type' ] , item [ 'id' ] , Permissions . EDIT )
rem = to_relate . __mapper__ . relationships [ remote_side ]
if rem . direction == MANYTOONE :
check_permission ( to_relate , remote_side , Permissions . EDIT )
else :
check_permission ( to_relate , remote_side , Permissions . CREATE )
setters . append ( [ setter , to_relate ] )
data_keys = set ( map ( ( lambda x : resource . __jsonapi_map_to_py__ . get ( x , None ) ) , data [ 'data' ] . get ( 'attributes' , { } ) . keys ( ) ) )
model_keys = set ( orm_desc_keys ) - attrs_to_ignore
if not data_keys <= model_keys :
raise BadRequestError ( '{} not attributes for {}' . format ( ', ' . join ( list ( data_keys - model_keys ) ) , model . __jsonapi_type__ ) )
with session . no_autoflush :
for setter , value in setters :
setter ( resource , value )
for key in data_keys :
api_key = resource . __jsonapi_map_to_api__ [ key ]
setter = get_attr_desc ( resource , key , AttributeActions . SET )
setter ( resource , data [ 'data' ] [ 'attributes' ] [ api_key ] )
session . add ( resource )
session . commit ( )
except IntegrityError as e :
session . rollback ( )
raise ValidationError ( str ( e . orig ) )
except AssertionError as e : # pragma : no cover
session . rollback ( )
raise ValidationError ( e . msg )
except TypeError as e :
session . rollback ( )
raise ValidationError ( 'Incompatible data type' )
session . refresh ( resource )
response = self . get_resource ( session , { } , model . __jsonapi_type__ , resource . id )
response . status_code = 201
return response
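A hedged sketch of the JSON:API payload this handler expects (type names and ids are hypothetical): data must carry a type matching the endpoint, plus optional attributes and relationships in {type, id} form.

payload = {
    'data': {
        'type': 'posts',                    # must equal model.__jsonapi_type__
        'attributes': {'title': 'Hello'},   # keys mapped through __jsonapi_map_to_py__
        'relationships': {
            # to-one relationship: a single {type, id} hash
            'author': {'data': {'type': 'people', 'id': '1'}},
            # to-many relationship: a list of {type, id} hashes
            'comments': {'data': [{'type': 'comments', 'id': '5'}]},
        },
    }
}
# response = serializer.post_collection(session, payload, 'posts')  # hypothetical call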
|
def create_environment ( home_dir , site_packages = False , clear = False , unzip_setuptools = False , prompt = None , search_dirs = None , download = False , no_setuptools = False , no_pip = False , no_wheel = False , symlink = True ) :
"""Creates a new environment in ` ` home _ dir ` ` .
If ` ` site _ packages ` ` is true , then the global ` ` site - packages / ` `
directory will be on the path .
If ` ` clear ` ` is true ( default False ) then the environment will
first be cleared ."""
|
home_dir , lib_dir , inc_dir , bin_dir = path_locations ( home_dir )
py_executable = os . path . abspath ( install_python ( home_dir , lib_dir , inc_dir , bin_dir , site_packages = site_packages , clear = clear , symlink = symlink ) )
install_distutils ( home_dir )
to_install = [ ]
if not no_setuptools :
to_install . append ( 'setuptools' )
if not no_pip :
to_install . append ( 'pip' )
if not no_wheel :
to_install . append ( 'wheel' )
if to_install :
install_wheel ( to_install , py_executable , search_dirs , download = download , )
install_activate ( home_dir , bin_dir , prompt )
install_python_config ( home_dir , bin_dir , prompt )
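A minimal call sketch (hypothetical target path): build an isolated environment with a custom prompt and without wheel.

create_environment(
    '/tmp/demo-env',        # hypothetical directory
    site_packages=False,    # keep the global site-packages off sys.path
    clear=True,             # wipe any previous contents first
    prompt='(demo) ',
    no_wheel=True,
)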
|
def plots_html_page ( query_module ) :
"""Generate analysis output as html page
Args :
query _ module ( module ) : module to use for querying data for the
desired model / pipeline variant , e . g . leonardo . standard . queries"""
|
# page template
template = jenv . get_template ( "analysis.html" )
# container for template context
context = dict ( extended = config . EXTENDED )
# a database client / session to run queries in
cl = client . get_client ( )
session = cl . create_session ( )
# general styling
seaborn . set_style ( 'whitegrid' )
# plot : painting area by decade , with linear regression
decade_df = query_module . decade_query ( )
pix_size = pixels_to_inches ( ( 600 , 400 ) )
ax = seaborn . lmplot ( x = 'decade' , y = 'area' , data = decade_df , size = pix_size [ 1 ] , aspect = pix_size [ 0 ] / pix_size [ 1 ] , scatter_kws = { "s" : 30 , "alpha" : 0.3 } )
ax . set ( xlabel = 'Decade' , ylabel = 'Area, m^2' )
context [ 'area_by_decade_svg' ] = fig_to_svg ( plt . gcf ( ) )
plt . close ( 'all' )
# plot : painting area by gender , with logistic regression
if config . EXTENDED :
gender_df = query_module . gender_query ( )
pix_size = pixels_to_inches ( ( 600 , 400 ) )
g = seaborn . FacetGrid ( gender_df , hue = "gender" , margin_titles = True , size = pix_size [ 1 ] , aspect = pix_size [ 0 ] / pix_size [ 1 ] )
bins = np . linspace ( 0 , 5 , 30 )
g . map ( plt . hist , "area" , bins = bins , lw = 0 , alpha = 0.5 , normed = True )
g . axes [ 0 , 0 ] . set_xlabel ( 'Area, m^2' )
g . axes [ 0 , 0 ] . set_ylabel ( 'Percentage of paintings' )
context [ 'area_by_gender_svg' ] = fig_to_svg ( plt . gcf ( ) )
plt . close ( 'all' )
# render template
out_file = path . join ( out_dir , "analysis.html" )
html_content = template . render ( ** context )
with open ( out_file , 'w' ) as f :
f . write ( html_content )
# done , clean up
plt . close ( 'all' )
session . close ( )
|
def backend_from_fobj ( f ) :
"""Determine backend module object from a file object ."""
|
if magic is None :
warn ( "magic lib is not installed; assuming mime type %r" % ( DEFAULT_MIME ) )
return backend_from_mime ( DEFAULT_MIME )
else :
offset = f . tell ( )
try :
f . seek ( 0 )
chunk = f . read ( MAGIC_BUFFER_SIZE )
mime = magic . from_buffer ( chunk , mime = True )
return backend_from_mime ( mime )
finally :
f . seek ( offset )
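A short standalone sketch of the mime-sniffing step (assumes the python-magic package; the buffer size and file name are placeholders for whatever the module actually defines):

import magic

MAGIC_BUFFER_SIZE = 1024  # assumption; the real module defines its own constant

with open('document.pdf', 'rb') as f:  # hypothetical file
    chunk = f.read(MAGIC_BUFFER_SIZE)
    mime = magic.from_buffer(chunk, mime=True)

print(mime)  # e.g. 'application/pdf'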
|
def send_many ( self , outputs_array , fee = None , change_addr = None , id = None , endpoint = None ) :
"""Args :
outputs _ array : ( dict ) array , the data structure of each element in the array is as follows :
{ " asset " : < asset > , " value " : < value > , " address " : < address > }
asset : ( str ) asset identifier ( for NEO : ' c56f33fc6ecfcd0c225c4ab356fee59390af8560be0e930faebe74a6daff7c9b ' , for GAS : ' 602c79718b16e442de58778e148d0b1084e3b2dffd5de6b7b16cee7969282de7 ' )
value : ( int / decimal ) transfer amount
address : ( str ) destination address
fee : ( decimal , optional ) Paying the handling fee helps elevate the priority of the network to process the transfer . It defaults to 0 , and can be set to a minimum of 0.000001 . The low priority threshold is 0.001.
change _ addr : ( str , optional ) Change address , default is the first standard address in the wallet .
id : ( int , optional ) id to use for response tracking
endpoint : ( RPCEndpoint , optional ) endpoint to specify to use"""
|
params = [ outputs_array ]
if fee :
params . append ( fee )
if fee and change_addr :
params . append ( change_addr )
elif not fee and change_addr :
params . append ( 0 )
params . append ( change_addr )
return self . _call_endpoint ( SEND_MANY , params = params , id = id , endpoint = endpoint )
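A hedged call sketch using the asset ids and output structure documented above (the address is a placeholder, and client stands for whatever RPC wrapper exposes send_many):

NEO = 'c56f33fc6ecfcd0c225c4ab356fee59390af8560be0e930faebe74a6daff7c9b'
GAS = '602c79718b16e442de58778e148d0b1084e3b2dffd5de6b7b16cee7969282de7'
ADDR = 'AJBENSwajTzQtwyJFkiJSv7MAaaMfVN6EP'  # placeholder destination

outputs = [
    {'asset': NEO, 'value': 1, 'address': ADDR},
    {'asset': GAS, 'value': 0.5, 'address': ADDR},
]

result = client.send_many(outputs, fee=0.000001, change_addr=ADDR)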
|
def console_get_char_foreground ( con : tcod . console . Console , x : int , y : int ) -> Color :
"""Return the foreground color at the x , y of this console .
. . deprecated : : 8.4
Array access performs significantly faster than using this function .
See : any : ` Console . fg ` ."""
|
return Color . _new_from_cdata ( lib . TCOD_console_get_char_foreground ( _console ( con ) , x , y ) )
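As the deprecation note says, direct array access is the preferred replacement; a brief sketch assuming a python-tcod Console instance named con:

# the fg buffer holds (r, g, b) values and is indexed [y, x] by default
r, g, b = con.fg[2, 3]

# should match the colour returned by the deprecated call above
print(console_get_char_foreground(con, 3, 2), (r, g, b))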
|
def _already_resized_on_flickr ( self , fn , pid , _megapixels ) :
"""Checks if image file ( fn ) with photo _ id ( pid ) has already
been resized on flickr . If so , returns True"""
|
logger . debug ( "%s - resize requested" % ( fn ) )
# Get width / height from flickr
width_flickr , height_flickr = self . _getphoto_originalsize ( pid )
# Now compute what image will be if we resize it
new_width , new_height = pusher_utils . resize_compute_width_height ( fn , _megapixels )
if width_flickr == new_width and height_flickr == new_height :
return True
# Also return true if image couldn ' t be resized
elif not new_width :
return True
return False
|
def _loglr ( self ) :
r"""Computes the log likelihood ratio
Returns
float
The value of the log likelihood ratio ."""
|
# calculate < d - h | d - h > = < h | h > - 2 < h | d > + < d | d > up to a constant
p = self . current_params . copy ( )
p . update ( self . static_params )
if self . time is None :
self . time = p [ 'tc' ]
shloglr = hhloglr = 0
for ifo in self . sh :
fp , fc = self . det [ ifo ] . antenna_pattern ( p [ 'ra' ] , p [ 'dec' ] , p [ 'polarization' ] , self . time )
dt = self . det [ ifo ] . time_delay_from_earth_center ( p [ 'ra' ] , p [ 'dec' ] , self . time )
ip = numpy . cos ( p [ 'inclination' ] )
ic = 0.5 * ( 1.0 + ip * ip )
htf = ( fp * ip + 1.0j * fc * ic ) / p [ 'distance' ]
sh = self . sh [ ifo ] . at_time ( p [ 'tc' ] + dt ) * htf
shloglr += sh
hhloglr += self . hh [ ifo ] * abs ( htf ) ** 2.0
vloglr = numpy . log ( scipy . special . i0e ( abs ( shloglr ) ) )
vloglr += abs ( shloglr ) + hhloglr
return float ( vloglr )
|
def inform_hook_client_factory ( self , host , port , * args , ** kwargs ) :
"""Return an instance of : class : ` _ InformHookDeviceClient ` or similar
Provided to ease testing . Dynamically overriding this method after instantiation
but before start ( ) is called allows for deep brain surgery . See
: class : ` katcp . fake _ clients . TBD `"""
|
return _InformHookDeviceClient ( host , port , * args , ** kwargs )
|
def make_path ( phase ) -> str :
"""Create the path to the folder at which the metadata and optimizer pickle should be saved"""
|
return "{}/{}{}{}" . format ( conf . instance . output_path , phase . phase_path , phase . phase_name , phase . phase_tag )
|
def schedule_function ( queue_name , function_name , * args , ** kwargs ) :
"""Schedule a function named ` function _ name ` to be run by workers on
the queue ` queue _ name ` with * args and * * kwargs as specified by that
function ."""
|
body = create_request_body ( function_name , * args , ** kwargs )
if getattr ( settings , 'BEANSTALK_DISPATCH_EXECUTE_SYNCHRONOUSLY' , False ) :
execute_function ( json . loads ( body ) )
else :
connection = boto . connect_sqs ( settings . BEANSTALK_DISPATCH_SQS_KEY , settings . BEANSTALK_DISPATCH_SQS_SECRET )
queue = connection . get_queue ( queue_name )
if not queue :
queue = connection . create_queue ( queue_name )
message = boto . sqs . message . Message ( )
message . set_body ( body )
queue . write ( message )
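A usage sketch (queue and function names are hypothetical); if BEANSTALK_DISPATCH_EXECUTE_SYNCHRONOUSLY is set, the call runs inline instead of going through SQS.

# enqueue resize_avatar(42, size='small') for workers listening on 'media-tasks'
schedule_function('media-tasks', 'resize_avatar', 42, size='small')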
|
def tags ( self ) :
"Iterate over all tags yielding ( name , function )"
|
for bucket in self :
for k , v in self [ bucket ] . items ( ) :
yield k , v
|
def update ( self ) :
"""Monolithic update method .
This method calls the following methods with the dynamic loss scaling .
1 . solver . zerograd
2 . feed data
3 . loss . forward
4 . loss . backward
5 . comm . all _ reduce ( if it is specified )
6 . solver . update"""
|
# Initialize gradients .
self . solver . zero_grad ( )
# Forward and backward
for _ in range ( self . accum_grad ) : # feed data
self . data_feeder ( )
# forward
self . loss . forward ( clear_no_need_grad = self . clear_buffer )
# backward with scale
self . loss . backward ( self . scale , clear_buffer = self . clear_buffer )
# AllReduce
if self . comm and len ( self . grads ) != 0 :
self . comm . all_reduce ( self . grads , division = False , inplace = False )
# Check Inf / NaN in grads
if self . solver . check_inf_or_nan_grad ( ) :
self . scale /= self . scaling_factor
self . _counter = 0
# Recursively call update function until no inf nor nan .
self . _recursive_count += 1
if self . _recursive_count > self . _max_recursive_count :
self . _recursive_count = 0
return # skip
return self . update ( )
self . _recursive_count = 0
# Rescale grads
self . solver . scale_grad ( 1. / self . scale )
# Do some gradient clipping , etc .
if self . weight_decay is not None :
self . solver . weight_decay ( self . weight_decay )
# Update
self . solver . update ( )
if self . _counter > self . N :
self . scale *= self . scaling_factor
self . _counter = 0
self . _counter += 1
|
def processStoredSms ( self , unreadOnly = False ) :
"""Process all SMS messages currently stored on the device / SIM card .
Reads all ( or just unread ) received SMS messages currently stored on the
device / SIM card , initiates " SMS received " events for them , and removes
them from the SIM card .
This is useful if SMS messages were received during a period that
python - gsmmodem was not running but the modem was powered on .
: param unreadOnly : If True , only process unread SMS messages
: type unreadOnly : boolean"""
|
states = [ Sms . STATUS_RECEIVED_UNREAD ]
if not unreadOnly :
states . insert ( 0 , Sms . STATUS_RECEIVED_READ )
for msgStatus in states :
messages = self . listStoredSms ( status = msgStatus , delete = True )
for sms in messages :
self . smsReceivedCallback ( sms )
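A hedged usage sketch, assuming a connected python-gsmmodem-style object named modem whose received-SMS callback has been set:

modem.smsReceivedCallback = lambda sms: print(sms.number, sms.text)

# replay and delete any unread messages that arrived while the script was down
modem.processStoredSms(unreadOnly=True)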
|
def validate ( self , pA , pB ) :
"""Validate that the two properties may be linked .
@ param pA : Endpoint ( A ) to link .
@ type pA : L { Endpoint }
@ param pB : Endpoint ( B ) to link .
@ type pB : L { Endpoint }
@ return : self
@ rtype : L { Link }"""
|
if pA in pB . links or pB in pA . links :
raise Exception ( 'Already linked' )
dA = pA . domains ( )
dB = pB . domains ( )
for d in dA :
if d in dB :
raise Exception ( 'Duplicate domain "%s" found' % d )
for d in dB :
if d in dA :
raise Exception ( 'Duplicate domain "%s" found' % d )
kA = pA . keys ( )
kB = pB . keys ( )
for k in kA :
if k in kB :
raise Exception ( 'Duplicate key %s found' % k )
for k in kB :
if k in kA :
raise Exception ( 'Duplicate key %s found' % k )
return self
|
def kdeconf ( kde , conf = 0.683 , xmin = None , xmax = None , npts = 500 , shortest = True , conftol = 0.001 , return_max = False ) :
"""Returns desired confidence interval for provided KDE object"""
|
if xmin is None :
xmin = kde . dataset . min ( )
if xmax is None :
xmax = kde . dataset . max ( )
x = np . linspace ( xmin , xmax , npts )
return conf_interval ( x , kde ( x ) , shortest = shortest , conf = conf , conftol = conftol , return_max = return_max )
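A minimal usage sketch, assuming scipy's gaussian_kde as the KDE object and that the underlying conf_interval helper returns a (lower, upper) pair:

import numpy as np
from scipy.stats import gaussian_kde

np.random.seed(0)
samples = np.random.normal(loc=5.0, scale=2.0, size=5000)
kde = gaussian_kde(samples)

lo, hi = kdeconf(kde, conf=0.683)  # shortest 68.3% interval
print(lo, hi)                      # roughly one sigma either side of 5.0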
|
def _set_traffic_state ( self , v , load = False ) :
"""Setter method for traffic _ state , mapped from YANG variable / traffic _ state ( container )
If this variable is read - only ( config : false ) in the
source YANG file , then _ set _ traffic _ state is considered as a private
method . Backends looking to populate this variable should
do so via calling thisObj . _ set _ traffic _ state ( ) directly .
YANG Description : IS - IS packet counts"""
|
if hasattr ( v , "_utype" ) :
v = v . _utype ( v )
try :
t = YANGDynClass ( v , base = traffic_state . traffic_state , is_container = 'container' , presence = False , yang_name = "traffic-state" , rest_name = "traffic-state" , parent = self , path_helper = self . _path_helper , extmethods = self . _extmethods , register_paths = True , extensions = { u'tailf-common' : { u'callpoint' : u'isis-traffic' , u'cli-suppress-show-path' : None } } , namespace = 'urn:brocade.com:mgmt:brocade-isis-operational' , defining_module = 'brocade-isis-operational' , yang_type = 'container' , is_config = True )
except ( TypeError , ValueError ) :
raise ValueError ( { 'error-string' : """traffic_state must be of a type compatible with container""" , 'defined-type' : "container" , 'generated-type' : """YANGDynClass(base=traffic_state.traffic_state, is_container='container', presence=False, yang_name="traffic-state", rest_name="traffic-state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'isis-traffic', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='container', is_config=True)""" , } )
self . __traffic_state = t
if hasattr ( self , '_set' ) :
self . _set ( )
|
def interp_like ( self , other , method = 'linear' , assume_sorted = False , kwargs = { } ) :
"""Interpolate this object onto the coordinates of another object ,
filling the out of range values with NaN .
Parameters
other : Dataset or DataArray
Object with an ' indexes ' attribute giving a mapping from dimension
names to an 1d array - like , which provides coordinates upon
which to index the variables in this dataset .
method : string , optional .
{ ' linear ' , ' nearest ' } for multidimensional array ,
{ ' linear ' , ' nearest ' , ' zero ' , ' slinear ' , ' quadratic ' , ' cubic ' }
for 1 - dimensional array . ' linear ' is used by default .
assume _ sorted : boolean , optional
If False , values of coordinates that are interpolated over can be
in any order and they are sorted first . If True , interpolated
coordinates are assumed to be an array of monotonically increasing
values .
kwargs : dictionary , optional
Additional keyword passed to scipy ' s interpolator .
Returns
interpolated : xr . Dataset
Another dataset by interpolating this dataset ' s data along the
coordinates of the other object .
Notes
scipy is required .
If the dataset has object - type coordinates , reindex is used for these
coordinates instead of the interpolation .
See Also
Dataset . interp
Dataset . reindex _ like"""
|
coords = alignment . reindex_like_indexers ( self , other )
numeric_coords = OrderedDict ( )
object_coords = OrderedDict ( )
for k , v in coords . items ( ) :
if v . dtype . kind in 'uifcMm' :
numeric_coords [ k ] = v
else :
object_coords [ k ] = v
ds = self
if object_coords : # We do not support interpolation along object coordinate .
# reindex instead .
ds = self . reindex ( object_coords )
return ds . interp ( numeric_coords , method , assume_sorted , kwargs )
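A brief usage sketch with synthetic xarray objects (scipy must be installed for the interpolation):

import numpy as np
import xarray as xr

coarse = xr.Dataset(
    {'temp': (('x',), np.array([0.0, 10.0, 20.0]))},
    coords={'x': [0, 5, 10]},
)
fine = xr.Dataset(coords={'x': [0, 2.5, 5, 7.5, 10]})

# linearly interpolate 'coarse' onto the coordinates of 'fine'
result = coarse.interp_like(fine, method='linear')
print(result['temp'].values)  # [ 0.  5. 10. 15. 20.]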
|
def add ( self , path ) :
"""Add the given path to the decided place in sys . path"""
|
# sys . path always has absolute paths .
path = os . path . abspath ( path )
# It must exist .
if not os . path . exists ( path ) :
return
# It must not already be in sys . path .
if path in sys . path :
return
if self . index is not None :
sys . path . insert ( self . index , path )
self . index += 1
else :
sys . path . append ( path )
|
def reset ( self , data , size ) :
"""Set new contents for frame"""
|
return lib . zframe_reset ( self . _as_parameter_ , data , size )
|
def PushSection ( self , name , pre_formatters ) :
"""Given a section name , push it on the top of the stack .
Returns :
The new section , or None if there is no such section ."""
|
if name == '@' :
value = self . stack [ - 1 ] . context
else :
value = self . stack [ - 1 ] . context . get ( name )
# Apply pre - formatters
for i , ( f , args , formatter_type ) in enumerate ( pre_formatters ) :
if formatter_type == ENHANCED_FUNC :
value = f ( value , self , args )
elif formatter_type == SIMPLE_FUNC :
value = f ( value )
else :
assert False , 'Invalid formatter type %r' % formatter_type
self . stack . append ( _Frame ( value ) )
return value
|
def get_role_secret_id ( self , role_name , secret_id , mount_point = 'approle' ) :
"""POST / auth / < mount _ point > / role / < role name > / secret - id / lookup
: param role _ name :
: type role _ name :
: param secret _ id :
: type secret _ id :
: param mount _ point :
: type mount _ point :
: return :
: rtype :"""
|
url = '/v1/auth/{0}/role/{1}/secret-id/lookup' . format ( mount_point , role_name )
params = { 'secret_id' : secret_id }
return self . _adapter . post ( url , json = params ) . json ( )
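A hedged call sketch (role name and secret id are placeholders; client stands for an hvac-style client exposing this method):

response = client.get_role_secret_id(
    role_name='ci-deployer',                           # hypothetical role
    secret_id='841771dc-11c9-bbc7-bcac-6a3945a69cd9',  # hypothetical secret id
    mount_point='approle',
)
print(response['data'])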
|
def __fade_in ( self ) :
"""Starts the Widget fade in ."""
|
self . __timer . stop ( )
self . __vector = self . __fade_speed
self . __timer . start ( )
|
def proxy_set ( self , value ) :
"""A helper to easily call the proxy _ setter of the field"""
|
setter = getattr ( self , self . proxy_setter )
if isinstance ( value , ( list , tuple , set ) ) :
result = setter ( * value )
elif isinstance ( value , dict ) :
result = setter ( ** value )
else :
result = setter ( value )
return result
|
def query_item ( name , query_string , order = 'Rank' ) :
'''Query a type of record for one or more items . Requires a valid query string .
See https : / / rally1 . rallydev . com / slm / doc / webservice / introduction . jsp for
information on query syntax .
CLI Example :
. . code - block : : bash
salt myminion rallydev . query _ < item name > < query string > [ < order > ]
salt myminion rallydev . query _ task ' ( Name contains github ) '
salt myminion rallydev . query _ task ' ( Name contains reactor ) ' Rank'''
|
status , result = _query ( action = name , args = { 'query' : query_string , 'order' : order } )
return result
|
def update_campaign_list ( self , ** kwargs ) : # noqa : E501
"""List all campaigns # noqa : E501
Get update campaigns for devices specified by a filter . # noqa : E501
This method makes a synchronous HTTP request by default . To make an
asynchronous HTTP request , please pass asynchronous = True
> > > thread = api . update _ campaign _ list ( asynchronous = True )
> > > result = thread . get ( )
: param asynchronous bool
: param int limit : How many update campaigns to retrieve
: param str order : The order of the records . Acceptable values : ASC , DESC . Default : ASC
: param str after : The ID of the the item after which to retrieve the next page
: param str include : A comma - separated list of data fields to return . Currently supported : total _ count
: param str filter : URL - encoded query string parameter to filter returned data ( ` ? filter = { URL - encoded query string } ` ) . Filterable fields : created _ at , description , device _ filter , etag , finished , id , name , root _ manifest _ id , started _ at , state , updated _ at and when . All of these support equality filters ( ` = ` / ` _ _ eq ` / ` _ _ neq ` ) and list filters ( ` _ _ in ` / ` _ _ nin ` ) ; range filters ( ` _ _ lte ` / ` _ _ gte ` ) are additionally supported on created _ at , etag , finished , started _ at , updated _ at and when . The query string is made up of key - value pairs separated by ampersands , for example ` key1 _ _ eq = value1 & key2 _ _ eq = value2 & key3 _ _ eq = value3 ` , URL - encoded as ` ? filter = key1 _ _ eq % 3Dvalue1%26key2 _ _ eq % 3Dvalue2%26key3 _ _ eq % 3Dvalue3 ` . Filtering by campaign properties : ` state _ _ eq = [ draft | scheduled | devicefectch | devicecopy | publishing | deploying | deployed | manifestremoved | expired ] ` , ` root _ manifest _ id _ _ eq = 43217771234242e594ddb433816c498a ` . Date - time fields must be given in UTC RFC3339 format , ` YYYY - MM - DDThh : mm : ss . msZ ` , in one of three variations : with milliseconds ( ` 2016-11-30T16:25:12.1234Z ` ) , without milliseconds ( ` 2016-11-30T16:25:12Z ` ) , or shortened without punctuation ( ` 20161130T162512Z ` ) . Date - time filtering supports ` _ _ eq ` , ` _ _ gte ` and ` _ _ lte ` ; time ranges combine both bounds , for example ` created _ at _ _ gte = 2016-11-30T16:25:12.1234Z & created _ at _ _ lte = 2016-12-30T00:00:00Z ` .
Filtering on multiple fields combines pairs with ampersands , for example ` state _ _ eq = deployed & created _ at _ _ gte = 2016-11-30T16:25:12.1234Z & created _ at _ _ lte = 2016-12-30T00:00:00Z ` . String fields support the operators ` _ _ eq ` ( equality ) , ` _ _ neq ` ( non - equality ) , ` _ _ in ` and ` _ _ nin ` ; for ` _ _ in ` and ` _ _ nin ` the list of values must be comma - separated , for example ` name _ _ in = fw - image1 , fw - image2 `
: return : UpdateCampaignPage
If the method is called asynchronously ,
returns the request thread ."""
|
kwargs [ '_return_http_data_only' ] = True
if kwargs . get ( 'asynchronous' ) :
return self . update_campaign_list_with_http_info ( ** kwargs )
# noqa : E501
else :
( data ) = self . update_campaign_list_with_http_info ( ** kwargs )
# noqa : E501
return data
|
def fmt_margin ( text , margin = None , margin_left = None , margin_right = None , margin_char = ' ' ) :
"""Surround given text with given margin characters ."""
|
if margin_left is None :
margin_left = margin
if margin_right is None :
margin_right = margin
if margin_left is not None :
text = '{}{}' . format ( str ( margin_char ) [ 0 ] * int ( margin_left ) , text )
if margin_right is not None :
text = '{}{}' . format ( text , str ( margin_char ) [ 0 ] * int ( margin_right ) )
return text
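A couple of illustrative calls:

print(repr(fmt_margin('centred', margin=2)))                      # '  centred  '
print(repr(fmt_margin('title', margin_left=4, margin_char='*')))  # '****title'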
|
def check_script ( vouts ) :
"""Looks into the vouts list of a transaction
and returns the ` ` op _ return ` ` if one exists .
Args :
vouts ( list ) : List of outputs of a transaction .
Returns :
str : String representation of the ` ` op _ return ` ` .
Raises :
Exception : If no ` ` vout ` ` having a supported
verb ( : attr : ` supported _ actions ` ) is found ."""
|
for vout in [ v for v in vouts [ : : - 1 ] if v [ 'hex' ] . startswith ( '6a' ) ] :
verb = BlockchainSpider . decode_op_return ( vout [ 'hex' ] )
action = Spoolverb . from_verb ( verb ) . action
if action in Spoolverb . supported_actions :
return verb
raise Exception ( "Invalid ascribe transaction" )
|
def bcrypt_set_password ( self , raw_password ) :
"""Sets the user ' s password to * raw _ password * , hashed with bcrypt ."""
|
if not is_enabled ( ) or raw_password is None :
_set_password ( self , raw_password )
else :
salt = bcrypt . gensalt ( get_rounds ( ) )
self . password = 'bc$' + bcrypt . hashpw ( smart_str ( raw_password ) , salt )
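A standalone sketch of the underlying bcrypt calls (assumes the bcrypt package; the 'bc$' prefix is simply the marker this helper prepends):

import bcrypt

raw_password = 'correct horse battery staple'
salt = bcrypt.gensalt(12)  # 12 rounds, as an example
hashed = bcrypt.hashpw(raw_password.encode('utf-8'), salt)
stored = 'bc$' + hashed.decode('utf-8')

# verification round-trips by hashing the candidate with the stored hash as salt
assert bcrypt.hashpw(raw_password.encode('utf-8'), hashed) == hashed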
|
def distinct ( self , fieldname , key = None ) :
"""Returns the unique values seen at ` fieldname ` ."""
|
return tuple ( unique_everseen ( self [ fieldname ] , key = key ) )
|