signature
stringlengths 29
44.1k
| implementation
stringlengths 0
85.2k
|
---|---|
def call_method_async(self, method_name_or_object, params=None):
    """Call the given service method asynchronously.

    :param method_name_or_object: the name of the method to call, or an
        already-built ``MethodCall`` instance.
    :param params: a list or dict representing the parameters for the request.
    :return: a :py:class:`gemstone.client.structs.AsyncMethodCall` instance.
    """
    pool = self._get_thread_pool()
    if isinstance(method_name_or_object, MethodCall):
        request = method_name_or_object
    else:
        request = MethodCall(method_name_or_object, params)
    pending = pool.apply_async(self.handle_single_request, args=(request,))
    return AsyncMethodCall(req_obj=request, async_resp_object=pending)
|
def match_notes(ref_intervals, ref_pitches, ref_velocities, est_intervals,
                est_pitches, est_velocities, onset_tolerance=0.05,
                pitch_tolerance=50.0, offset_ratio=0.2,
                offset_min_tolerance=0.05, strict=False,
                velocity_tolerance=0.1):
    """Match notes, taking note velocity into consideration.

    This function first calls :func:`mir_eval.transcription.match_notes` to
    match notes according to the supplied intervals, pitches, onset, offset,
    and pitch tolerances. The velocities of the matched notes are then used
    to estimate a slope and intercept which can rescale the estimated
    velocities so that they are as close as possible (in L2 sense) to their
    matched reference velocities. Velocities are then normalized to the
    range [0, 1]. An estimated note is then further only considered correct
    if its velocity is within ``velocity_tolerance`` of its matched
    (according to pitch and timing) reference note.

    Parameters
    ----------
    ref_intervals : np.ndarray, shape=(n, 2)
        Array of reference notes time intervals (onset and offset times)
    ref_pitches : np.ndarray, shape=(n,)
        Array of reference pitch values in Hertz
    ref_velocities : np.ndarray, shape=(n,)
        Array of MIDI velocities (i.e. between 0 and 127) of reference notes
    est_intervals : np.ndarray, shape=(m, 2)
        Array of estimated notes time intervals (onset and offset times)
    est_pitches : np.ndarray, shape=(m,)
        Array of estimated pitch values in Hertz
    est_velocities : np.ndarray, shape=(m,)
        Array of MIDI velocities (i.e. between 0 and 127) of estimated notes
    onset_tolerance : float > 0
        Onset deviation tolerance in seconds. Default 0.05 (50 ms).
    pitch_tolerance : float > 0
        Pitch deviation tolerance in cents. Default 50.0 (50 cents).
    offset_ratio : float > 0 or None
        Ratio of the reference note's duration used to define the offset
        tolerance (the greater of ``ref_duration * offset_ratio`` and
        ``offset_min_tolerance``). If ``None``, offsets are ignored.
    offset_min_tolerance : float > 0
        Minimum tolerance for offset matching; only used when
        ``offset_ratio`` is not ``None``.
    strict : bool
        If ``False`` (default), onset/offset/pitch threshold checks use
        ``<=``; if ``True``, they use ``<``.
    velocity_tolerance : float > 0
        Estimated notes are considered correct if, after rescaling and
        normalization to [0, 1], they are within ``velocity_tolerance`` of
        a matched reference note.

    Returns
    -------
    matching : list of tuples
        A list of matched reference and estimated notes.
        ``matching[i] == (i, j)`` where reference note ``i`` matches
        estimated note ``j``.
    """
    # Compute note matching as usual using standard transcription function
    matching = transcription.match_notes(
        ref_intervals, ref_pitches, est_intervals, est_pitches,
        onset_tolerance, pitch_tolerance, offset_ratio,
        offset_min_tolerance, strict)
    # Rescale reference velocities to the range [0, 1]
    min_velocity, max_velocity = np.min(ref_velocities), np.max(ref_velocities)
    # Make the smallest possible range 1 to avoid divide by zero
    velocity_range = max(1, max_velocity - min_velocity)
    ref_velocities = (ref_velocities - min_velocity) / float(velocity_range)
    # Convert matching list-of-tuples to array for fancy indexing
    matching = np.array(matching)
    # When there is no matching, return an empty list
    if matching.size == 0:
        return []
    # Grab velocities for matched notes
    ref_matched_velocities = ref_velocities[matching[:, 0]]
    est_matched_velocities = est_velocities[matching[:, 1]]
    # Find slope and intercept of line which produces best least-squares fit
    # between matched est and ref velocities.  rcond=None opts in to the
    # current numpy default and silences the FutureWarning emitted when
    # lstsq is called without rcond.
    slope, intercept = np.linalg.lstsq(
        np.vstack([est_matched_velocities,
                   np.ones(len(est_matched_velocities))]).T,
        ref_matched_velocities, rcond=None)[0]
    # Re-scale est velocities to match ref
    est_matched_velocities = slope * est_matched_velocities + intercept
    # Compute the absolute error of (rescaled) estimated velocities vs.
    # normalized reference velocities. Error will be in [0, 1]
    velocity_diff = np.abs(est_matched_velocities - ref_matched_velocities)
    # Check whether each error is within the provided tolerance
    velocity_within_tolerance = (velocity_diff < velocity_tolerance)
    # Only keep matches whose velocity was within the provided tolerance
    matching = matching[velocity_within_tolerance]
    # Convert back to list-of-tuple format
    return [tuple(pair) for pair in matching]
|
def get_total_supply(self) -> int:
    """Call the ``totalSupply`` method of the OEP4 contract.

    :return: the total supply of the oep4 token (0 if the result cannot
        be parsed).
    """
    func = InvokeFunction('totalSupply')
    network = self.__sdk.get_network()
    response = network.send_neo_vm_transaction_pre_exec(
        self.__hex_contract_address, None, func)
    try:
        return ContractDataParser.to_int(response['Result'])
    except SDKException:
        # Fall back to zero when the contract result cannot be parsed.
        return 0
|
def sendJobsStartNext(self, statusDetails=None):
    """**Description**

    Publishes an MQTT message to the StartNextJobExecution topic. This will
    attempt to get the next pending job execution and change its status to
    IN_PROGRESS.

    **Syntax**

    .. code:: python

      # Start next job (set status to IN_PROGRESS) and update with optional statusDetails
      myAWSIoTMQTTJobsClient.sendJobsStartNext({'StartedBy': 'myClientId'})

    **Parameters**

    *statusDetails* - Dictionary containing the key value pairs to use for
    the status details of the job execution

    **Returns**

    True if the publish request has been sent to paho. False if the request
    did not reach paho.
    """
    topic = self._thingJobManager.getJobTopic(
        jobExecutionTopicType.JOB_START_NEXT_TOPIC,
        jobExecutionTopicReplyType.JOB_REQUEST_TYPE)
    payload = self._thingJobManager.serializeStartNextPendingJobExecutionPayload(statusDetails)
    return self._AWSIoTMQTTClient.publish(topic, payload, self._QoS)
|
def slice(string, start=None, end=None):
    """Return a substring of the given string, counting graphemes instead
    of codepoints.

    Negative indices are currently not supported.

    >>> string = "tamil நி (ni)"
    >>> grapheme.slice(string, end=7)
    'tamil நி'
    >>> grapheme.slice(string, 7)
    ' (ni)'
    """
    if start is None:
        start = 0
    if end is not None and start >= end:
        return ""
    if start < 0:
        raise NotImplementedError("Negative indexing is currently not supported.")
    # Walk the grapheme lengths, tracking the codepoint offset reached so
    # far, and translate grapheme indices into codepoint indices.
    offset = 0
    slice_from = None
    for grapheme_index, grapheme_length in enumerate(grapheme_lengths(string)):
        if grapheme_index == start:
            slice_from = offset
        elif grapheme_index == end:
            return string[slice_from:offset]
        offset += grapheme_length
    # end (if given) was past the last grapheme; return everything from start.
    if slice_from is not None:
        return string[slice_from:]
    # start was past the last grapheme.
    return ""
|
def create_spot_requests(self, price, instance_type='default',
                         root_device_type='ebs', size='default',
                         vol_type='gp2', delete_on_termination=False,
                         timeout=None):
    """Request creation of one or more EC2 spot instances.

    :param price: Max price to pay for spot instance per hour.
    :type price: float
    :param instance_type: A section name in amazon.json
    :type instance_type: str
    :param root_device_type: The type of the root device.
    :type root_device_type: str
    :param size: volume size, or 'default' to use the section default
    :param vol_type: EBS volume type
    :param delete_on_termination: delete the volume on instance termination
    :param timeout: Seconds to keep the request open (cancelled if not fulfilled).
    :type timeout: int
    :return: List of requests created
    :rtype: list
    """
    name, size = self._get_default_name_size(instance_type, size)
    if root_device_type == 'ebs':
        self.images[instance_type]['block_device_map'] = \
            self._configure_ebs_volume(vol_type, name, size, delete_on_termination)
    valid_until = None
    if timeout is not None:
        expiry = datetime.datetime.now() + datetime.timedelta(seconds=timeout)
        valid_until = expiry.isoformat()
    requests = self.ec2.request_spot_instances(
        price, valid_until=valid_until, **self.images[instance_type])
    return [request.id for request in requests]
|
def _unpack_tableswitch(bc, offset):
    """Unpack the arguments of a tableswitch op.

    Returns ``((default, low, high, jump_offsets), new_offset)``.
    """
    # The tableswitch payload is padded to the next 4-byte boundary.
    misalignment = offset % 4
    if misalignment:
        offset += 4 - misalignment
    (default, low, high), offset = _unpack(_struct_iii, bc, offset)
    jump_offsets = []
    # One jump offset per value in the [low, high] range, inclusive.
    for _ in range((high - low) + 1):
        value, offset = _unpack(_struct_i, bc, offset)
        jump_offsets.append(value)
    return (default, low, high, jump_offsets), offset
|
def defer(coro, delay=1):
    """Return a coroutine function wrapper that defers the given coroutine
    execution for a certain amount of seconds in a non-blocking way.

    This function can be used as decorator.

    Arguments:
        coro (coroutinefunction): coroutine function to defer.
        delay (int/float): number of seconds to defer execution.

    Raises:
        TypeError: if coro argument is not a coroutine function.

    Returns:
        coroutinefunction: wrapper that sleeps for ``delay`` seconds and
        then awaits ``coro`` with the given arguments.

    Usage::

        # Usage as function
        await paco.defer(coro, delay=1)
        await paco.defer(coro, delay=0.5)

        # Usage as decorator
        @paco.defer(delay=1)
        async def mul_2(num):
            return num * 2

        await mul_2(2)
    """
    assert_corofunction(coro=coro)

    # Native coroutine instead of the generator-based
    # @asyncio.coroutine / yield-from form, which was deprecated in
    # Python 3.8 and removed in 3.11.
    async def wrapper(*args, **kw):
        # Wait until we're done
        await asyncio.sleep(delay)
        return await coro(*args, **kw)

    return wrapper
|
def dot(self, other):
    """Compute the dot product between the Series and the columns of other.

    This method computes the dot product between the Series and another
    one, or the Series and each column of a DataFrame, or the Series and
    each column of an array.

    It can also be called using ``self @ other`` in Python >= 3.5.

    Parameters
    ----------
    other : Series, DataFrame or array-like
        The other object to compute the dot product with its columns.

    Returns
    -------
    scalar, Series or numpy.ndarray
        The dot product of the Series and other if other is a Series, the
        Series of the dot product of Series and each row of other if other
        is a DataFrame, or a numpy.ndarray between the Series and each
        column of the numpy array.

    Raises
    ------
    ValueError
        If the indexes are not aligned, or the shapes are incompatible.

    See Also
    --------
    DataFrame.dot : Compute the matrix product with the DataFrame.
    Series.mul : Multiplication of series and other, element-wise.

    Notes
    -----
    The Series and other have to share the same index if other is a Series
    or a DataFrame.
    """
    from pandas.core.frame import DataFrame
    if isinstance(other, (Series, DataFrame)):
        # Indexes must align exactly; a strictly larger union means one
        # side is missing labels the other has.
        common = self.index.union(other.index)
        if (len(common) > len(self.index) or
                len(common) > len(other.index)):
            raise ValueError('matrices are not aligned')
        left = self.reindex(index=common, copy=False)
        right = other.reindex(index=common, copy=False)
        lvals = left.values
        rvals = right.values
    else:
        lvals = self.values
        rvals = np.asarray(other)
    if lvals.shape[0] != rvals.shape[0]:
        # ValueError instead of the previous bare Exception: it is the
        # conventional type for shape mismatches and remains catchable by
        # any existing `except Exception` handlers.
        raise ValueError('Dot product shape mismatch, %s vs %s'
                         % (lvals.shape, rvals.shape))
    if isinstance(other, DataFrame):
        return self._constructor(np.dot(lvals, rvals),
                                 index=other.columns).__finalize__(self)
    elif isinstance(other, Series):
        return np.dot(lvals, rvals)
    elif isinstance(rvals, np.ndarray):
        return np.dot(lvals, rvals)
    else:  # pragma: no cover
        raise TypeError('unsupported type: %s' % type(other))
|
def add(self, item, count=1):
    '''Record ``count`` occurrences of ``item`` in the current chunk.

    Counts are kept per-chunk in ``chunked_counts``.  Once the current
    chunk has seen ``chunk_size`` items a new chunk is started, and while
    the total number of distinct count entries reaches ``max_counts`` the
    oldest chunks are dropped.
    '''
    self.n_items_seen += count
    self.n_chunk_items_seen += count
    # Fetch (or lazily create) the counter dict for the active chunk.
    current = self.chunked_counts.setdefault(self.n_chunks, {})
    # Update the count in the current chunk's counter dict, tracking the
    # total number of distinct entries across all chunks.
    if item in current:
        current[item] += count
    else:
        self.n_counts += 1
        current[item] = count
    # Roll over to a new chunk when this one is full.
    if self.n_chunk_items_seen >= self.chunk_size:
        self.n_chunks += 1
        self.n_chunk_items_seen = 0
    # In case we reached max capacity in count entries, drop the oldest
    # chunks until we come back within the limit.
    while self.n_counts >= self.max_counts:
        self._drop_oldest_chunk()
|
def update_position(self, loc):
    """Set the location of tick in data coords with scalar *loc*."""

    # This ensures that the new value of the location is set before
    # any other updates take place (the base-class update_position is
    # called only after self._loc has been refreshed).
    self._loc = loc
    super(SkewXTick, self).update_position(loc)
|
def _change_color(self, event):
    """Respond to motion of the hsv cursor: propagate the hue from the bar
    to the square, then sync every channel widget and the hex entry to the
    newly selected color."""
    hue = self.bar.get()
    self.square.set_hue(hue)
    (r, g, b), (h, s, v), sel_color = self.square.get()
    # Push each channel value into its matching spinbox/scale widget.
    for widget, channel in ((self.red, r), (self.green, g), (self.blue, b),
                            (self.hue, h), (self.saturation, s),
                            (self.value, v)):
        widget.set(channel)
    self.hexa.delete(0, "end")
    self.hexa.insert(0, sel_color.upper())
    if self.alpha_channel:
        self.alphabar.set_color((r, g, b))
        self.hexa.insert('end', ("%2.2x" % self.alpha.get()).upper())
    self._update_preview()
|
def cookie_to_state(cookie_str, name, encryption_key):
    """Load a state from a cookie.

    :type cookie_str: str
    :type name: str
    :type encryption_key: str
    :rtype: satosa.state.State

    :param cookie_str: string representation of cookie/s
    :param name: Name identifier of the cookie
    :param encryption_key: Key to encrypt the state information
    :return: A state
    """
    try:
        cookie = SimpleCookie(cookie_str)
        state = State(cookie[name].value, encryption_key)
    except KeyError as e:
        # The named cookie was not present at all.
        msg = 'No cookie named {name} in {data}'.format(name=name, data=cookie_str)
        logger.exception(msg)
        raise SATOSAStateError(msg) from e
    except ValueError as e:
        # The cookie was present but its value could not be decoded.
        msg = 'Failed to process {name} from {data}'.format(name=name, data=cookie_str)
        logger.exception(msg)
        raise SATOSAStateError(msg) from e
    msg = 'Loading state from cookie {data}'.format(data=cookie_str)
    satosa_logging(logger, logging.DEBUG, msg, state)
    return state
|
def remote_restore_snapshot(self, context, ports, snapshot_name):
    """Restore a virtual machine from a snapshot.

    :param context: resource context of the vCenterShell
    :type context: models.QualiDriverModels.ResourceCommandContext
    :param ports: the ports of the connection between the remote resource
        and the local resource (not used by the restore itself —
        presumably kept for command-signature parity; confirm with caller)
    :type ports: list[str]
    :param snapshot_name: Snapshot name to restore from
    :type snapshot_name: str
    :return: result of the orchestrator's restore_snapshot command
    """
    return self.command_orchestrator.restore_snapshot(context, snapshot_name)
|
def check_var_units(self, ds):
    '''Check each applicable variable for the ``units`` attribute.

    Dimensionless (scalar) variables are skipped entirely; every other
    applicable variable contributes one Result indicating whether it
    carries a ``units`` attribute.

    :param netCDF4.Dataset ds: An open netCDF dataset
    '''
    results = []
    for name in self.get_applicable_variables(ds):
        variable = ds.variables[name]
        # Variables with no dimensions are exempt from this test.
        if getattr(variable, 'dimensions') == tuple():
            continue
        has_units = hasattr(variable, 'units')
        messages = [] if has_units else ["units"]
        results.append(Result(BaseCheck.HIGH, has_units,
                              self._var_header.format(name), messages))
    return results
|
def get_inactive_status(brain_or_object, default="active"):
    """Get the ``inactive_state`` of an object.

    :param brain_or_object: A single catalog brain or content object
    :type brain_or_object: ATContentType/DexterityContentType/CatalogBrain
    :param default: value returned when the object has no inactive state
    :returns: Value of the ``inactive_state`` variable
    :rtype: String
    """
    # Catalog brains carry the state as a metadata column.
    if is_brain(brain_or_object):
        return getattr(brain_or_object, "inactive_state", default)
    # Full objects must be bound to the inactive workflow to have a state.
    if 'bika_inactive_workflow' not in get_workflows_for(brain_or_object):
        return default
    return get_workflow_status_of(brain_or_object, 'inactive_state')
|
def close(self, *args, **kwargs):
    """Close the websocket connection and wait for the keepalive (ping)
    thread to finish."""
    self.run_event.set()
    self.ws.close()
    keepalive = self.keepalive
    if keepalive and keepalive.is_alive():
        keepalive.join()
|
def list_engines(zap_helper):
    """List engines that can be used to run scripts."""
    available = zap_helper.zap.script.list_engines
    console.info('Available engines: {}'.format(', '.join(available)))
|
def _get_location(cli_ctx, namespace):
    """Return an Azure location, resolved in order of precedence: an
    explicit `--location` argument, then the `--resource-group` location,
    and finally any location from the subscription."""
    location = getattr(namespace, 'location', None)
    if not location and getattr(namespace, 'resource_group_name', None):
        location = _get_location_from_resource_group(
            cli_ctx, namespace.resource_group_name)
    if not location:
        location = get_one_of_subscription_locations(cli_ctx)
    return location
|
def cmd_legend(self, args):
    '''Set up legend mappings for graphs.

    With no arguments, print the current mappings; with one argument,
    remove that legend entry (if present); with two or more, add or
    replace a mapping from the first to the second.
    '''
    if not args:
        for key in self.legend.keys():
            print("%s -> %s" % (key, self.legend[key]))
    elif len(args) == 1:
        key = args[0]
        if key in self.legend:
            print("Removing legend %s" % key)
            self.legend.pop(key)
    else:
        source, replacement = args[0], args[1]
        print("Adding legend %s -> %s" % (source, replacement))
        self.legend[source] = replacement
|
def progress(self, *msg):
    """Print a progress message, labelled in purple."""
    self._msg(colors.purple("Progress"), *msg)
|
def _send_solr_command(self, core_url, json_command):
    """Send a JSON command string to the given Solr core's update handler.

    Raises SolrException when the HTTP request fails or Solr returns an
    error status; returns True on success.
    """
    # Check document language and dispatch to correct core
    url = _get_url(core_url, "update")
    headers = {'Content-Type': 'application/json'}
    try:
        response = self.req_session.post(url, data=json_command, headers=headers)
        response.raise_for_status()
    except requests.RequestException as e:
        logger.error("Failed to send update to Solr endpoint [%s]: %s",
                     core_url, e, exc_info=True)
        raise SolrException("Failed to send command to Solr [%s]: %s" % (core_url, e,))
    return True
|
def namespaced_view_name(view_name, metric_prefix):
    """Create the string to be used as a metric type by joining the prefix
    (falling back to the OpenCensus custom-metric namespace) and the view
    name."""
    prefix = metric_prefix if metric_prefix else "custom.googleapis.com/opencensus"
    joined = os.path.join(prefix, view_name)
    # os.path.join produces backslashes on Windows; metric types use '/'.
    return joined.replace('\\', '/')
|
def build_template(self, mapfile, names, renderer):
    """Build source from global and item templates.

    :param mapfile: path to a JSON file describing the available dumps
    :param names: dump names to render, resolved in dependency order
    :param renderer: callable ``renderer(fp, index, name, options) -> fp``
        used to render each dump into the buffer
    :return: the base template formatted with the rendered items
    """
    # Use a context manager so the map file descriptor is always closed
    # (json.load(open(mapfile)) leaked the handle).
    with open(mapfile, "r") as json_file:
        AVAILABLE_DUMPS = json.load(json_file)
    manager = self.get_deps_manager(AVAILABLE_DUMPS)
    fp = StringIO.StringIO()
    for i, item in enumerate(manager.get_dump_order(names), start=1):
        fp = renderer(fp, i, item, manager[item])
    if self.dump_other_apps:
        # Exclude every explicitly-excluded app plus every model already
        # covered by the named dumps.
        exclude_models = ['-e {0}'.format(app) for app in self.exclude_apps]
        for i, item in enumerate(manager.get_dump_order(names), start=1):
            for model in manager[item]['models']:
                if '-e ' not in model:
                    model = "-e {0}".format(model)
                if model not in exclude_models:
                    exclude_models.append(model)
        # NOTE(review): relies on `i` leaking out of the loop above; an
        # empty `names` would raise NameError here — confirm callers
        # always pass at least one name.
        fp = renderer(fp, i + 1, 'other_apps',
                      {'models': exclude_models, 'use_natural_key': True})
    content = fp.getvalue()
    fp.close()
    context = self.get_global_context().copy()
    context.update({'items': content})
    return self.base_template.format(**context)
|
def ToJson(self):
    """Convert object members to a dictionary that can be parsed as JSON.

    Returns:
        dict: the base transaction JSON extended with this transaction's
        nonce.
    """
    json_out = super(MinerTransaction, self).ToJson()
    json_out['nonce'] = self.Nonce
    return json_out
|
def list(gandi, state, id, vhosts, type, limit):
    """List PaaS instances."""
    options = {'items_per_page': limit}
    if state:
        options['state'] = state
    # Always show name/state; the remaining columns are opt-in flags.
    output_keys = ['name', 'state']
    if id:
        output_keys.append('id')
    if vhosts:
        output_keys.append('vhost')
    if type:
        output_keys.append('type')
    paas_hosts = {}
    result = gandi.paas.list(options)
    for num, paas in enumerate(result):
        hosts = []
        paas_hosts[paas['id']] = hosts
        if vhosts:
            for host in gandi.vhost.list({'paas_id': paas['id']}):
                hosts.append(host['name'])
        # Separate consecutive entries visually.
        if num:
            gandi.separator_line()
        output_paas(gandi, paas, [], hosts, output_keys)
    return result
|
def _sc_encode ( gain , peak ) :
"""Encode ReplayGain gain / peak values as a Sound Check string ."""
|
# SoundCheck stores the peak value as the actual value of the
# sample , rather than the percentage of full scale that RG uses , so
# we do a simple conversion assuming 16 bit samples .
peak *= 32768.0
# SoundCheck stores absolute RMS values in some unknown units rather
# than the dB values RG uses . We can calculate these absolute values
# from the gain ratio using a reference value of 1000 units . We also
# enforce the maximum value here , which is equivalent to about
# -18.2dB .
g1 = int ( min ( round ( ( 10 ** ( gain / - 10 ) ) * 1000 ) , 65534 ) )
# Same as above , except our reference level is 2500 units .
g2 = int ( min ( round ( ( 10 ** ( gain / - 10 ) ) * 2500 ) , 65534 ) )
# The purpose of these values are unknown , but they also seem to be
# unused so we just use zero .
uk = 0
values = ( g1 , g1 , g2 , g2 , uk , uk , int ( peak ) , int ( peak ) , uk , uk )
return ( u' %08X' * 10 ) % values
|
def serve(ip, port, application, ssl=None, processes=1, **kwargs):
    """Serve a wsgi app (any wsgi app) with either werkzeug's runserver or
    the server that comes with python. Setting `processes` to anything
    other than 1 will prevent the debugger from working."""
    try:
        # Prefer werkzeug if it is installed.
        from werkzeug.serving import run_simple
        print("Using Werkzeug run_simple")
        run_simple(ip, port, application, ssl_context=ssl,
                   processes=processes, **kwargs)
        return
    except ImportError:
        pass
    # Otherwise fall back to python's built-in wsgi webserver.
    from wsgiref.simple_server import make_server
    server = make_server(ip, port, application)
    print("Serving on %s:%s, using built in Python server" % (ip, port))
    try:
        server.serve_forever()
    except KeyboardInterrupt:
        pass
|
def preprocess(s):
    """Normalize a tweet: drop mentions, URLs, RT markers and HTML
    entities, duplicate hashtags as ``hashtagfoo hashtagfoo`` tokens,
    collapse whitespace and lowercase.

    >>> preprocess('#hi there http://www.foo.com @you isn"t RT &lt;&gt;')
    'hashtaghi hashtaghi there isn"t'
    """
    # Drop @mentions entirely.
    s = re.sub(r'@\S+', ' ', s)
    # Drop URLs entirely.
    s = re.sub(r'http\S+', ' ', s)
    # #foo -> hashtagfoo hashtagfoo (for retaining hashtags even using bigrams)
    s = re.sub(r'#(\S+)', r'hashtag\1 hashtag\1', s)
    # Remove retweet markers.  NB: the fourth positional argument of
    # re.sub is `count`, not `flags` — the original calls passed
    # re.IGNORECASE (== 2) as a replacement limit, so matching was
    # case-sensitive and capped at two substitutions.  Pass flags by
    # keyword so matching is truly case-insensitive and unlimited.
    s = re.sub(r'\bRT\b', ' ', s, flags=re.IGNORECASE)
    # Remove HTML entities such as &lt; / &gt;.
    s = re.sub(r'&[a-z]+;', ' ', s, flags=re.IGNORECASE)
    # Collapse runs of whitespace and trim.
    s = re.sub(r'\s+', ' ', s).strip()
    return s.lower()
|
def get_partition_function(self):
    r"""Return the partition function for the undirected graph.

    The partition function is defined as

    .. math:: \sum_{X} \prod_{i=1}^{m} \phi_i

    where ``m`` is the number of factors present in the graph and ``X``
    are all the random variables present.

    Examples
    --------
    >>> from pgmpy.models import MarkovModel
    >>> from pgmpy.factors.discrete import DiscreteFactor
    >>> G = MarkovModel()
    >>> G.add_nodes_from(['x1', 'x2', 'x3', 'x4', 'x5', 'x6', 'x7'])
    >>> G.add_edges_from([('x1', 'x3'), ('x1', 'x4'), ('x2', 'x4'),
    ...                   ('x2', 'x5'), ('x3', 'x6'), ('x4', 'x6'),
    ...                   ('x4', 'x7'), ('x5', 'x7')])
    >>> phi = [DiscreteFactor(edge, [2, 2], np.random.rand(4)) for edge in G.edges()]
    >>> G.add_factors(*phi)
    >>> G.get_partition_function()
    """
    self.check_model()
    # Multiply every factor together into the joint (unnormalized) factor.
    first, remaining = self.factors[0], self.factors[1:]
    joint = factor_product(first, *remaining)
    # The product must cover every node, or the sum is not a partition
    # function for the whole model.
    if set(joint.scope()) != set(self.nodes()):
        raise ValueError('DiscreteFactor for all the random variables not defined.')
    return np.sum(joint.values)
|
def _dataframe_fields ( self ) :
"""Creates a dictionary of all fields to include with DataFrame .
With the result of the calls to class properties changing based on the
class index value , the dictionary should be regenerated every time the
index is changed when the dataframe property is requested .
Returns
dictionary
Returns a dictionary where the keys are the shortened ` ` string ` `
attribute names and the values are the actual value for each
attribute for the specified index ."""
|
fields_to_include = { 'assist_percentage' : self . assist_percentage , 'assists' : self . assists , 'block_percentage' : self . block_percentage , 'blocks' : self . blocks , 'box_plus_minus' : self . box_plus_minus , 'conference' : self . conference , 'defensive_box_plus_minus' : self . defensive_box_plus_minus , 'defensive_rebound_percentage' : self . defensive_rebound_percentage , 'defensive_rebounds' : self . defensive_rebounds , 'defensive_win_shares' : self . defensive_win_shares , 'effective_field_goal_percentage' : self . effective_field_goal_percentage , 'field_goal_attempts' : self . field_goal_attempts , 'field_goal_percentage' : self . field_goal_percentage , 'field_goals' : self . field_goals , 'free_throw_attempt_rate' : self . free_throw_attempt_rate , 'free_throw_attempts' : self . free_throw_attempts , 'free_throw_percentage' : self . free_throw_percentage , 'free_throws' : self . free_throws , 'games_played' : self . games_played , 'games_started' : self . games_started , 'height' : self . height , 'minutes_played' : self . minutes_played , 'offensive_box_plus_minus' : self . offensive_box_plus_minus , 'offensive_rebound_percentage' : self . offensive_rebound_percentage , 'offensive_rebounds' : self . offensive_rebounds , 'offensive_win_shares' : self . offensive_win_shares , 'personal_fouls' : self . personal_fouls , 'player_efficiency_rating' : self . player_efficiency_rating , 'player_id' : self . player_id , 'points' : self . points , 'points_produced' : self . points_produced , 'position' : self . position , 'steal_percentage' : self . steal_percentage , 'steals' : self . steals , 'team_abbreviation' : self . team_abbreviation , 'three_point_attempt_rate' : self . three_point_attempt_rate , 'three_point_attempts' : self . three_point_attempts , 'three_point_percentage' : self . three_point_percentage , 'three_pointers' : self . three_pointers , 'total_rebound_percentage' : self . total_rebound_percentage , 'total_rebounds' : self . 
total_rebounds , 'true_shooting_percentage' : self . true_shooting_percentage , 'turnover_percentage' : self . turnover_percentage , 'turnovers' : self . turnovers , 'two_point_attempts' : self . two_point_attempts , 'two_point_percentage' : self . two_point_percentage , 'two_pointers' : self . two_pointers , 'usage_percentage' : self . usage_percentage , 'weight' : self . weight , 'win_shares' : self . win_shares , 'win_shares_per_40_minutes' : self . win_shares_per_40_minutes , }
return fields_to_include
|
def parse(file_path):
    """Return a decoded API to the data from a file path.

    :param file_path: the input file path. Data is not entropy compressed (e.g. gzip)
    :return: an API to the decoded data
    """
    decoder = MMTFDecoder()
    with open(file_path, "rb") as handle:
        decoder.decode_data(_unpack(handle))
    return decoder
|
def parse_size(image, size):
    """Parse a size string (i.e. "200", "200x100", "x200", etc.) into a
    (width, height) tuple, deriving the missing dimension from the image's
    aspect ratio when only one is supplied."""
    parts = size.split("x")
    # Guard against degenerate images when computing the aspect ratio.
    if image.size[0] == 0 or image.size[1] == 0:
        aspect = 1.0
    else:
        aspect = float(image.size[0]) / float(image.size[1])
    if len(parts) == 1 or not parts[1]:
        # Only a width was given; derive the height.
        width = int(parts[0])
        height = int(1 / aspect * width)
    elif not parts[0]:
        # Only a height was given; derive the width.
        height = int(parts[1])
        width = int(height * aspect)
    else:
        width, height = map(int, parts)
    return width, height
|
def _write_entries(self, stream, entries, converter, properties=None):
    """Write iterable of entries as YAML object to stream.

    Args:
        stream: File-like object.
        entries: Iterable of entries.
        converter: Conversion function from entry to YAML object.
        properties: Set of compartment properties to output (or None to
            output all).
    """
    converted = []
    for raw_entry in entries:
        entry = converter(raw_entry)
        # A converter may reject an entry by returning None.
        if entry is None:
            continue
        if properties is not None:
            # Keep 'id' plus only the requested properties.
            entry = OrderedDict(
                (key, value) for key, value in iteritems(entry)
                if key == 'id' or key in properties)
        converted.append(entry)
    self._dump(stream, converted)
|
def convert_ints_to_bytes(in_ints, num):
    """Pack an integer array into a byte string.

    :param in_ints: the input integers
    :param num: the number of bytes per integer; used as the key into
        ``mmtf.utils.constants.NUM_DICT``, which maps it to a struct
        format character
    :return: the packed bytes
    """
    # struct.Struct pre-compiles the format once, and b"".join avoids the
    # quadratic cost of repeated bytes concatenation in the original loop.
    packer = struct.Struct(mmtf.utils.constants.NUM_DICT[num])
    return b"".join(packer.pack(val) for val in in_ints)
|
def process_line(self, line):
    """Split one complete input line, process the selected columns and
    re-join them with the output delimiter (newline-terminated)."""
    columns = line.split(self.indel)
    if not self.indices:
        # No explicit column selection configured: use every column.
        self.indices = range(len(columns))
    cleaned = []
    for idx in self.indices:
        # A column processor may fan one input column out into several
        # output columns (list/tuple result).
        result = self.process_column(idx, columns[idx])
        if isinstance(result, (list, tuple)):
            cleaned.extend(result)
        else:
            cleaned.append(result)
    return self.outdel.join(cleaned) + '\n'
|
def iterate(self, index, step, n_cols=70):
    """Yield items starting at ``index`` and advancing by ``step``.

    Iteration ends on the first IndexError from ``self.get``, or
    immediately when stepping backwards past index 0 (this avoids
    displaying a submission's post when iterating comments in the
    negative direction).
    """
    while not (step < 0 and index < 0):
        try:
            yield self.get(index, n_cols=n_cols)
        except IndexError:
            return
        index += step
|
def validate(self, value):
    """Return True if ``value`` parses as an IP address, else False.

    On success the parsed address is stored in ``self._choice``; on
    failure ``self.error_message`` is populated instead.
    """
    try:
        self._choice = IPAddress(value)
    except (ValueError, AddrFormatError):
        self.error_message = '%s is not a valid IP address.' % value
        return False
    return True
|
def indicators ( self , indicator_data ) :
    """Generator yielding every indicator value in ``indicator_data``.

    Some indicator types, such as Files (hashes) and Custom Indicators,
    can carry multiple values (e.g. md5, sha1, sha256); this generator
    iterates over all of them. Both the **summary** field and the
    individual indicator fields (e.g. **md5**, **sha1**, **sha256**) are
    supported. Single-value indicators such as **ip** or **hostName**
    yield exactly one result.

    .. code-block:: python
        :linenos:
        :lineno-start: 1

        # the individual indicator JSON from the API
        for i in resource.indicators(indicator_data):
            print(i.get('type'))   # md5, sha1, sha256, etc
            print(i.get('value'))  # hash or custom indicator value

    .. Warning:: This method could break for custom indicators that have
        ":" in the value of the indicator while using the summary field.

    .. Note:: For ``/v2/indicators`` and ``/v2/indicators/bulk/json`` API
        endpoints only one hash is returned for a file Indicator even if
        there are multiple in the platform. If all hashes are required
        the ``/v2/indicators/files`` or ``/v2/indicators/files/<hash>``
        endpoints will provide all hashes.

    Args:
        indicator_data (dict): The indicator dictionary.

    Yields:
        (dict): ``{'type': ..., 'value': ...}`` for each indicator value.
    """
    for indicator_field in self . value_fields :
        if indicator_field == 'summary' :
            # The summary field may pack several values into one string;
            # expand it before classifying the individual values.
            indicators = self . tcex . expand_indicators ( indicator_data . get ( 'summary' ) )
            if indicator_data . get ( 'type' ) == 'File' :
                # File indicators: infer the hash type from its shape.
                hash_patterns = { 'md5' : re . compile ( r'^([a-fA-F\d]{32})$' ) , 'sha1' : re . compile ( r'^([a-fA-F\d]{40})$' ) , 'sha256' : re . compile ( r'^([a-fA-F\d]{64})$' ) , }
                for i in indicators :
                    if not i :
                        continue
                    i = i . strip ( )
                    # clean up badly formatted summary string
                    i_type = None
                    if hash_patterns [ 'md5' ] . match ( i ) :
                        i_type = 'md5'
                    elif hash_patterns [ 'sha1' ] . match ( i ) :
                        i_type = 'sha1'
                    elif hash_patterns [ 'sha256' ] . match ( i ) :
                        i_type = 'sha256'
                    else :
                        # Unknown shape: log a warning; the value is still
                        # yielded below with type None.
                        msg = u'Cannot determine hash type: "{}"' . format ( indicator_data . get ( 'summary' ) )
                        self . tcex . log . warning ( msg )
                    data = { 'type' : i_type , 'value' : i }
                    yield data
            else :
                # Non-file types: pair each expanded value, in order, with
                # the corresponding value field of this type's resource.
                resource = getattr ( self . tcex . resources , self . tcex . safe_rt ( indicator_data . get ( 'type' ) ) ) ( self . tcex )
                values = resource . value_fields
                index = 0
                for i in indicators :
                    if i is None :
                        continue
                    i = i . strip ( )
                    # clean up badly formatted summary string
                    # TODO : remove workaround for bug in indicatorTypes API endpoint
                    if len ( values ) - 1 < index :
                        break
                    data = { 'type' : values [ index ] , 'value' : i }
                    index += 1
                    yield data
        else :
            # Plain per-field values (md5, sha1, ip, hostName, ...):
            # yield only the fields actually present.
            if indicator_data . get ( indicator_field ) is not None :
                yield { 'type' : indicator_field , 'value' : indicator_data . get ( indicator_field ) }
|
def perform_action(self, action, machines, params, progress_title, success_title):
    """Run ``action`` against every machine in ``machines``, showing a
    spinner while the asynchronous work completes. Returns 0 immediately
    when there is nothing to do."""
    if len(machines) == 0:
        return 0
    machine_list = list(machines)
    with utils.Spinner() as spinner_ctx:
        return self._async_perform_action(spinner_ctx, action, machine_list, params, progress_title, success_title)
|
def get_password ( hsm , args ) :
    """Read a password of the correct length for this YubiHSM version.

    The expected hex length depends on the firmware: 32 chars for the
    plain HSM password, 64 chars for the master key when the version
    supports key-store decryption.

    :param hsm: YubiHSM handle; only ``hsm.version`` is consulted.
    :param args: parsed CLI arguments; ``args.stdin`` and ``args.debug``
        select the input method.
    :return: the hex-decoded password, ``None`` when entry was skipped,
        or ``1`` on an over-long input. NOTE(review): returning the int
        error code ``1`` here is inconsistent with the other return
        values - callers must special-case it.
    """
    expected_len = 32
    name = 'HSM password'
    if hsm . version . have_key_store_decrypt ( ) :
        expected_len = 64
        name = 'master key'
    if args . stdin :
        password = sys . stdin . readline ( )
        # Strip any trailing newline characters.
        while password and password [ - 1 ] == '\n' :
            password = password [ : - 1 ]
    else :
        if args . debug :
            # Echoed prompt so the value stays visible while debugging.
            password = raw_input ( 'Enter %s (press enter to skip) (will be echoed) : ' % ( name ) )
        else :
            password = getpass . getpass ( 'Enter %s (press enter to skip) : ' % ( name ) )
    if len ( password ) <= expected_len :
        # NOTE(review): str.decode('hex') is Python 2 only; under Python 3
        # this would need binascii.unhexlify. raw_input above is also py2.
        password = password . decode ( 'hex' )
        if not password :
            return None
        return password
    else :
        sys . stderr . write ( "ERROR: Invalid HSM password (expected max %i chars, got %i)\n" % ( expected_len , len ( password ) ) )
        return 1
|
def expire ( self , current_time = None ) :
    """Expire any old entries in the session queue.

    `current_time`
        Optional time to be used to clean up the queue (can be used in
        unit tests); defaults to ``time()``.

    Entries may be "promoted" (given a later deadline); promoted items
    are rescheduled instead of deleted, and ``on_delete`` gets one last
    chance to promote an item before it is dropped.
    """
    if not self . _queue :
        return
    if current_time is None :
        current_time = time ( )
    while self . _queue : # Get top most item
        top = self . _queue [ 0 ]
        # Early exit if item was not promoted and its expiration time
        # is greater than now . The heap is ordered, so nothing after it
        # can be expired either.
        if top . promoted is None and top . expiry_date > current_time :
            break
        # Pop item from the stack
        top = heappop ( self . _queue )
        need_reschedule = ( top . promoted is not None and top . promoted > current_time )
        # Give chance to reschedule : on_delete may promote the item,
        # so the reschedule condition is re-evaluated afterwards .
        if not need_reschedule :
            top . promoted = None
            top . on_delete ( False )
            need_reschedule = ( top . promoted is not None and top . promoted > current_time )
        # If item is promoted and expiration time somewhere in future
        # just reschedule it
        if need_reschedule :
            top . expiry_date = top . promoted
            top . promoted = None
            heappush ( self . _queue , top )
        else :
            # Truly expired: remove it from the id -> item index too.
            del self . _items [ top . session_id ]
|
def bucket(cls, bucket_name, connection=None):
    """Return (and cache) the couchbase Bucket for ``bucket_name``.

    :param bucket_name: Bucket name to fetch.
    :type bucket_name: str
    :param connection: Optional base connection string; defaults to
        ``cls.connection``.
    :returns: couchbase driver's Bucket object.
    :rtype: :class:`couchbase.client.Bucket`
    :raises: :exc:`RuntimeError` If the credentials wasn't set.
    """
    if connection is None:
        connection = cls.connection
    # The cache key is the full per-bucket connection string. The original
    # code tested membership with the bare bucket name but stored entries
    # under the connection string, so the lookup never hit and a fresh
    # Bucket was created on every call.
    bucket_key = "{connection}/{bucket_name}".format(connection=connection, bucket_name=bucket_name)
    if bucket_key not in cls._buckets:
        if cls.password:
            cls._buckets[bucket_key] = Bucket(bucket_key, password=cls.password)
        else:
            cls._buckets[bucket_key] = Bucket(bucket_key)
    return cls._buckets[bucket_key]
|
def process_exception(self, request, e):
    """Run session post-processing when a RedirectException is raised,
    so session data is still persisted for the redirect response."""
    if not isinstance(e, RedirectException):
        return
    response = e.get_response()
    self.process_response(request, response)
|
def disconnect ( self , format , * args ) :
    """Disconnect a socket from a formatted endpoint.

    :param format: printf-style endpoint format string, completed by
        ``*args``.
    :return: 0 if OK, -1 if the endpoint was invalid or the function
        isn't supported (the return code comes straight from czmq's
        ``zsock_disconnect``).
    """
    return lib . zsock_disconnect ( self . _as_parameter_ , format , * args )
|
def _make_wildcard_attr_map():
    '''Build a dict that maps an attribute name in OpenflowMatch to the
    corresponding non-prefix-related wildcard bit of the OpenflowWildcard
    enumeration. Aggregate ("...All") and prefix-mask ("...Mask") members
    are excluded; CamelCase member names are converted by prefixing each
    uppercase letter with an underscore and lowercasing it.'''
    attr_map = {}
    for wildcard in OpenflowWildcard:
        if wildcard.name.endswith(('All', 'Mask')):
            continue
        snake = ''.join(
            '_' + ch.lower() if ch.isupper() else ch
            for ch in wildcard.name)
        attr_map[snake] = wildcard
    return attr_map
|
def runserver(app=None, reloader=None, debug=None, host=None, port=None):
    """Run the Flask development server, i.e. ``app.run()``.

    Each option falls back to the app config (DEBUG, RELOADER, HOST,
    PORT) only when it was not supplied. The original used ``x or
    config``, which silently replaced explicitly falsy arguments such as
    ``debug=False`` or ``port=0`` with the config value; ``is None``
    checks honour them instead.
    """
    if debug is None:
        debug = app.config.get('DEBUG', False)
    if reloader is None:
        reloader = app.config.get('RELOADER', False)
    if host is None:
        host = app.config.get('HOST', '127.0.0.1')
    if port is None:
        port = app.config.get('PORT', 5000)
    app.run(use_reloader=reloader, debug=debug, host=host, port=port)
|
def bind_events(self, events):
    '''Register handlers from the ``events`` key-value mapping on every
    matching known event of this object.'''
    known = self._events
    if not known or not events:
        return
    for event in known.values():
        if event.name in events:
            event.bind(events[event.name])
|
def add_blacklisted_directories(self, directories, rm_black_dirs_from_stored_dirs=True):
    """Blacklist ``directories`` so they are neither returned nor searched
    recursively by ``collect_directories``.

    ``directories`` may be a single instance or an iterable. Absolute
    paths are recommended, but the delegate will try to resolve relative
    paths against the current working directory. When
    ``rm_black_dirs_from_stored_dirs`` is true, the directories are also
    removed from internal state.
    """
    self.directory_manager.add_blacklisted_directories(directories, rm_black_dirs_from_stored_dirs)
|
def _check_lib ( self , remake , compiler , debug , profile ) :
"""Makes sure that the linked library with the original code exists . If it doesn ' t
the library is compiled from scratch ."""
|
from os import path
if self . link is None or not path . isfile ( self . link ) :
self . makelib ( remake , True , compiler , debug , profile )
|
def function(self, x, y, sigma0, Rs, center_x=0, center_y=0):
    """Lensing potential.

    :param x: x-coordinate(s)
    :param y: y-coordinate(s)
    :param sigma0: sigma0 / sigma_crit
    :param Rs: scale radius
    :param center_x: profile center, x
    :param center_y: profile center, y
    :return: potential value(s)
    """
    dx = x - center_x
    dy = y - center_y
    r = np.sqrt(dx ** 2 + dy ** 2)
    # Clamp the radius from below by the smoothing scale self._s
    # (scalar and array inputs handled separately).
    if isinstance(r, (int, float)):
        r = max(self._s, r)
    else:
        r[r < self._s] = self._s
    X = r / Rs
    return sigma0 * Rs ** 2 * (np.log(X ** 2 / 4.) + 2 * self._F(X))
|
def find_manifests(self):
    '''Locate manifest files in the mavproxy dot-directory and return
    their filepaths. Any filename containing "manifest" qualifies.'''
    manifest_dir = mp_util.dot_mavproxy()
    # Plain substring membership replaces the original try/str.index/
    # except ValueError idiom, and avoids shadowing the name `file`.
    return [os.path.join(manifest_dir, name)
            for name in os.listdir(manifest_dir)
            if "manifest" in name]
|
def extractColumns ( TableName , SourceParameterName , ParameterFormats , ParameterNames = None , FixCol = False ) :
    """Split a string column of a cached table into several typed columns.

    INPUT PARAMETERS:
        TableName: name of source table (required)
        SourceParameterName: name of source column to process (required)
        ParameterFormats: C formats of unpacked parameters (required)
        ParameterNames: list of resulting parameter names (optional)
        FixCol: column-fixed (True) format of source column (optional)
    OUTPUT PARAMETERS:
        none (results are stored in LOCAL_TABLE_CACHE)
    DESCRIPTION:
        Note, that this function is aimed to do some extra job on
        interpreting string parameters which is normally supposed
        to be done by the user.
    EXAMPLE OF USAGE:
        extractColumns('sampletab', SourceParameterName='p5',
                       ParameterFormats=('%d', '%d', '%d'),
                       ParameterNames=('p5_1', 'p5_2', 'p5_3'))
        This example extracts three integer parameters from
        a source column 'p5' and puts results in ('p5_1', 'p5_2', 'p5_3').
    """
    # ParameterNames = just the names without expressions
    # ParFormats contains python formats for par extraction
    # Example : ParameterNames = ( 'v1' , 'v2' , 'v3' )
    # ParameterFormats = ( '%1s' , '%1s' , '%1s' )
    # By default the format of parameters is column - fixed
    # The source column must hold strings (py2: str or unicode).
    if type ( LOCAL_TABLE_CACHE [ TableName ] [ 'header' ] [ 'default' ] [ SourceParameterName ] ) not in set ( [ str , unicode ] ) :
        raise Exception ( 'Source parameter must be a string' )
    i = - 1
    # bug when ( a , ) ! = ( a ) : normalize scalars to one-element lists
    if ParameterNames and type ( ParameterNames ) not in set ( [ list , tuple ] ) :
        ParameterNames = [ ParameterNames ]
    if ParameterFormats and type ( ParameterFormats ) not in set ( [ list , tuple ] ) :
        ParameterFormats = [ ParameterFormats ]
    # if ParameterNames is empty , fill it with # 1-2-3 - . . .
    if not ParameterNames :
        ParameterNames = [ ]
        # using naming convension # i , i = 0,1,2,3 . . .
        # For each format, find the first unused "#i" name in the header.
        for par_format in ParameterFormats :
            while True :
                i += 1
                par_name = '#%d' % i
                fmt = LOCAL_TABLE_CACHE [ TableName ] [ 'header' ] [ 'format' ] . get ( par_name , None )
                if not fmt :
                    break
            ParameterNames . append ( par_name )
    # check if ParameterNames are valid ( must not collide with existing columns )
    Intersection = set ( ParameterNames ) . intersection ( LOCAL_TABLE_CACHE [ TableName ] [ 'header' ] [ 'order' ] )
    if Intersection :
        raise Exception ( 'Parameters %s already exist' % str ( list ( Intersection ) ) )
    # loop over ParameterNames to prepare LOCAL _ TABLE _ CACHE
    i = 0
    for par_name in ParameterNames :
        par_format = ParameterFormats [ i ]
        LOCAL_TABLE_CACHE [ TableName ] [ 'header' ] [ 'format' ] [ par_name ] = par_format
        LOCAL_TABLE_CACHE [ TableName ] [ 'data' ] [ par_name ] = [ ]
        i += 1
    # append new parameters in order list
    LOCAL_TABLE_CACHE [ TableName ] [ 'header' ] [ 'order' ] += ParameterNames
    # cope with default values : build one regex group + type per format
    i = 0
    format_regex = [ ]
    format_types = [ ]
    for par_format in ParameterFormats :
        par_name = ParameterNames [ i ]
        regex = FORMAT_PYTHON_REGEX
        # Decompose the C-style format into length / flags / precision / type.
        ( lng , trail , lngpnt , ty ) = re . search ( regex , par_format ) . groups ( )
        ty = ty . lower ( )
        if ty == 'd' :
            par_type = int
            if FixCol :
                format_regex_part = REGEX_INTEGER_FIXCOL ( lng )
            else :
                format_regex_part = REGEX_INTEGER
        elif ty == 's' :
            par_type = str
            if FixCol :
                format_regex_part = REGEX_STRING_FIXCOL ( lng )
            else :
                format_regex_part = REGEX_STRING
        elif ty == 'f' :
            par_type = float
            if FixCol :
                format_regex_part = REGEX_FLOAT_F_FIXCOL ( lng )
            else :
                format_regex_part = REGEX_FLOAT_F
        elif ty == 'e' :
            par_type = float
            if FixCol :
                format_regex_part = REGEX_FLOAT_E_FIXCOL ( lng )
            else :
                format_regex_part = REGEX_FLOAT_E
        else :
            raise Exception ( 'Unknown data type' )
        format_regex . append ( '(' + format_regex_part + ')' )
        format_types . append ( par_type )
        def_val = getDefaultValue ( par_type )
        LOCAL_TABLE_CACHE [ TableName ] [ 'header' ] [ 'default' ] [ par_name ] = def_val
        i += 1
    # Groups may be separated by arbitrary whitespace in the source column.
    format_regex = '\s*' . join ( format_regex )
    # loop through values of SourceParameter
    for SourceParameterString in LOCAL_TABLE_CACHE [ TableName ] [ 'data' ] [ SourceParameterName ] :
        try :
            ExtractedValues = list ( re . search ( format_regex , SourceParameterString ) . groups ( ) )
        except :
            raise Exception ( 'Error with line \"%s\"' % SourceParameterString )
        i = 0
        # loop through all parameters which are supposed to be extracted
        for par_name in ParameterNames :
            # Convert the captured text to the column's Python type.
            par_value = format_types [ i ] ( ExtractedValues [ i ] )
            LOCAL_TABLE_CACHE [ TableName ] [ 'data' ] [ par_name ] . append ( par_value )
            i += 1
    # explicitly check that number of rows are equal
    number_of_rows = LOCAL_TABLE_CACHE [ TableName ] [ 'header' ] [ 'number_of_rows' ]
    number_of_rows2 = len ( LOCAL_TABLE_CACHE [ TableName ] [ 'data' ] [ SourceParameterName ] )
    number_of_rows3 = len ( LOCAL_TABLE_CACHE [ TableName ] [ 'data' ] [ ParameterNames [ 0 ] ] )
    if not ( number_of_rows == number_of_rows2 == number_of_rows3 ) :
        raise Exception ( 'Error while extracting parameters: check your regexp' )
|
def get_listener(name):
    '''Return the listener class registered under ``name``; raise
    InvalidListenerException when no such listener is available.'''
    log.debug('Using %s as listener', name)
    try:
        return LISTENER_LOOKUP[name]
    except KeyError:
        msg = ('Listener {} is not available. '
               'Are the dependencies installed?').format(name)
        log.error(msg, exc_info=True)
        raise InvalidListenerException(msg)
|
def DeleteGroupTags(r, group, tags, dry_run=False):
    """Deletes tags from a node group.

    @type group: str
    @param group: group to delete tags from
    @type tags: list of string
    @param tags: tags to delete
    @type dry_run: bool
    @param dry_run: whether to perform a dry run
    @rtype: string
    @return: job id
    """
    return r.request(
        "delete",
        "/2/groups/%s/tags" % group,
        query={"dry-run": dry_run, "tag": tags})
|
def expand_path(experiment_config, key):
    '''Expand a leading '~' in ``experiment_config[key]`` to the user home
    directory, in place. Missing or falsy values are left untouched.'''
    value = experiment_config.get(key)
    if value:
        experiment_config[key] = os.path.expanduser(value)
|
def uniq(args):
    """%prog uniq vcffile

    Retain only the first entry in vcf file.
    """
    from six.moves.urllib.parse import parse_qs
    p = OptionParser(uniq.__doc__)
    opts, args = p.parse_args(args)
    if len(args) != 1:
        sys.exit(not p.print_help())
    vcffile, = args
    fp = must_open(vcffile)
    records = []
    for row in fp:
        # Header lines are echoed straight through.
        if row[0] == '#':
            print(row.strip())
            continue
        records.append(VcfLine(row))
    # Within each run of records sharing a position, keep the one with
    # the highest R2 score from the INFO field.
    for pos, group in groupby(records, lambda rec: rec.pos):
        group = list(group)
        if len(group) == 1:
            print(group[0])
        else:
            best = max(group, key=lambda rec: float(parse_qs(rec.info)["R2"][0]))
            print(best)
|
def _get_line_type ( line ) :
'''Decide the line type in function of its contents'''
|
stripped = line . strip ( )
if not stripped :
return 'empty'
remainder = re . sub ( r"\s+" , " " , re . sub ( CHORD_RE , "" , stripped ) )
if len ( remainder ) * 2 < len ( re . sub ( r"\s+" , " " , stripped ) ) :
return 'chord'
return 'lyric'
|
def generate_random_perovskite(lat=None):
    '''Generate a random valid perovskite structure in ASE format.

    Useful for testing. Binary and organic perovskites are not
    considered.
    '''
    if not lat:
        lat = round(random.uniform(3.5, Perovskite_tilting.OCTAHEDRON_BOND_LENGTH_LIMIT * 2), 3)

    def radius(symbol):
        # Covalent radius looked up by element symbol.
        return covalent_radii[chemical_symbols.index(symbol)]

    A_site = random.choice(Perovskite_Structure.A)
    B_site = random.choice(Perovskite_Structure.B)
    Ci_site = random.choice(Perovskite_Structure.C)
    Cii_site = random.choice(Perovskite_Structure.C)
    # Re-draw the A/B pair until the radii gap lies within [0.05, 0.5].
    while not (0.05 <= radius(A_site) - radius(B_site) <= 0.5):
        A_site = random.choice(Perovskite_Structure.A)
        B_site = random.choice(Perovskite_Structure.B)
    return crystal(
        [A_site, B_site, Ci_site, Cii_site],
        [(0.5, 0.25, 0.0), (0.0, 0.0, 0.0), (0.0, 0.25, 0.0), (0.25, 0.0, 0.75)],
        spacegroup=62,
        cellpar=[lat * math.sqrt(2), 2 * lat, lat * math.sqrt(2), 90, 90, 90])
|
def setQuery(self, query):
    """Store the SPARQL query text and attach the VIVO custom
    authentication parameters.

    This is called immediately before any query is sent to the triple
    store, which is why the credentials are (re-)added here.
    """
    self.queryType = self._parseQueryType(query)
    self.queryString = self.injectPrefixes(query)
    for key, value in (('email', self.email), ('password', self.password)):
        self.addParameter(key, value)
|
def mcscanq(args):
    """%prog mcscanq query.ids blocksfile

    Query multiple synteny blocks to get the closest alignment feature. Mostly
    used for 'highlighting' the lines in the synteny plot, drawn by
    graphics.karyotype and graphics.synteny.
    """
    p = OptionParser(mcscanq.__doc__)
    p.add_option("--color", help="Add color highlight, used in plotting")
    p.add_option("--invert", default=False, action="store_true", help="Invert query and subject [default: %default]")
    opts, args = p.parse_args(args)
    if len(args) < 2:
        sys.exit(not p.print_help())
    qids, blocksfile = args
    b = BlockFile(blocksfile)
    # Context manager guarantees the query-id file is closed
    # (the original leaked the handle).
    with open(qids) as fp:
        for gene in fp:
            gene = gene.strip()
            for line in b.query_gene(gene, color=opts.color, invert=opts.invert):
                print(line)
|
def login_as_bot():
    """Sign in to Plotly as the bot account "octogrid" unless the user
    already has populated Plotly credentials on disk."""
    credentials_path = join(join(expanduser('~'), PLOTLY_DIRECTORY), PLOTLY_CREDENTIALS_FILENAME)
    use_bot = True
    if isfile(credentials_path):
        with open(credentials_path, 'r') as f:
            credentials = loads(f.read())
        # Only fall back to the bot when the stored credentials are empty.
        use_bot = credentials['username'] == '' or credentials['api_key'] == ''
    if use_bot:
        plotly.sign_in(BOT_USERNAME, BOT_API_KEY)
|
def get_bandstructure ( self ) :
    """Build a ``LobsterBandStructureSymmLine`` from the parsed data; the
    result can be plotted with a normal BSPlotter.

    All constructor arguments (k-points, eigenvalues, lattice, Fermi
    energy, k-point labels, structure and projections) come from
    attributes previously populated on this object.
    """
    return LobsterBandStructureSymmLine ( kpoints = self . kpoints_array , eigenvals = self . eigenvals , lattice = self . lattice , efermi = self . efermi , labels_dict = self . label_dict , structure = self . structure , projections = self . p_eigenvals )
|
def build_object ( self , obj ) :
    """Bake a single object, skipping profiles that raise Http404.

    Overrides django-bakery's ``build_object`` so that a 404 raised while
    rendering removes any previously built output for ``obj`` instead of
    aborting the whole build.
    """
    try :
        build_path = self . get_build_path ( obj )
        # Render against a faked anonymous request for the target path.
        self . request = self . create_request ( build_path )
        self . request . user = AnonymousUser ( )
        self . set_kwargs ( obj )
        self . build_file ( build_path , self . get_content ( ) )
    except Http404 : # cleanup directory
        self . unbuild_object ( obj )
|
def get_boolean(self, key, optional=False):
    """Fetch ``key`` from the config and expect a truthy value.

    The value may be a string ("1", "Y", "YES", "true", "True", "t", ...)
    or an actual boolean: strings are interpreted with ``strtobool``,
    since variables populated from os env are always strings; booleans
    pass through unchanged.

    :param key: Variable to look for
    :param optional: Whether to raise ConfigKeyNotFoundError if key was not found.
    :return: bool
    """
    def _coerce(raw):
        # strtobool yields 0/1, so normalise to a real bool.
        return bool(util.strtobool(raw))
    return self._get_typed_value(key, bool, _coerce, optional)
|
def add(self, name, monitor=True):
    """Add a folder, library (.py) or resource file (.robot, .tsv, .txt)
    to the database. Anything that is neither a directory nor a file is
    assumed to be an importable library name."""
    if os.path.isdir(name):
        # Hidden folders (e.g. ".git") are ignored.
        if not os.path.basename(name).startswith("."):
            self.add_folder(name)
        return
    if os.path.isfile(name):
        recognised = (self._looks_like_resource_file(name)
                      or self._looks_like_libdoc_file(name)
                      or self._looks_like_library_file(name))
        if recognised:
            self.add_file(name)
        return
    # let's hope it's a library name!
    self.add_library(name)
|
def get_all_files_in_range(dirname, starttime, endtime, pad=64):
    """Return all files under ``dirname`` (recursively) whose names
    indicate that they contain segments in the range ``starttime`` to
    ``endtime``.

    Layout assumptions: leaf files are named ``*-<gpsstart>-<duration>.xml``;
    intermediate directories may be stamped ``*-NNNN`` / ``*-NNNNN`` with the
    leading digits of the GPS times they cover; anything else is recursed
    into (e.g. per-ifo directories).

    :param pad: slack in seconds applied around [starttime, endtime] when
        testing leaf file times.
    """
    ret = []
    # Maybe the user just wants one file...
    if os.path.isfile(dirname):
        if re.match(r'.*-[0-9]*-[0-9]*\.xml$', dirname):
            return [dirname]
        return ret
    # Floor division is essential: under Python 3 the original "/" produced
    # a float (e.g. 1234.56), so a directory stamped 1234 failed the
    # "dirtime >= start" test and whole directories were silently skipped.
    first_four_start = starttime // 100000
    first_four_end = endtime // 100000
    for filename in os.listdir(dirname):
        if re.match(r'.*-[0-9]{5}$', filename):
            dirtime = int(filename[-5:])
            if first_four_start <= dirtime <= first_four_end:
                ret += get_all_files_in_range(os.path.join(dirname, filename), starttime, endtime, pad=pad)
        elif re.match(r'.*-[0-9]{4}$', filename):
            dirtime = int(filename[-4:])
            if first_four_start <= dirtime <= first_four_end:
                ret += get_all_files_in_range(os.path.join(dirname, filename), starttime, endtime, pad=pad)
        elif re.match(r'.*-[0-9]*-[0-9]*\.xml$', filename):
            file_time = int(filename.split('-')[-2])
            if (starttime - pad) <= file_time <= (endtime + pad):
                ret.append(os.path.join(dirname, filename))
        else:
            # Keep recursing, we may be looking at directories of
            # ifos, each of which has directories with times
            ret += get_all_files_in_range(os.path.join(dirname, filename), starttime, endtime, pad=pad)
    return ret
|
def smoothed(self, iterations=1):
    """Return a smoothed copy of this histogram.

    Parameters
    ----------
    iterations : int, optional (default=1)
        The number of smoothing iterations.

    Returns
    -------
    hist : asrootpy'd histogram
        The smoothed histogram.
    """
    clone = self.Clone(shallow=True)
    clone.Smooth(iterations)
    return clone
|
def update ( self ) :
    """Update the bundle: stop it, reload its Python module from disk,
    then restart it if it was previously active.

    Fires UPDATE_BEGIN first, UPDATE_FAILED if stop/restart raises, and
    UPDATED once the reload is done (the old module content is restored
    when the reload itself fails).
    """
    with self . _lock : # Was it active ?
        restart = self . _state == Bundle . ACTIVE
        # Send the update event
        self . _fire_bundle_event ( BundleEvent . UPDATE_BEGIN )
        try : # Stop the bundle
            self . stop ( )
        except : # Something wrong occurred , notify listeners
            self . _fire_bundle_event ( BundleEvent . UPDATE_FAILED )
            raise
        # Change the source file age
        module_stat = None
        module_file = getattr ( self . __module , "__file__" , None )
        if module_file is not None and os . path . isfile ( module_file ) :
            try :
                module_stat = os . stat ( module_file )
                # Change modification time to bypass weak time resolution
                # of the underlying file system
                os . utime ( module_file , ( module_stat . st_atime , module_stat . st_mtime + 1 ) , )
            except OSError : # Can ' t touch the file
                _logger . warning ( "Failed to update the modification time of '%s'. " "The bundle update might not reflect the latest " "changes." , module_file , )
        # Clean up the module constants ( otherwise kept by reload )
        # Keep special members ( __name__ , __file__ , ... )
        old_content = self . __module . __dict__ . copy ( )
        for name in list ( self . __module . __dict__ ) :
            if not ( name . startswith ( "__" ) and name . endswith ( "__" ) ) :
                del self . __module . __dict__ [ name ]
        try : # Reload the module
            reload_module ( self . __module )
        except ( ImportError , SyntaxError ) as ex : # Exception raised if the file is unreadable
            _logger . exception ( "Error updating %s: %s" , self . __name , ex )
            # Reset module content from the snapshot taken above
            self . __module . __dict__ . clear ( )
            self . __module . __dict__ . update ( old_content )
        if module_stat is not None :
            try : # Reset times
                os . utime ( module_file , ( module_stat . st_atime , module_stat . st_mtime ) , )
            except OSError : # Shouldn ' t occur , since we succeeded before the update
                _logger . debug ( "Failed to reset the modification time of '%s'" , module_file , )
        if restart :
            try : # Re - start the bundle
                self . start ( )
            except : # Something wrong occurred , notify listeners
                self . _fire_bundle_event ( BundleEvent . UPDATE_FAILED )
                raise
        # Bundle update finished
        self . _fire_bundle_event ( BundleEvent . UPDATED )
|
def connect ( ip , _initialize = True , wait_ready = None , timeout = 30 , still_waiting_callback = default_still_waiting_callback , still_waiting_interval = 1 , status_printer = None , vehicle_class = None , rate = 4 , baud = 115200 , heartbeat_timeout = 30 , source_system = 255 , source_component = 0 , use_native = False ) :
    """Return a :py:class:`Vehicle` connected to the address in ``ip``.

    Usually called with ``wait_ready=True`` so that vehicle parameters
    and (most) attributes are downloaded before the call returns::

        from dronekit import connect
        vehicle = connect('127.0.0.1:14550', wait_ready=True)

    :param String ip: Connection string for the target address,
        e.g. ``127.0.0.1:14550``.
    :param Bool/Array wait_ready: ``True`` to wait on the default
        attribute set (:py:attr:`parameters`, :py:attr:`gps_0`,
        :py:attr:`armed`, :py:attr:`mode`, :py:attr:`attitude`), or a
        named list of attributes (e.g. ``wait_ready=['system_status',
        'mode']``); default ``None`` (no wait).
    :param status_printer: (deprecated) ``def status_printer(txt)`` hook
        for STATUS_TEXT and diagnostic messages; prefer configuring the
        ``dronekit``/``autopilot`` loggers via the ``logging`` module.
    :param Vehicle vehicle_class: Class instantiated for the result; any
        sub-class of ``Vehicle`` (default ``Vehicle``).
    :param int rate: Data stream refresh rate in Hz (default 4).
    :param int baud: Baud rate for the connection (default 115200).
    :param int heartbeat_timeout: Seconds to wait for a heartbeat before
        an exception is raised (default 30).
    :param int source_system: MAVLink system id of the returned object
        (default 255); the returned object acts as a GCS from the real
        vehicle's perspective.
    :param int source_component: MAVLink component id (default 0).
    :param bool use_native: Use the precompiled MAVLink parser.
    :returns: A connected vehicle of the type given in ``vehicle_class``.
    """
    from dronekit . mavlink import MAVConnection

    cls = vehicle_class or Vehicle
    handler = MAVConnection ( ip , baud = baud , source_system = source_system , source_component = source_component , use_native = use_native )
    vehicle = cls ( handler )
    if status_printer :
        # Deprecated hook: route autopilot log records to the printer.
        vehicle . _autopilot_logger . addHandler ( ErrprinterHandler ( status_printer ) )
    if _initialize :
        vehicle . initialize ( rate = rate , heartbeat_timeout = heartbeat_timeout )
    if wait_ready is True :
        # Default attribute set, with progress callback and timeout.
        vehicle . wait_ready ( still_waiting_interval = still_waiting_interval , still_waiting_callback = still_waiting_callback , timeout = timeout )
    elif wait_ready :
        # Caller supplied an explicit list of attributes to wait on.
        vehicle . wait_ready ( * wait_ready )
    return vehicle
|
def myFunc(parameter):
    """Demonstrate remote execution with SCOOP.

    This function runs on a remote worker even if that worker was not
    available when the job was launched.
    """
    # Report which SCOOP worker is executing this call.
    print('Hello World from {0}!'.format(scoop.worker))
    # Shared constants set elsewhere are retrievable from any worker.
    print(shared.getConst('myVar')[2])
    # Arguments behave like ordinary function parameters.
    return parameter + 1
|
def compose_layer(layer, force=False, **kwargs):
    """Compose a single layer with pixels.

    :param layer: PSD layer object exposing ``topil``, ``bbox``, mask,
        vector-mask and clip-layer accessors.
    :param force: when True, rasterize fills / vector masks even if the
        layer already has pixel data.
    :param kwargs: forwarded to ``layer.topil()``.
    :return: a PIL ``Image`` for the composed layer, or None when the layer
        produces no pixels at all.
    """
    from PIL import Image, ImageChops
    assert layer.bbox != (0, 0, 0, 0), 'Layer bbox is (0, 0, 0, 0)'
    image = layer.topil(**kwargs)
    # Fall back to (or, with force=True, prefer) a synthesized fill texture.
    if image is None or force:
        texture = create_fill(layer)
        if texture is not None:
            image = texture
    if image is None:
        return image
    # TODO: Group should have the following too.
    # Apply mask.
    if layer.has_mask() and not layer.mask.disabled:
        mask_bbox = layer.mask.bbox
        # Only apply a mask with a non-degenerate (positive-area) bbox.
        if ((mask_bbox[2] - mask_bbox[0]) > 0 and (mask_bbox[3] - mask_bbox[1]) > 0):
            # Start from the mask's background color, then paste the real
            # mask pixels at their offset relative to the layer origin.
            color = layer.mask.background_color
            offset = (mask_bbox[0] - layer.left, mask_bbox[1] - layer.top)
            mask = Image.new('L', image.size, color=color)
            mask.paste(layer.mask.topil(), offset)
            if image.mode.endswith('A'):
                # What should we do here? There are two alpha channels.
                pass
            image.putalpha(mask)
    elif layer.has_vector_mask() and (force or not layer.has_pixels()):
        # Rasterize the vector mask and composite the image over white.
        mask = draw_vector_mask(layer)
        # TODO: Stroke drawing.
        texture = image
        image = Image.new(image.mode, image.size, 'white')
        image.paste(texture, mask=mask)
    # Apply layer fill effects.
    apply_effect(layer, image)
    # Clip layers.
    if layer.has_clip_layers():
        clip_box = extract_bbox(layer.clip_layers)
        inter_box = intersect(layer.bbox, clip_box)
        # Only compose clip layers that actually overlap this layer.
        if inter_box != (0, 0, 0, 0):
            clip_image = compose(layer.clip_layers, bbox=layer.bbox)
            # Restrict the clip image to this layer's alpha coverage.
            mask = image.getchannel('A')
            if clip_image.mode.endswith('A'):
                mask = ImageChops.multiply(clip_image.getchannel('A'), mask)
            clip_image.putalpha(mask)
            image = _blend(image, clip_image, (0, 0))
    # Apply opacity.
    if layer.opacity < 255:
        opacity = layer.opacity
        if image.mode.endswith('A'):
            # Scale the existing alpha channel rather than replacing it.
            opacity = opacity / 255.
            channels = list(image.split())
            channels[-1] = channels[-1].point(lambda x: int(x * opacity))
            image = Image.merge(image.mode, channels)
        else:
            image.putalpha(opacity)
    return image
|
def formatted_completion_sig(completion):
    """Regenerate the signature for method completions.

    For raw types or entries without parameter sections, return just the
    completion's name.
    """
    name = completion["name"]
    # Raw types carry no signature beyond their name.
    if is_basic_type(completion):
        return name
    param_sections = completion["typeInfo"]["paramSections"]
    if len(param_sections) == 0:
        return name
    # It's a function type: append every formatted parameter section.
    rendered_sections = "".join(formatted_param_section(ps) for ps in param_sections)
    return u"{}{}".format(name, rendered_sections)
|
def make_grasp_phenotype_file(fn, pheno, out):
    """Subset the GRASP database on a specific phenotype.

    Parameters
    ----------
    fn : str
        Path to GRASP database file (tab-delimited; phenotype in column 12).
    pheno : str
        Phenotype to extract from database (matched exactly against
        column 12).
    out : str
        Path to output file for subset of GRASP database.
    """
    # Stream the (potentially large) file, keeping the header line plus
    # every row whose 12th tab-delimited column equals the phenotype.
    # This replaces the previous `awk ... > out` invocation with
    # shell=True, which was vulnerable to shell injection through
    # unquoted file paths and the phenotype string.
    with open(fn) as infile, open(out, 'w') as outfile:
        for line_number, line in enumerate(infile):
            if line_number == 0:
                # Always keep the header (awk's `NR == 1`).
                outfile.write(line)
                continue
            fields = line.rstrip('\n').split('\t')
            if len(fields) > 11 and fields[11] == pheno:
                outfile.write(line)
|
def playlists(self):
    """Lazily fetch and cache the playlists created by this user.

    NOTE(review): per the original (Chinese) docstring, the user's default
    curated collection is only returned when the requester is the user
    themself — confirm against the API.
    """
    if self._playlists is None:
        raw_items = self._api.user_playlists(self.identifier)
        # Deserialize each raw payload into a Playlist model and cache.
        self._playlists = [
            _deserialize(item, PlaylistSchema) for item in raw_items
        ]
    return self._playlists
|
def _extract_header_value ( line ) :
"""Extracts a key / value pair from a header line in an ODF file"""
|
# Skip blank lines , returning None
if not line :
return None
# Attempt to split by equals sign
halves = line . split ( '=' )
if len ( halves ) > 1 :
key = halves [ 0 ] . strip ( )
value = halves [ 1 ] . strip ( )
return { key : value }
# Otherwise , attempt to split by colon
else :
halves = line . split ( ':' )
key = halves [ 0 ] . strip ( )
value = halves [ 1 ] . strip ( )
return { key : value }
|
def _coerceSingleRepetition(self, dataSet):
    """Coerce one repetition of our parameters.

    The coercion logic lives in ``LiveForm._coerced``, so build a throwaway
    LiveForm (with a no-op callable) from this form's parameters purely to
    reuse that logic.
    """
    scratch_form = LiveForm(lambda **kwargs: None, self.parameters, self.name)
    return scratch_form.fromInputs(dataSet)
|
def bottleneck_block(inputs, filters, is_training, strides, projection_shortcut=None, row_blocks_dim=None, col_blocks_dim=None):
    """Bottleneck block variant for residual networks with BN after convolutions.

    Args:
      inputs: a `mtf.Tensor` of shape
        `[batch_dim, row_blocks, col_blocks, rows, cols, in_channels]`.
      filters: `int` number of filters for the first two convolutions. Note
        that the third and final convolution will use 4 times as many filters.
      is_training: `bool` for whether the model is in training mode.
      strides: `int` block stride. If greater than 1, this block will
        ultimately downsample the input.
      projection_shortcut: `function` to use for projection shortcuts
        (typically a 1x1 convolution to match the filter dimensions). If None,
        no projection is used and the input is passed as unchanged through the
        shortcut connection.
      row_blocks_dim: a mtf.Dimension, row dimension which is
        spatially partitioned along mesh axis.
      col_blocks_dim: a mtf.Dimension, column dimension which is
        spatially partitioned along mesh axis.

    Returns:
      The output `Tensor` of the block.
    """
    shortcut = inputs
    # Kernel spatial dimensions: 3x3 for the middle conv, 1x1 elsewhere.
    filter_h_dim = mtf.Dimension("filter_height", 3)
    filter_w_dim = mtf.Dimension("filter_width", 3)
    one_h_dim = mtf.Dimension("filter_height", 1)
    one_w_dim = mtf.Dimension("filter_width", 1)
    if projection_shortcut is not None:
        # 1x1 projection so the shortcut matches the block's filter count.
        filters_dim = mtf.Dimension("filtersp", filters)
        kernel = mtf.get_variable(inputs.mesh, "kernel", mtf.Shape([one_h_dim, one_w_dim, inputs.shape.dims[-1], filters_dim]))
        shortcut = projection_shortcut(inputs, kernel)
    # First conv block: 1x1, `filters` output channels.
    filters1_dim = mtf.Dimension("filters1", filters)
    kernel1 = mtf.get_variable(inputs.mesh, "kernel1", mtf.Shape([one_h_dim, one_w_dim, inputs.shape.dims[-1], filters1_dim]))
    inputs = mtf.conv2d_with_blocks(inputs, kernel1, strides=[1, 1, 1, 1], padding="SAME", h_blocks_dim=None, w_blocks_dim=col_blocks_dim)
    # TODO(nikip): Add Dropout?
    inputs = batch_norm_relu(inputs, is_training)
    # Second conv block: 3x3, 4*`filters` output channels.
    filters2_dim = mtf.Dimension("filters2", 4 * filters)
    kernel2 = mtf.get_variable(inputs.mesh, "kernel2", mtf.Shape([filter_h_dim, filter_w_dim, filters1_dim, filters2_dim]))
    inputs = mtf.conv2d_with_blocks(inputs, kernel2, strides=[1, 1, 1, 1], padding="SAME", h_blocks_dim=row_blocks_dim, w_blocks_dim=col_blocks_dim)
    inputs = batch_norm_relu(inputs, is_training)
    # Third conv block: 1x1 back down to `filters`; this one carries the
    # block's stride.
    filters3_dim = mtf.Dimension("filters3", filters)
    filters3_kernel = mtf.get_variable(inputs.mesh, "wide_kernel", mtf.Shape([one_h_dim, one_w_dim, filters2_dim, filters3_dim]))
    inputs = mtf.conv2d_with_blocks(inputs, filters3_kernel, strides, padding="SAME", h_blocks_dim=None, w_blocks_dim=col_blocks_dim)
    # TODO(nikip): Although the original resnet code has this batch norm, in
    # our setup this is causing no gradients to be passed. Investigate further.
    # inputs = batch_norm_relu(inputs, is_training, relu=True)
    # TODO(nikip): Maybe add residual with a projection?
    # Rename the channel dim so the residual add lines up with the shortcut.
    return mtf.relu(shortcut + mtf.rename_dimension(inputs, inputs.shape.dims[-1].name, shortcut.shape.dims[-1].name))
|
def datetime_to_year_quarter(dt):
    """Return the year and calendar quarter of a datetime.

    Args:
        dt: a datetime (any object with ``year`` and ``month`` attributes).
    Returns:
        Tuple ``(year, quarter)`` with ``quarter`` in 1..4.
    """
    # Pure integer arithmetic replaces the old float/math.ceil round-trip:
    # months 1-3 -> 1, 4-6 -> 2, 7-9 -> 3, 10-12 -> 4.
    quarter = (dt.month - 1) // 3 + 1
    return (dt.year, quarter)
|
def get_all(self, key: str) -> List[str]:
    """Return the (possibly empty) list of all values stored for *key*.

    Lookup is case-insensitive: keys are normalized to lower case.
    """
    try:
        return self._dict[key.lower()]
    except KeyError:
        return []
|
def attach(cls, name, vhost, remote_name):
    """Attach an instance's vhost as a git remote of the local repository."""
    # Resolve the ssh endpoint: prefer the cached 'paas_access' setting,
    # otherwise derive user@git_server from the instance info.
    paas_access = cls.get('paas_access')
    if not paas_access:
        instance_info = cls.info(name)
        paas_access = '%s@%s' % (instance_info['user'], instance_info['git_server'])
    remote_url = 'ssh+git://%s/%s.git' % (paas_access, vhost)
    added = cls.execute('git remote add %s %s' % (remote_name, remote_url,))
    if not added:
        return
    # Tell the user what was configured and what to do next.
    cls.echo('Added remote `%s` to your local git repository.' % (remote_name))
    cls.echo('Use `git push %s master` to push your code to the instance.' % (remote_name))
    cls.echo('Then `$ gandi deploy` to build and deploy your application.')
|
def new_consolidate(self, result, batch_result):
    '''Used so that it can work with the multiprocess plugin.

    Monkeypatched because nose seems a bit unsupported at this time (ideally
    the plugin would have this support by default).
    '''
    # Run the original (pre-monkeypatch) consolidate first and preserve its
    # return value.
    ret = original(self, result, batch_result)
    # NOTE(review): reaches into the direct caller's frame to recover `addr`,
    # which is not passed to us — fragile; assumes the caller has a local
    # named 'addr'.
    parent_frame = sys._getframe().f_back
    # addr is something as D:\pytesting1\src\mod1\hello.py:TestCase.testMet4
    # so, convert it to what report_cond expects: [file_part, test_part]
    # split on the LAST colon (the drive-letter colon must survive).
    addr = parent_frame.f_locals['addr']
    i = addr.rindex(':')
    addr = [addr[:i], addr[i + 1:]]
    output, testsRun, failures, errors, errorClasses = batch_result
    # Report each failure/error individually; otherwise report success once.
    if failures or errors:
        for failure in failures:
            PYDEV_NOSE_PLUGIN_SINGLETON.report_cond('fail', addr, output, failure)
        for error in errors:
            PYDEV_NOSE_PLUGIN_SINGLETON.report_cond('error', addr, output, error)
    else:
        PYDEV_NOSE_PLUGIN_SINGLETON.report_cond('ok', addr, output)
    return ret
|
def guess_fill_char(left_comp, right_comp):
    """For the case where there is no annotated synteny we will try to guess it.

    Returns a fill character: "-" for a presumed alignment gap, "*" for
    new/unknown material.
    """
    # No left component, obviously new
    # NOTE(review): this unconditional return makes everything below dead
    # code — the function always yields "*". A guard such as
    # `if left_comp is None:` appears to have been lost here (or the
    # guessing logic was deliberately disabled); confirm intent before
    # re-enabling the code below.
    return "*"
    # First check that the blocks have the same src (not just species) and
    # orientation
    # NOTE(review): if re-enabled, `strand != strand` disagrees with the
    # comment above (same orientation would be `==`) — verify upstream.
    if (left_comp.src == right_comp.src and left_comp.strand != right_comp.strand):  # Are they completely contiguous? Easy to call that a gap
        if left_comp.end == right_comp.start:
            return "-"
        # TODO: should be able to make some guesses about short insertions
        # here
    # All other cases we have no clue about
    return "*"
|
def send_sticker(self, sticker, **options):
    """Send a sticker to the chat.

    :param sticker: Sticker to send (file or string)
    :param options: Additional sendSticker options (see
        https://core.telegram.org/bots/api#sendsticker)
    """
    # Telegram expects the chat id as a string.
    chat_identifier = str(self.id)
    return self.bot.api_call(
        "sendSticker", chat_id=chat_identifier, sticker=sticker, **options
    )
|
def _get_container ( self , path ) :
"""Return single container ."""
|
cont = self . native_conn . get_container ( path )
return self . cont_cls ( self , cont . name , cont . object_count , cont . size_used )
|
def get_pubkey_hex(privatekey_hex):
    """Get the uncompressed hex form of a private key.

    :param privatekey_hex: hex-encoded secp256k1 private key, optionally
        carrying the trailing '01' compressed-key marker.
    :return: uncompressed public key hex: '04' + X + Y (130 characters).
    :raises ValueError: if the input is not a (unicode) string, or carries
        a suffix that is not the '01' compression hint.
    """
    # `unicode` only exists on Python 2; the old bare reference raised
    # NameError on Python 3. Build the accepted string types portably.
    try:
        string_types = (str, unicode)  # noqa: F821 (Python 2)
    except NameError:
        string_types = (str,)  # Python 3
    if not isinstance(privatekey_hex, string_types):
        raise ValueError("private key is not a hex string but {}".format(str(type(privatekey_hex))))
    # remove 'compressed' hint
    if len(privatekey_hex) > 64:
        if privatekey_hex[-2:] != '01':
            raise ValueError("private key does not end in 01")
        privatekey_hex = privatekey_hex[:64]
    # get hex public key
    privatekey_int = int(privatekey_hex, 16)
    privk = ec.derive_private_key(privatekey_int, ec.SECP256K1(), default_backend())
    pubk = privk.public_key()
    x = pubk.public_numbers().x
    y = pubk.public_numbers().y
    # Uncompressed SEC1 encoding: 0x04 prefix then 32-byte X and Y.
    pubkey_hex = "04{:064x}{:064x}".format(x, y)
    return pubkey_hex
|
def warning(cls, template, default_params={}, cause=None, stack_depth=0, log_context=None, **more_params):
    """Emit a warning built from `template` and parameters.

    :param template: *string* human readable string with placeholders for parameters
    :param default_params: *dict* parameters to fill in template
    :param cause: *Exception* for chaining
    :param stack_depth: *int* how many calls you want popped off the stack to report the *true* caller
    :param log_context: *dict* extra key:value pairs for your convenience
    :param more_params: *any more parameters (which will overwrite default_params)
    :return:
    """
    # NOTE(review): the mutable default `default_params={}` is tolerable only
    # because it is never mutated below (it is replaced or merged via dict()).
    timestamp = datetime.utcnow()
    if not is_text(template):
        Log.error("Log.warning was expecting a unicode template")
    # Shorthand: warning(template, exc) — the exception becomes the cause.
    if isinstance(default_params, BaseException):
        cause = default_params
        default_params = {}
    if "values" in more_params.keys():
        Log.error("Can not handle a logging parameter by name `values`")
    # more_params take precedence over default_params.
    params = Data(dict(default_params, **more_params))
    # Normalize `cause` into a chain of wrapped Except objects.
    cause = unwraplist([Except.wrap(c) for c in listwrap(cause)])
    # +1 pops this function's own frame so the *caller* is reported.
    trace = exceptions.extract_stack(stack_depth + 1)
    e = Except(exceptions.WARNING, template=template, params=params, cause=cause, trace=trace)
    # NOTE(review): _annotate presumably records/emits the warning; also,
    # `log_context` is accepted but never used here — confirm upstream.
    Log._annotate(e, timestamp, stack_depth + 1)
|
def close(self):
    '''close the Mission Editor window'''
    # Signal every loop that consults this flag to stop.
    self.time_to_quit = True
    self.close_window.release()
    # Give the child process a second to exit on its own, then force it.
    if self.child.is_alive():
        self.child.join(1)
        self.child.terminate()
    self.mavlink_message_queue_handler.join()
    # Post the quit event under the queue lock so the consumer observes it
    # consistently.
    self.event_queue_lock.acquire()
    self.event_queue.put(MissionEditorEvent(me_event.MEE_TIME_TO_QUIT)) ;
    self.event_queue_lock.release()
|
def pull_full_properties(self):
    """Retrieve the full set of resource properties and cache them in this
    object.

    Authorization requirements:

    * Object-access permission to this resource.

    Raises:

      :exc:`~zhmcclient.HTTPError`
      :exc:`~zhmcclient.ParseError`
      :exc:`~zhmcclient.AuthError`
      :exc:`~zhmcclient.ConnectionError`
    """
    fetched = self.manager.session.get(self._uri)
    # Copy into a fresh dict so later mutation of the response object cannot
    # alias our cache; stamp when the snapshot was taken.
    self._properties = dict(fetched)
    self._properties_timestamp = int(time.time())
    self._full_properties = True
|
def set_publicId(self, publicId):
    '''Set the publicId on the public object, creating that object first if
    it does not exist yet.

    @param publicId: a publicId (title of article)
    @type publicId: string
    '''
    public_obj = self.get_public()
    if public_obj is None:
        # No public object yet: build one, set the id, and register it.
        public_obj = Cpublic()
        public_obj.set_publicid(publicId)
        self.set_public(public_obj)
    else:
        public_obj.set_publicid(publicId)
|
def authorize_url(self, duration, scopes, state, implicit=False):
    """Return the URL used out-of-band to grant access to your application.

    :param duration: Either ``permanent`` or ``temporary``. ``temporary``
        authorizations generate access tokens that last only 1
        hour. ``permanent`` authorizations additionally generate a refresh
        token that can be indefinitely used to generate new hour-long
        access tokens. Only ``temporary`` can be specified if ``implicit``
        is set to ``True``.
    :param scopes: A list of OAuth scopes to request authorization for.
    :param state: A string that will be reflected in the callback to
        ``redirect_uri``. This value should be temporarily unique to the
        client for whom the URL was generated for.
    :param implicit: (optional) Use the implicit grant flow (default:
        False). This flow is only available for UntrustedAuthenticators.
    :raises InvalidInvocation: when no redirect URI is configured, or the
        implicit flow is requested with an unsupported authenticator or a
        non-temporary duration.
    """
    if self.redirect_uri is None:
        raise InvalidInvocation("redirect URI not provided")
    if implicit and not isinstance(self, UntrustedAuthenticator):
        # Fixed typo in the error message: "Authentictor" -> "Authenticator".
        raise InvalidInvocation(
            "Only UntrustedAuthenticator instances can "
            "use the implicit grant flow."
        )
    if implicit and duration != "temporary":
        raise InvalidInvocation(
            "The implicit grant flow only supports "
            "temporary access tokens."
        )
    params = {
        "client_id": self.client_id,
        "duration": duration,
        "redirect_uri": self.redirect_uri,
        "response_type": "token" if implicit else "code",
        "scope": " ".join(scopes),
        "state": state,
    }
    url = self._requestor.reddit_url + const.AUTHORIZATION_PATH
    request = Request("GET", url, params=params)
    return request.prepare().url
|
def process_doc(self, doc):
    """Parse an XML string that is either an SOS Capabilities document or a
    SensorML document, and return the corresponding dataset object.

    :raises ValueError: when the root element is neither document type.
    """
    root = ET.fromstring(doc)
    sos_tag = "{http://www.opengis.net/sos/1.0}Capabilities"
    sensorml_tag = "{http://www.opengis.net/sensorML/1.0.1}SensorML"
    if root.tag == sos_tag:
        ds = SensorObservationService(None, xml=doc)
        # SensorObservationService does not store the etree doc root,
        # so maybe use monkey patching here for now?
        ds._root = root
    elif root.tag == sensorml_tag:
        ds = SensorML(root)
    else:
        raise ValueError("Unrecognized XML root element: {}".format(root.tag))
    return ds
|
def write(self):
    """Persist the current settings to the config file, then refresh the
    storage layer so it observes the new contents."""
    with open(storage.config_file, 'w') as config_handle:
        yaml.dump(self.as_dict(), config_handle, default_flow_style=False)
    storage.refresh()
|
def _construct_location_to_filter_list ( match_query ) :
"""Return a dict mapping location - > list of filters applied at that location .
Args :
match _ query : MatchQuery object from which to extract location - > filters dict
Returns :
dict mapping each location in match _ query to a list of
Filter objects applied at that location"""
|
# For each location , all filters for that location should be applied at the first instance .
# This function collects a list of all filters corresponding to each location
# present in the given MatchQuery .
location_to_filters = { }
for match_traversal in match_query . match_traversals :
for match_step in match_traversal :
current_filter = match_step . where_block
if current_filter is not None :
current_location = match_step . as_block . location
location_to_filters . setdefault ( current_location , [ ] ) . append ( current_filter )
return location_to_filters
|
def solve_linear_diop(total: int, *coeffs: int) -> Iterator[Tuple[int, ...]]:
    r"""Yield non-negative integer solutions of a linear Diophantine equation
    of the format :math:`c_1 x_1 + \dots + c_n x_n = total`.

    With at most two coefficients, :func:`base_solution_linear()` supplies
    the solutions directly. Otherwise the problem is reduced recursively:

    1. Compute :math:`d := \gcd(c_2, \dots, c_n)`
    2. Solve :math:`c_1 x + d y = total`
    3. Recursively solve :math:`c_2 x_2 + \dots + c_n x_n = y` for each
       feasible :math:`y`
    4. Combine these partial solutions into full solutions

    Args:
        total: The constant of the equation.
        *coeffs: The coefficients :math:`c_i` of the equation.

    Yields:
        The non-negative integer solutions as tuples
        :math:`(x_1, \dots, x_n)`.
    """
    n = len(coeffs)
    if n == 0:
        # Degenerate equation `0 = total`: solvable only for total == 0.
        if total == 0:
            yield ()
    elif n == 1:
        quotient, remainder = divmod(total, coeffs[0])
        if remainder == 0:
            yield (quotient,)
    elif n == 2:
        yield from base_solution_linear(coeffs[0], coeffs[1], total)
    else:
        # d := gcd(coeffs[1:])
        tail_gcd = coeffs[1]
        for coefficient in coeffs[2:]:
            tail_gcd = math.gcd(tail_gcd, coefficient)
        reduced_coeffs = tuple(c // tail_gcd for c in coeffs[1:])
        # Solve coeffs[0]*x + tail_gcd*y = total, then recurse on each y.
        for head_value, tail_total in base_solution_linear(coeffs[0], tail_gcd, total):
            for tail_solution in solve_linear_diop(tail_total, *reduced_coeffs):
                yield (head_value,) + tail_solution
|
def _safe_call ( obj , methname , * args , ** kwargs ) :
"""Safely calls the method with the given methname on the given
object . Remaining positional and keyword arguments are passed to
the method . The return value is None , if the method is not
available , or the return value of the method ."""
|
meth = getattr ( obj , methname , None )
if meth is None or not callable ( meth ) :
return
return meth ( * args , ** kwargs )
|
def save(self):
    """Save the form, copying first and last name onto the user object.

    Userena expects this form's ``save`` to return the newly created user,
    so the user is returned after being updated.
    """
    # Let the parent form create the user first.
    user = super(SignupFormExtra, self).save()
    user.first_name = self.cleaned_data['first_name']
    user.last_name = self.cleaned_data['last_name']
    user.save()
    return user
|
def _is_label_or_level_reference ( self , key , axis = 0 ) :
"""Test whether a key is a label or level reference for a given axis .
To be considered either a label or a level reference , ` key ` must be a
string that :
- ( axis = 0 ) : Matches a column label or an index level
- ( axis = 1 ) : Matches an index label or a column level
Parameters
key : str
Potential label or level name
axis : int , default 0
Axis that levels are associated with ( 0 for index , 1 for columns )
Returns
is _ label _ or _ level : bool"""
|
if self . ndim > 2 :
raise NotImplementedError ( "_is_label_or_level_reference is not implemented for {type}" . format ( type = type ( self ) ) )
return ( self . _is_level_reference ( key , axis = axis ) or self . _is_label_reference ( key , axis = axis ) )
|
def _encodeAddressField(address, smscField=False):
    """Encodes the address into an SMS PDU address field.

    :param address: The address to encode (phone number or alphanumeric)
    :type address: str
    :param smscField: True when encoding the SMSC address field, whose
        length prefix counts octets (including the type-of-address byte)
        rather than digits
    :type smscField: bool

    :return: Encoded SMS PDU address field
    :rtype: bytearray
    """
    # First, see if this is a number or an alphanumeric string
    # Type-of-address start | Unknown type-of-number | ISDN/tel numbering plan
    toa = 0x80 | 0x00 | 0x01
    alphaNumeric = False
    if address.isalnum():
        # Might just be a local number
        if address.isdigit():
            # Local number
            toa |= 0x20
        else:
            # Alphanumeric address
            toa |= 0x50
            toa &= 0xFE
            # switch to "unknown" numbering plan
            alphaNumeric = True
    else:
        if address[0] == '+' and address[1:].isdigit():
            # International number
            toa |= 0x10
            # Remove the '+' prefix
            address = address[1:]
        else:
            # Alphanumeric address
            toa |= 0x50
            toa &= 0xFE
            # switch to "unknown" numbering plan
            alphaNumeric = True
    if alphaNumeric:
        # GSM-7 encode then pack into septets; the length prefix here is
        # the number of semi-octets (nibbles) occupied by the packed data.
        addressValue = packSeptets(encodeGsm7(address, False))
        addressLen = len(addressValue) * 2
    else:
        addressValue = encodeSemiOctets(address)
        if smscField:
            # SMSC field: length counts octets, +1 for the TOA byte.
            addressLen = len(addressValue) + 1
        else:
            # Regular address field: length counts digits, not octets.
            addressLen = len(address)
    # Assemble: [length][type-of-address][encoded address].
    result = bytearray()
    result.append(addressLen)
    result.append(toa)
    result.extend(addressValue)
    return result
|
def render_profile_data(self, as_parsed):
    '''Render the chosen profile entry, as it was parsed.'''
    renderer = self._render_profile_data
    try:
        return deep_map(renderer, as_parsed)
    except RecursionException:
        # A profile that references itself would recurse forever; surface
        # it as a profile error instead of an unbounded recursion.
        raise DbtProfileError(
            'Cycle detected: Profile input has a reference to itself',
            project=as_parsed,
        )
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.