signature
stringlengths 29
44.1k
| implementation
stringlengths 0
85.2k
|
---|---|
def collections(self):
    """List the top-level collections of the client's database.

    Returns:
        Sequence[~.firestore_v1beta1.collection.CollectionReference]:
            Iterator over the subcollections of the current document.
    """
    # Ask the Firestore API for the collection ids, then teach the returned
    # iterator how to turn each id into a CollectionReference.
    pager = self._firestore_api.list_collection_ids(
        self._database_string, metadata=self._rpc_metadata)
    pager.client = self
    pager.item_to_value = _item_to_collection_ref
    return pager
|
def subsol(datetime):
    """Find the geocentric latitude and longitude of the subsolar point.

    Parameters
    ----------
    datetime : :class:`datetime.datetime`
        Time at which to evaluate the subsolar point.

    Returns
    -------
    sbsllat : float
        Latitude of the subsolar point.
    sbsllon : float
        Longitude of the subsolar point.

    Notes
    -----
    Based on formulas in Astronomical Almanac for the year 1996, p. C24.
    (U.S. Government Printing Office, 1994). Usable for years 1601-2100,
    inclusive. According to the Almanac, results are good to at least 0.01
    degree latitude and 0.025 degrees longitude between years 1950 and 2050.
    Accuracy for other years has not been tested. Every day is assumed to
    have exactly 86400 seconds; thus leap seconds that sometimes occur on
    December 31 are ignored (their effect is below the accuracy threshold
    of the algorithm).

    After Fortran code by A. D. Richmond, NCAR. Translated from IDL
    by K. Laundal.
    """
    # Convert to year, day of year, and seconds since midnight.
    year = datetime.year
    day_of_year = datetime.timetuple().tm_yday
    secs = datetime.hour * 3600 + datetime.minute * 60 + datetime.second
    if not 1601 <= year <= 2100:
        raise ValueError('Year must be in [1601, 2100]')
    yr = year - 2000
    # Leap days since 1601, re-based so that the year 2000 gives zero.
    nleap = int(np.floor((year - 1601.0) / 4.0)) - 99
    if year <= 1900:
        # Correct for Gregorian century years that are not leap years.
        nleap += 3 - int(np.floor((year - 1601.0) / 100.0))
    l0 = -79.549 + (-0.238699 * (yr - 4.0 * nleap) + 3.08514e-2 * nleap)
    g0 = -2.472 + (-0.2558905 * (yr - 4.0 * nleap) - 3.79617e-2 * nleap)
    # Days (including fraction) since 12 UT on January 1 of IYR.
    df = (secs / 86400.0 - 1.5) + day_of_year
    # Mean longitude of the Sun (degrees).
    lmean = l0 + 0.9856474 * df
    # Mean anomaly (radians).
    grad = np.radians(g0 + 0.9856003 * df)
    # Ecliptic longitude (radians).
    lmrad = np.radians(lmean + 1.915 * np.sin(grad)
                       + 0.020 * np.sin(2.0 * grad))
    sinlm = np.sin(lmrad)
    # Obliquity of the ecliptic (radians).
    epsrad = np.radians(23.439 - 4e-7 * (df + 365 * yr + nleap))
    # Right ascension (degrees).
    alpha = np.degrees(np.arctan2(np.cos(epsrad) * sinlm, np.cos(lmrad)))
    # Declination, which is also the subsolar latitude.
    sslat = np.degrees(np.arcsin(np.sin(epsrad) * sinlm))
    # Equation of time (degrees), wrapped into [-180, 180).
    etdeg = lmean - alpha
    etdeg -= 360.0 * round(etdeg / 360.0)
    # Subsolar longitude; the Earth rotates one degree every 240 s.
    sslon = 180.0 - (secs / 240.0 + etdeg)
    sslon -= 360.0 * round(sslon / 360.0)
    return sslat, sslon
|
def file_cmd(context, yes, file_id):
    """Delete a file from the database (and, if included, from disk)."""
    file_obj = context.obj['store'].File.get(file_id)
    # Included files also live on the file system, so warn accordingly.
    if file_obj.is_included:
        question = f"remove file from file system and database: {file_obj.full_path}"
    else:
        question = f"remove file from database: {file_obj.full_path}"
    if not (yes or click.confirm(question)):
        return
    if file_obj.is_included and Path(file_obj.full_path).exists():
        Path(file_obj.full_path).unlink()
    file_obj.delete()
    context.obj['store'].commit()
    click.echo('file deleted')
|
def agent_version(self):
    """Get the version of the Juju machine agent.

    May return None if the agent is not yet available.
    """
    raw = self.safe_data['agent-status']['version']
    # An empty/falsy version string means the agent has not reported yet.
    return client.Number.from_json(raw) if raw else None
|
def getWindowTitle(self, hwnd):
    """Return the title of the window whose number matches ``hwnd``.

    Returns None implicitly when no window with that number exists.
    """
    for window in self._get_window_list():
        if "kCGWindowNumber" in window and window["kCGWindowNumber"] == hwnd:
            return window["kCGWindowName"]
|
def save_list(key, *values):
    """Serialize the given parameters to a JSON object string.

    The JSON object has the form ``{key: [values[0], values[1], ...]}``,
    where each value is first converted via ``_get_json``.
    """
    converted = [_get_json(value) for value in values]
    return json.dumps({key: converted})
|
def character_span(self):
    """Return the character span of the token.

    The span stretches from the first character of the first token to the
    last character of the last token in ``self.token_span``.
    """
    start, stop = self.token_span
    first_char = self.sentence[start].character_span[0]
    last_char = self.sentence[stop - 1].character_span[-1]
    return (first_char, last_char)
|
def _dens(self, R, z, phi=0., t=0.):
    """NAME:
       _dens
    PURPOSE:
       evaluate the density for this potential
    INPUT:
       R - Galactocentric cylindrical radius
       z - vertical height
       phi - azimuth
       t - time
    OUTPUT:
       the density
    HISTORY:
       2010-08-08 - Written - Bovy (NYU)
    """
    orb_R = self._orb.R(t)
    orb_phi = self._orb.phi(t)
    # Cylindrical distance between the evaluation point and the orbit.
    Rdist = _cylR(R, phi, orb_R, orb_phi)
    # Offset vector from the orbit to the evaluation point.
    xd, yd, zd = _cyldiff(orb_R, orb_phi, self._orb.z(t), R, phi, z)
    return evaluateDensities(self._pot, Rdist, zd, use_physical=False)
|
def bake(self):
    """Bake a ``shell`` command so it's ready to execute.

    :return: None
    """
    executable, *arguments = self.command.split(' ')
    base = getattr(sh, executable)
    # Rebuild the command with the remaining args plus env/log plumbing.
    self._sh_command = base.bake(
        arguments, _env=self.env, _out=LOG.out, _err=LOG.error)
|
def singleaxis(self, apparent_zenith, apparent_azimuth):
    """Get tracking data. See :py:func:`pvlib.tracking.singleaxis` for
    more detail.

    Parameters
    ----------
    apparent_zenith : float, 1d array, or Series
        Solar apparent zenith angles in decimal degrees.
    apparent_azimuth : float, 1d array, or Series
        Solar apparent azimuth angles in decimal degrees.

    Returns
    -------
    tracking data
    """
    # Delegate to the module-level singleaxis() with this tracker's geometry.
    return singleaxis(
        apparent_zenith,
        apparent_azimuth,
        self.axis_tilt,
        self.axis_azimuth,
        self.max_angle,
        self.backtrack,
        self.gcr,
    )
|
def page(self, log=values.unset, message_date_before=values.unset,
         message_date=values.unset, message_date_after=values.unset,
         page_token=values.unset, page_number=values.unset,
         page_size=values.unset):
    """Retrieve a single page of NotificationInstance records from the API.

    Request is executed immediately.

    :param unicode log: Filter by log level
    :param date message_date_before: Filter by date
    :param date message_date: Filter by date
    :param date message_date_after: Filter by date
    :param str page_token: PageToken provided by the API
    :param int page_number: Page Number, this value is simply for client state
    :param int page_size: Number of records to return, defaults to 50

    :returns: Page of NotificationInstance
    :rtype: twilio.rest.api.v2010.account.call.notification.NotificationPage
    """
    data = values.of({
        'Log': log,
        'MessageDate<': serialize.iso8601_date(message_date_before),
        'MessageDate': serialize.iso8601_date(message_date),
        'MessageDate>': serialize.iso8601_date(message_date_after),
        'PageToken': page_token,
        'Page': page_number,
        'PageSize': page_size,
    })
    response = self._version.page('GET', self._uri, params=data)
    return NotificationPage(self._version, response, self._solution)
|
def collision_encode(self, src, id, action, threat_level,
                     time_to_minimum_delta, altitude_minimum_delta,
                     horizontal_minimum_delta):
    '''Information about a potential collision.

    src                      : Collision data source (uint8_t)
    id                       : Unique identifier, domain based on src field (uint32_t)
    action                   : Action that is being taken to avoid this collision (uint8_t)
    threat_level             : How concerned the aircraft is about this collision (uint8_t)
    time_to_minimum_delta    : Estimated time until collision occurs (seconds) (float)
    altitude_minimum_delta   : Closest vertical distance in meters between vehicle and object (float)
    horizontal_minimum_delta : Closest horizontal distance in meters between vehicle and object (float)
    '''
    # Pure pass-through constructor for the COLLISION message.
    return MAVLink_collision_message(
        src, id, action, threat_level, time_to_minimum_delta,
        altitude_minimum_delta, horizontal_minimum_delta)
|
def get_next(self, label):
    """Skip forward to, and read, the next section with the given label."""
    current = self._get_current_label()
    # Advance the cursor until it sits on the requested label.
    while current != label:
        self._skip_section()
        current = self._get_current_label()
    return self._read_section()
|
def assert_instance_deleted(self, model_class, **kwargs):
    """Check that the model instance was deleted from the database.

    For example::

        >>> with self.assert_instance_deleted(Article, slug='lorem-ipsum'):
        ...     Article.objects.get(slug='lorem-ipsum').delete()
    """
    # Existence is asserted on enter, non-existence on exit.
    return _InstanceContext(
        self.assert_instance_exists,
        self.assert_instance_does_not_exist,
        model_class,
        **kwargs)
|
def _clean_multiple_def ( self , ready ) :
"""Cleans the list of variable definitions extracted from the definition text to
get hold of the dimensions and default values ."""
|
result = [ ]
for entry in ready :
if isinstance ( entry , list ) : # This variable declaration has a default value specified , which is in the
# second slot of the list .
default = self . _collapse_default ( entry [ 1 ] )
# For hard - coded array defaults , add the parenthesis back in .
if default [ 0 ] == "/" :
default = "({})" . format ( default )
namedim = entry [ 0 ]
else :
default = None
namedim = entry
if isinstance ( namedim , str ) :
name = namedim . strip ( ) . strip ( "," )
dimension = None
D = 0
else : # Namedim is a tuple of ( name , dimension )
name = namedim [ 0 ] . strip ( )
D = count_dimensions ( namedim [ 1 ] )
dimension = self . _collapse_default ( namedim [ 1 ] )
result . append ( ( name , dimension , default , D ) )
return result
|
def address(cls, name, description=None, unit='', default=None,
            initial_status=None):
    """Instantiate a new IP address sensor object.

    Parameters
    ----------
    name : str
        The name of the sensor.
    description : str
        A short description of the sensor.
    unit : str
        The units of the sensor value. May be the empty string
        if there are no applicable units.
    default : (string, int)
        An initial value for the sensor. Tuple containing (host, port).
        default is ("0.0.0.0", None)
    initial_status : int enum or None
        An initial status for the sensor. If None, defaults to
        Sensor.UNKNOWN. `initial_status` must be one of the keys in
        Sensor.STATUSES
    """
    # NOTE(review): the fifth positional argument is always None here --
    # presumably the "params" slot of the Sensor constructor; confirm.
    return cls(cls.ADDRESS, name, description, unit, None,
               default, initial_status)
|
def get(self, block=True, timeout=None):
    """Pop an item from the queue, consuming one byte from the wake-up socket.

    :param block: whether to block waiting for an item.
    :param timeout: maximum time in seconds to wait when blocking.
    :raises queue.Empty: when no item is available (propagated unchanged).
    """
    # The original wrapped this in ``except queue.Empty: raise queue.Empty``,
    # a no-op catch-and-reraise that also discarded the traceback; letting
    # the exception propagate is equivalent and cleaner.
    item = super().get(block, timeout)
    # NOTE(review): presumably each queued item has a matching byte on the
    # socket pair so select()-style polling stays in sync -- confirm.
    self._getsocket.recv(1)
    return item
|
def _sethex(self, hexstring):
    """Reset the bitstring to have the value given in hexstring."""
    cleaned = tidy_input_string(hexstring)
    # Remove any 0x prefixes if present.
    cleaned = cleaned.replace('0x', '')
    length = len(cleaned)
    if length % 2:
        # Pad odd-length input with a trailing zero nibble.
        cleaned += '0'
    try:
        try:
            data = bytearray.fromhex(cleaned)
        except TypeError:
            # Python 2.6 needs a unicode string (a bug); 2.7 and 3.x are fine.
            data = bytearray.fromhex(unicode(cleaned))
    except ValueError:
        raise CreationError("Invalid symbol in hex initialiser.")
    self._setbytes_unsafe(data, length * 4, 0)
|
def execute(self, sql, params=None):
    """Execute given SQL.

    Calls `rollback` if there's a DB error and re-raises the exception.
    Calls `commit` if autocommit is True and there was no error.
    Returns number of rows affected (for commands that affect rows).

    >>> import getpass
    >>> s = DB(dbname='test', user=getpass.getuser(), host='localhost',
    ...        password='')
    >>> s.execute('drop table if exists t2')
    >>> s.execute('drop table t2')
    Traceback (most recent call last):
    ProgrammingError: table "t2" does not exist
    <BLANKLINE>
    """
    logging.debug(sql)
    try:
        self._cursor.execute(sql, params)
        if self.autocommit:
            self._conn.commit()
        # rowcount is -1 or 0 for non-row commands; only report positive counts.
        if self._cursor.rowcount > 0:
            return self._cursor.rowcount
    # Fixed: ``except psycopg2.Error, error:`` is Python-2-only syntax;
    # ``as`` works on Python 2.6+ and 3.x alike.
    except psycopg2.Error as error:
        logging.debug('PG error ({}): {}'.format(error.pgcode, error.pgerror))
        self._conn.rollback()
        raise
|
def add_observations(self, ins_file, out_file=None, pst_path=None, inschek=True):
    """Add new observations to a control file from an instruction file.

    Parameters
    ----------
    ins_file : str
        instruction file
    out_file : str
        model output file. If None, then ins_file.replace(".ins", "") is
        used. Default is None
    pst_path : str (optional)
        the path to append to the instruction file and out file in the
        control file. If not None, then any existing path in front of the
        instruction or output file is split off and pst_path is prepended.
        Default is None
    inschek : bool
        flag to run inschek. If successful, inschek outputs are used as
        obsvals

    Returns
    -------
    new_obs_data : pandas.DataFrame
        the data for the new observations that were added

    Note
    ----
    populates the new observation information with default values
    """
    assert os.path.exists(ins_file), "{0}, {1}".format(os.getcwd(), ins_file)
    if out_file is None:
        out_file = ins_file.replace(".ins", "")
    # Instruction and output file must not be the same path.
    assert ins_file != out_file, "doh!"
    # Get the observation names listed in the instruction file.
    obsnme = pst_utils.parse_ins_file(ins_file)
    sobsnme = set(obsnme)
    sexist = set(self.obs_names)
    # Refuse to add observations that already exist in the control file.
    sint = sobsnme.intersection(sexist)
    if len(sint) > 0:
        raise Exception("the following obs instruction file {0} are already in the control file:{1}".format(ins_file, ','.join(sint)))
    # Find "new" observations that are not already in the control file.
    new_obsnme = [o for o in obsnme if o not in self.observation_data.obsnme]
    if len(new_obsnme) == 0:
        raise Exception("no new observations found in instruction file {0}".format(ins_file))
    # Extend observation_data with default-valued rows for the new names.
    new_obs_data = pst_utils.populate_dataframe(new_obsnme, pst_utils.pst_config["obs_fieldnames"], pst_utils.pst_config["obs_defaults"], pst_utils.pst_config["obs_dtype"])
    new_obs_data.loc[new_obsnme, "obsnme"] = new_obsnme
    new_obs_data.index = new_obsnme
    # NOTE(review): DataFrame.append is deprecated in recent pandas -- confirm
    # the pinned pandas version still supports it.
    self.observation_data = self.observation_data.append(new_obs_data)
    cwd = '.'
    if pst_path is not None:
        # Run inschek from the instruction file's original directory, but
        # record the pst_path-relative names in the control file.
        cwd = os.path.join(*os.path.split(ins_file)[:-1])
        ins_file = os.path.join(pst_path, os.path.split(ins_file)[-1])
        out_file = os.path.join(pst_path, os.path.split(out_file)[-1])
    self.instruction_files.append(ins_file)
    self.output_files.append(out_file)
    df = None
    if inschek:
        df = pst_utils._try_run_inschek(ins_file, out_file, cwd=cwd)
    if df is not None:
        # Use the inschek-derived values as the observation values.
        self.observation_data.loc[df.index, "obsval"] = df.obsval
        new_obs_data.loc[df.index, "obsval"] = df.obsval
    return new_obs_data
|
def dist(self, *args, **kwargs):
    """NAME:
       dist
    PURPOSE:
       return distance from the observer
    INPUT:
       t - (optional) time at which to get dist (can be Quantity)
       obs=[X,Y,Z] - (optional) position of observer (in kpc; entries can
           be Quantity) (default=[8.0,0.,0.]) OR Orbit object that
           corresponds to the orbit of the observer; Y is ignored and
           always assumed to be zero
       ro= (Object-wide default) physical scale for distances to use to
           convert (can be Quantity)
    OUTPUT:
       dist(t) in kpc
    HISTORY:
       2011-02-23 - Written - Bovy (NYU)
    """
    result = self._orb.dist(*args, **kwargs)
    # Unwrap single-element results to a scalar for convenience.
    return result[0] if len(result) == 1 else result
|
def on_connected(self, headers, body):
    """Negotiate heartbeats once the connection is established.

    If 'heart-beat' is present in the headers, combine what the server sent
    with what the client specified to get the effective heartbeat rates; if
    they are non-zero, start the heartbeat loop.

    :param dict headers: headers in the connection message
    :param body: the message body
    """
    if 'heart-beat' not in headers:
        return
    server_beats = headers['heart-beat'].replace(' ', '').split(',')
    self.heartbeats = utils.calculate_heartbeats(server_beats, self.heartbeats)
    if self.heartbeats == (0, 0):
        return
    send_ms, receive_ms = self.heartbeats
    self.send_sleep = send_ms / 1000
    # By default, receive gets an additional grace of 50%; set a different
    # heart-beat-receive-scale when creating the connection to override that.
    self.receive_sleep = (receive_ms / 1000) * self.heart_beat_receive_scale
    log.debug("Setting receive_sleep to %s", self.receive_sleep)
    # Give grace of receiving the first heartbeat.
    self.received_heartbeat = monotonic() + self.receive_sleep
    self.running = True
    if self.heartbeat_thread is None:
        self.heartbeat_thread = utils.default_create_thread(self.__heartbeat_loop)
        self.heartbeat_thread.name = "StompHeartbeat%s" % getattr(
            self.heartbeat_thread, "name", "Thread")
|
def _setup ( self ) :
"""Run setup tasks after initialization"""
|
self . _populate_local ( )
try :
self . _populate_latest ( )
except Exception as e :
self . log . exception ( 'Unable to retrieve latest %s version information' , self . meta_name )
self . _sort ( )
|
def iter_referents(self):
    """Generate target sets that are compatible with the current beliefstate.

    Yields tuples of referents, largest cardinality first, keeping only
    those whose leftover (contrast) set size is within the allowed bounds.
    """
    tlow, thigh = self['targetset_arity'].get_tuple()
    clow, chigh = self['contrast_arity'].get_tuple()
    referents = list(self.iter_singleton_referents())
    total = len(referents)
    low = max(1, tlow)
    high = min(total, thigh)
    # Fixed: the original used Python-2-only ``xrange``, a NameError on
    # Python 3; ``range`` is equivalent here.
    sizes = reversed(range(low, high + 1))
    for targets in itertools.chain.from_iterable(
            itertools.combinations(referents, r) for r in sizes):
        if clow <= total - len(targets) <= chigh:
            yield targets
|
def within_n_mads(n, series):
    """Return True if every value lies within ``n`` mean absolute deviations.

    The MAD here is the mean absolute deviation around the mean -- the
    behaviour of ``Series.mad()``, which was removed in pandas 2.0; it is
    now computed explicitly so the function works on modern pandas.
    """
    deviations = series - series.mean()
    mad = deviations.abs().mean()
    mad_score = deviations / mad
    return (mad_score.abs() <= n).all()
|
def overviews(self):
    """Return the property's overview entries.

    :return: list of overview strings, or None when scraping fails.
    """
    try:
        list_items = self._ad_page_content.select("#overview li")
    except Exception as e:
        if self._debug:
            logging.error("Error getting overviews. Error message: " + e.args[0])
        return
    # One overview string per list item.
    return [item.text for item in list_items]
|
def bandwidth(self):
    """Target bandwidth in bits/sec."""
    # Refresh the cached value from the underlying iperf test handle.
    self._bandwidth = self.lib.iperf_get_test_rate(self._test)
    return self._bandwidth
|
def get_relative_positions_of_waypoints(transition_v):
    """Return the relative positions of all waypoints of a connection.

    :param transition_v: Transition view to extract waypoint positions from
    :return: List of relative positions of the given transition's waypoints
    """
    rel_positions = []
    for handle in transition_v.handles():
        # End handles are not waypoints; skip them.
        if handle in transition_v.end_handles(include_waypoints=True):
            continue
        rel_positions.append(
            transition_v.canvas.get_matrix_i2i(
                transition_v, transition_v.parent).transform_point(*handle.pos))
    return rel_positions
|
def _parse_downloadpage_html(self, doc):
    """Parse the download page HTML and return the download link.

    Returns the href of the first download button, or '' when none found.
    """
    soup = bs4.BeautifulSoup(doc, 'lxml')
    anchors = soup.select('.download-box > a.btn-click')
    if not anchors:
        return ''
    return anchors[0].get('href')
|
def _set_repository_view(self, session):
    """Set the underlying repository view to match the current view."""
    method_name = ('use_comparative_repository_view'
                   if self._repository_view == COMPARATIVE
                   else 'use_plenary_repository_view')
    try:
        getattr(session, method_name)()
    except AttributeError:
        # Sessions that do not support view switching are silently skipped.
        pass
|
def decompose_seconds_in_day(seconds):
    """Decompose seconds-in-day into hour, minute and second components.

    Arguments
    ---------
    seconds : int
        A time of day as the number of seconds passed since midnight.

    Returns
    -------
    hour : int
        The hour component of the given time of day.
    minute : int
        The minute component of the given time of day.
    second : int
        The second component of the given time of day.
    """
    # Wrap a single overflow past midnight back into the day.
    if seconds > SECONDS_IN_DAY:
        seconds -= SECONDS_IN_DAY
    if seconds < 0:
        raise ValueError("seconds param must be non-negative!")
    hour = int(seconds / 3600)
    remainder = seconds - hour * 3600
    minute = int(remainder / 60)
    second = remainder - minute * 60
    return hour, minute, second
|
def AddATR(self, readernode, atr):
    """Add an ATR to a reader node and return the created tree item."""
    child = self.AppendItem(readernode, atr)
    self.SetPyData(child, None)
    # Use the card image for both the normal and the expanded state.
    for icon_state in (wx.TreeItemIcon_Normal, wx.TreeItemIcon_Expanded):
        self.SetItemImage(child, self.cardimageindex, icon_state)
    self.Expand(child)
    return child
|
def upload(self, file_obj):
    """Replace the content of this object.

    :param file file_obj: The file (or file-like object) to upload.
    """
    # Delegate to the client, targeting this object's instance/bucket/name.
    return self._client.upload_object(
        self._instance, self._bucket, self.name, file_obj)
|
def read(self, size=-1):
    """Read up to ``size`` bytes, track the running total, and notify the callback."""
    chunk = self.file.read(size)
    self.bytes_read += len(chunk)
    # Report (bytes in this chunk, total bytes read so far).
    self.callback(len(chunk), self.bytes_read)
    return chunk
|
def get_references(profile_path, role, profile_name, server):
    """Get, and optionally display, the References for the path provided,
    ResultClass CIM_ReferencedProfile, and the role provided."""
    refs = server.conn.References(
        ObjectName=profile_path, ResultClass="CIM_ReferencedProfile", Role=role)
    if VERBOSE:
        print('References for profile=%s, path=%s, ResultClass='
              'CIM_ReferencedProfile, Role=%s' % (profile_name, profile_path,
                                                  role))
        for ref in refs:
            print('Reference for %s get_role=%s cn=%s\n antecedent=%s\n '
                  'dependent=%s' % (profile_name, role, ref.classname,
                                    ref['Antecedent'], ref['Dependent']))
    return refs
|
def raise_freshness_log_entry(self, t_stale_by):
    """Raise a freshness alert log entry (warning level).

    Example: "The freshness period of host 'host_name' is expired
    by 0d 0h 17m 6s (threshold=0d 1h 0m 0s).
    Attempt: 1 / 1.
    I'm forcing the state to freshness state (d / HARD)"

    :param t_stale_by: time in seconds the host has been in a stale state
    :type t_stale_by: int
    :return: None
    """
    logger.warning(
        "The freshness period of %s '%s' is expired by %ss "
        "(threshold=%ss + %ss). Attempt: %s / %s. "
        "I'm forcing the state to freshness state (%s / %s).",
        self.my_type,
        self.get_full_name(),
        t_stale_by,
        self.freshness_threshold,
        self.additional_freshness_latency,
        self.attempt,
        self.max_check_attempts,
        self.freshness_state,
        self.state_type,
    )
|
def _initialize_mtf_dimension_name_to_size_gcd ( self , mtf_graph ) :
"""Initializer for self . _ mtf _ dimension _ name _ to _ size _ gcd .
Args :
mtf _ graph : an mtf . Graph .
Returns :
A { string : int } , mapping the name of an MTF dimension to the greatest
common divisor of all the sizes it has . All these sizes being evenly
divisible by some x is equivalent to the GCD being divisible by x ."""
|
mtf_dimension_name_to_size_gcd = { }
for mtf_operation in mtf_graph . operations :
for mtf_tensor in mtf_operation . outputs :
for mtf_dimension in mtf_tensor . shape . dims :
mtf_dimension_name_to_size_gcd [ mtf_dimension . name ] = fractions . gcd ( mtf_dimension_name_to_size_gcd . get ( mtf_dimension . name , mtf_dimension . size ) , mtf_dimension . size )
return mtf_dimension_name_to_size_gcd
|
def update(self, value=None):
    """Update the ProgressBar to a new value.

    Validates the value against maxval, lazily starts the bar on first use,
    flushes any captured stderr/stdout, and redraws the progress line.
    """
    if value is not None and value is not UnknownLength:
        if (self.maxval is not UnknownLength
                and not 0 <= value <= self.maxval
                and not value < self.currval):
            raise ValueError('Value out of range')
        self.currval = value
    # Lazily start the bar, then re-enter so timing state is initialized.
    if self.start_time is None:
        self.start()
        self.update(value)
    # Throttle redraws to the configured update cadence.
    if not self._need_update():
        return
    # Flush any output captured from the redirected stderr, clearing the
    # progress line first so output and bar do not interleave.
    if self.redirect_stderr and sys.stderr.tell():
        self.fd.write('\r' + ' ' * self.term_width + '\r')
        self._stderr.write(sys.stderr.getvalue())
        self._stderr.flush()
        sys.stderr = StringIO()
    # Same for redirected stdout.
    if self.redirect_stdout and sys.stdout.tell():
        self.fd.write('\r' + ' ' * self.term_width + '\r')
        self._stdout.write(sys.stdout.getvalue())
        self._stdout.flush()
        sys.stdout = StringIO()
    now = time.time()
    self.seconds_elapsed = now - self.start_time
    self.next_update = self.currval + self.update_interval
    # Redraw the progress line in place.
    self.fd.write('\r' + self._format_line())
    self.last_update_time = now
|
def _subcommand(group, *args, **kwargs):
    """Decorator to define a subcommand.

    This decorator is used for the group's @command decorator. It registers
    the decorated function as an argparse sub-parser of ``group``.
    """
    def decorator(f):
        # Default the subcommand's help text to the function's docstring.
        if 'help' not in kwargs:
            kwargs['help'] = f.__doc__
        # Remember the group's parser class so it can be restored below.
        _parser_class = group._subparsers._parser_class
        if 'parser' in kwargs:
            # use a copy of the given parser
            group._subparsers._parser_class = _CopiedArgumentParser
        if 'parents' in kwargs:
            # Inherit recorded argument names from all parent parsers.
            if not hasattr(f, '_argnames'):  # pragma: no cover
                f._argnames = []
            for p in kwargs['parents']:
                f._argnames += p._argnames if hasattr(p, '_argnames') else []
            kwargs['parents'] = [p.parser for p in kwargs['parents']]
        # Name the sub-parser after the function unless args were given.
        if args == ():
            f.parser = group._subparsers.add_parser(f.__name__, **kwargs)
        else:
            f.parser = group._subparsers.add_parser(*args, **kwargs)
        # Record which function handles this subcommand for dispatch.
        f.parser.set_defaults(**{'_func_' + group.__name__: f})
        f.climax = 'parser' not in kwargs
        # Restore the original parser class.
        group._subparsers._parser_class = _parser_class
        # Replay any arguments previously attached to the function.
        for arg in getattr(f, '_arguments', []):
            f.parser.add_argument(*arg[0], **arg[1])
        return f
    return decorator
|
def clean_text(self, text, preserve_space):
    """Text cleaning as per https://www.w3.org/TR/SVG/text.html#WhiteSpace"""
    if text is None:
        return None
    if preserve_space:
        # Newlines and tabs become spaces.
        text = text.replace('\r\n', ' ').replace('\n', ' ').replace('\t', ' ')
    else:
        # Newlines are removed, tabs become spaces, and edges are trimmed.
        text = text.replace('\r\n', '').replace('\n', '').replace('\t', ' ')
        text = text.strip()
    # Collapse runs of spaces down to a single space.
    while '  ' in text:
        text = text.replace('  ', ' ')
    return text
|
def Overlay_setShowDebugBorders(self, show):
    """Function path: Overlay.setShowDebugBorders
    Domain: Overlay
    Method name: setShowDebugBorders

    Parameters:
        Required arguments:
            'show' (type: boolean) -> True for showing debug borders
    No return value.

    Description: Requests that backend shows debug borders on layers
    """
    assert isinstance(show, (bool,)), "Argument 'show' must be of type '['bool']'. Received type: '%s'" % type(show)
    # Forward the request to the protocol layer.
    return self.synchronous_command('Overlay.setShowDebugBorders', show=show)
|
def pitch_contour(times, frequencies, fs, amplitudes=None, function=np.sin,
                  length=None, kind='linear'):
    '''Sonify a pitch contour.

    Parameters
    ----------
    times : np.ndarray
        time indices for each frequency measurement, in seconds
    frequencies : np.ndarray
        frequency measurements, in Hz.
        Non-positive measurements will be interpreted as un-voiced samples.
    fs : int
        desired sampling rate of the output signal
    amplitudes : np.ndarray
        amplitude measurements, nonnegative; defaults to ``np.ones((length,))``
    function : function
        function to use to synthesize notes, should be :math:`2\\pi`-periodic
    length : int
        desired number of samples in the output signal,
        defaults to ``max(times) * fs``
    kind : str
        Interpolation mode for the frequency and amplitude values.
        See ``scipy.interpolate.interp1d`` for valid settings.

    Returns
    -------
    output : np.ndarray
        synthesized version of the pitch contour
    '''
    fs = float(fs)
    if length is None:
        length = int(times.max() * fs)
    # Squash negative frequencies; wave(0) == 0, so clipping here will
    # un-voice the corresponding instants.
    freqs = np.maximum(frequencies, 0.0)
    sample_idx = np.arange(length)
    # Interpolate angular frequency (radians per sample) at each sample.
    phase_rate = interp1d(times * fs, 2 * np.pi * freqs / fs, kind=kind,
                          fill_value=0.0, bounds_error=False,
                          copy=False)(sample_idx)
    if amplitudes is None:
        envelope = np.ones((length,))
    else:
        # Interpolate the amplitude envelope at each sample.
        envelope = interp1d(times * fs, amplitudes, kind=kind, fill_value=0.0,
                            bounds_error=False, copy=False)(sample_idx)
    # Integrate frequency to phase and synthesize the waveform.
    return envelope * function(np.cumsum(phase_rate))
|
def checkPos(self):
    """Check and refresh all positions."""
    soup = BeautifulSoup(self.css1(path['movs-table']).html, 'html.parser')
    refreshed = []
    for row in soup.find_all("tr"):
        pos_id = row['id']
        # Reuse and update an existing position if we already track it.
        existing = [x for x in self.positions if x.id == pos_id]
        if existing:
            pos = existing[0]
            pos.update(row)
        else:
            pos = self.new_pos(row)
        pos.get_gain()
        refreshed.append(pos)
    # Drop stale positions; keep only what is currently on the page.
    self.positions.clear()
    self.positions.extend(refreshed)
    logger.debug("%d positions update" % len(refreshed))
    return self.positions
|
def profiles(weeks):
    """Build profiles for a number of weeks, starting with the current week."""
    builder = Profiles(store)
    # An int means "the last N weeks"; otherwise use the list as given.
    if isinstance(weeks, int):
        weeks = get_last_weeks(weeks)
    print(weeks)
    builder.create(weeks)
|
def _parse_the_ned_object_results(self):
    """*Parse the NED results.*

    Reads the NED results file (``self.nedResults``), prints a formatted
    table to stdout (unless ``self.quiet``) and returns the parsed rows.

    **Return:**
        - ``results`` -- a list of dicts, one per NED object row

    .. todo::
        - @review: when complete, clean _parse_the_ned_results method
        - @review: when complete add logging
    """
    self.log.info('starting the ``_parse_the_ned_results`` method')
    results = []
    headers = ["objectName", "objectType", "raDeg", "decDeg", "redshift", "redshiftFlag"]
    if self.nedResults:
        pathToReadFile = self.nedResults
        try:
            self.log.debug("attempting to open the file %s" % (pathToReadFile,))
            readFile = codecs.open(pathToReadFile, encoding='utf-8', mode='rb')
            thisData = readFile.read()
            readFile.close()
        # NOTE(review): Python-2-only ``except X, e`` syntax; this whole
        # function uses Python 2 constructs (print statements, string.split).
        except IOError, e:
            message = 'could not open the file %s' % (pathToReadFile,)
            self.log.critical(message)
            raise IOError(message)
        readFile.close()
        # Everything after the "No.|Object Name..." header line is data.
        matchObject = re.search(r"No\.\|Object Name.*?\n(.*)", thisData, re.S)
        if matchObject:
            # Print the header for stdout
            thisHeader = "| "
            for head in headers:
                thisHeader += str(head).ljust(self.resultSpacing, ' ') + " | "
            if not self.quiet:
                print thisHeader
            # Parse the pipe-delimited table rows.
            theseLines = string.split(matchObject.group(), '\n')
            csvReader = csv.DictReader(
                theseLines, dialect='excel', delimiter='|', quotechar='"')
            for row in csvReader:
                thisDict = {}
                thisRow = "| "
                thisDict["raDeg"] = row["RA(deg)"].strip()
                thisDict["decDeg"] = row["DEC(deg)"].strip()
                thisDict["redshift"] = row["Redshift"].strip()
                thisDict["redshiftFlag"] = row["Redshift Flag"].strip()
                thisDict["objectName"] = row["Object Name"].strip()
                thisDict["objectType"] = row["Type"].strip()
                results.append(thisDict)
                for head in headers:
                    thisRow += str(thisDict[head]).ljust(self.resultSpacing, ' ') + " | "
                if not self.quiet:
                    print thisRow
        else:
            # NOTE(review): thisRow is not initialised on this path -- it
            # looks like this would raise NameError if reached; confirm.
            for head in headers:
                thisRow += str("").ljust(self.resultSpacing, ' ') + " | "
            if not self.quiet:
                print thisRow
    else:
        # No results file: print the header and a single empty row.
        thisHeader = "| "
        for head in headers:
            thisHeader += str(head).ljust(self.resultSpacing, ' ') + " | "
        if not self.quiet:
            print thisHeader
        thisRow = "| "
        for head in headers:
            thisRow += str("").ljust(self.resultSpacing, ' ') + " | "
        if not self.quiet:
            print thisRow
    self.log.info('completed the ``_parse_the_ned_object_results`` method')
    return results
|
def get_postadres_by_huisnummer(self, huisnummer):
    '''Get the `postadres` for a :class:`Huisnummer`.

    :param huisnummer: The :class:`Huisnummer` for which the `postadres`
        is wanted, or a plain huisnummer id.
    :rtype: A :class:`str`.
    '''
    # Accept either a Huisnummer object or a bare id
    # (also avoids shadowing the builtin ``id``).
    try:
        huisnummer_id = huisnummer.id
    except AttributeError:
        huisnummer_id = huisnummer

    def creator():
        res = crab_gateway_request(
            self.client, 'GetPostadresByHuisnummerId', huisnummer_id)
        # ``is None`` instead of ``== None``: identity is the correct test.
        if res is None:
            raise GatewayResourceNotFoundException()
        return res.Postadres

    if self.caches['short'].is_configured:
        key = 'GetPostadresByHuisnummerId#%s' % (huisnummer_id)
        return self.caches['short'].get_or_create(key, creator)
    return creator()
|
def _compute_e2_factor ( self , imt , vs30 ) :
"""Compute and return e2 factor , equation 19 , page 80."""
|
e2 = np . zeros_like ( vs30 )
if imt . name == "PGV" :
period = 1
elif imt . name == "PGA" :
period = 0
else :
period = imt . period
if period < 0.35 :
return e2
else :
idx = vs30 <= 1000
if period >= 0.35 and period <= 2.0 :
e2 [ idx ] = ( - 0.25 * np . log ( vs30 [ idx ] / 1000 ) * np . log ( period / 0.35 ) )
elif period > 2.0 :
e2 [ idx ] = ( - 0.25 * np . log ( vs30 [ idx ] / 1000 ) * np . log ( 2.0 / 0.35 ) )
return e2
|
def session(self, master='', config=None):
    """Takes care of starting any local servers and stopping queues on exit.

    In general, the Runner is designed to work with any user provided
    session, but this provides a convenience for properly stopping the
    queues.

    Args:
      master: The master session to use.
      config: A tf.ConfigProto or None.

    Yields:
      A session.
    """
    manager = SESSION_MANAGER_FACTORY()
    # Initialization is handled manually at a later point; the session
    # manager is only used for distributed compatibility, hence the
    # no-op init_fn.
    with manager.prepare_session(master, None, config=config,
                                 init_fn=lambda _: None) as sess:
        try:
            yield sess
        finally:
            # Stop the queues even if the caller's block raised.
            self.stop_queues()
def _normalize_stmt_idx ( self , block_addr , stmt_idx ) :
"""For each statement ID , convert ' default ' to ( last _ stmt _ idx + 1)
: param block _ addr : The block address .
: param stmt _ idx : Statement ID .
: returns : New statement ID ."""
|
if type ( stmt_idx ) is int :
return stmt_idx
if stmt_idx == DEFAULT_STATEMENT :
vex_block = self . project . factory . block ( block_addr ) . vex
return len ( vex_block . statements )
raise AngrBackwardSlicingError ( 'Unsupported statement ID "%s"' % stmt_idx )
|
def listen(self):
    """Blocking call on widgets."""
    while self._listen:
        # Poll with a timeout so the loop can observe _listen changes.
        key = u''
        key = self.term.inkey(timeout=0.2)
        try:
            code = key.code
            if code == KEY_ENTER:
                self.on_enter(key=key)
            elif code in (KEY_DOWN, KEY_UP):
                self.on_key_arrow(key=key)
            elif code == KEY_ESCAPE or key == chr(3):
                # Escape key or the raw Ctrl-C byte.
                self.on_exit(key=key)
            elif key != '':
                self.on_key(key=key)
        except KeyboardInterrupt:
            self.on_exit(key=key)
|
def send(self, to, from_, body, dm=False):
    """Send BODY as an @ message from FROM_ to TO.

    If we don't have the access tokens for FROM_, raise
    AccountNotFoundError. If the tweet resulting from
    '@{0} {1}'.format(TO, BODY) is > 140 chars, raise TweetTooLongError.
    If we want to send this message as a DM, do so.

    Arguments:
    - `to`: str
    - `from_`: str
    - `body`: str
    - `dm`: [optional] bool

    Return: None
    Exceptions: AccountNotFoundError
                TweetTooLongError
    """
    if from_ not in self.accounts:
        raise AccountNotFoundError()
    tweet = '@{0} {1}'.format(to, body)
    if len(tweet) > 140:
        raise TweetTooLongError()
    # Authenticate as the sending account, then post.
    self.auth.set_access_token(*self.accounts.get(from_))
    api = tweepy.API(self.auth)
    if dm:
        api.send_direct_message(screen_name=to, text=body)
    else:
        api.update_status(tweet)
|
def ready(self):
    """Configure global XRay recorder based on django settings
    under XRAY_RECORDER namespace.

    This method could be called twice during server startup
    because of base command and reload command.
    So this function must be idempotent.
    """
    # A segment name is mandatory; fail fast during app start-up.
    if not settings.AWS_XRAY_TRACING_NAME:
        raise SegmentNameMissingException('Segment name is required.')
    # Re-running configure with the same values is what keeps ready()
    # idempotent across repeated calls.
    xray_recorder.configure(
        daemon_address=settings.AWS_XRAY_DAEMON_ADDRESS,
        sampling=settings.SAMPLING,
        sampling_rules=settings.SAMPLING_RULES,
        context_missing=settings.AWS_XRAY_CONTEXT_MISSING,
        plugins=settings.PLUGINS,
        service=settings.AWS_XRAY_TRACING_NAME,
        dynamic_naming=settings.DYNAMIC_NAMING,
        streaming_threshold=settings.STREAMING_THRESHOLD,
        max_trace_back=settings.MAX_TRACE_BACK,
        stream_sql=settings.STREAM_SQL,
    )
    if settings.PATCH_MODULES:
        if settings.AUTO_PATCH_PARENT_SEGMENT_NAME is not None:
            # Wrap module auto-patching in a parent segment when one is named.
            with xray_recorder.in_segment(settings.AUTO_PATCH_PARENT_SEGMENT_NAME):
                patch(settings.PATCH_MODULES,
                      ignore_module_patterns=settings.IGNORE_MODULE_PATTERNS)
        else:
            patch(settings.PATCH_MODULES,
                  ignore_module_patterns=settings.IGNORE_MODULE_PATTERNS)
    # if turned on subsegment will be generated on
    # built-in database and template rendering
    if settings.AUTO_INSTRUMENT:
        try:
            patch_db()
        except Exception:
            # Best-effort: instrumentation failure must not break start-up.
            log.debug('failed to patch Django built-in database')
        try:
            patch_template()
        except Exception:
            log.debug('failed to patch Django built-in template engine')
|
def is_forced_retry(self, method, status_code):
    """Is this method/status code retryable? (Based on method/codes whitelists)

    Returns a real boolean: True only when the method is allowed (or no
    method whitelist is configured) and ``status_code`` appears in
    ``status_forcelist``. Previously an empty/None forcelist leaked the
    falsy container itself to callers.
    """
    if self.method_whitelist and method.upper() not in self.method_whitelist:
        return False
    # bool() normalises the "no forcelist configured" case to False.
    return bool(self.status_forcelist and status_code in self.status_forcelist)
|
def _prepare_client(client_or_address):
    """
    :param client_or_address: one of:
        * None
        * verbatim: 'local'
        * string address
        * a Client instance
    :return: a tuple: (Client instance, shutdown callback function).
    :raises: ValueError if no valid client input was provided.
    """
    wants_local = (client_or_address is None
                   or str(client_or_address).lower() == 'local')
    if wants_local:
        # Spin up a throwaway local cluster; the callback tears down both.
        cluster = LocalCluster(diagnostics_port=None)
        client = Client(cluster)

        def shutdown(verbose=False):
            if verbose:
                print('shutting down client and local cluster')
            client.close()
            cluster.close()

        return client, shutdown
    if isinstance(client_or_address, str) and client_or_address.lower() != 'local':
        # Connect to an existing scheduler; only the client is ours to close.
        client = Client(client_or_address)

        def shutdown(verbose=False):
            if verbose:
                print('shutting down client')
            client.close()

        return client, shutdown
    if isinstance(client_or_address, Client):
        # Externally created client: shutting it down is the caller's job.
        def shutdown(verbose=False):
            if verbose:
                print('not shutting down client, client was created externally')
            return None

        return client_or_address, shutdown
    raise ValueError("Invalid client specified {}".format(str(client_or_address)))
|
def AddDischargingBattery(self, device_name, model_name, percentage, seconds_to_empty):
    '''Convenience method to add a discharging battery object

    You have to specify a device name which must be a valid part of an
    object path, e.g. "mock_ac", an arbitrary model name, the charge
    percentage, and the seconds until the battery is empty.

    Please note that this does not set any global properties such as
    "on-battery".

    Returns the new object path.
    '''
    path = '/org/freedesktop/UPower/devices/' + device_name
    properties = {
        'PowerSupply': dbus.Boolean(True, variant_level=1),
        'IsPresent': dbus.Boolean(True, variant_level=1),
        'Model': dbus.String(model_name, variant_level=1),
        'Percentage': dbus.Double(percentage, variant_level=1),
        'TimeToEmpty': dbus.Int64(seconds_to_empty, variant_level=1),
        'EnergyFull': dbus.Double(100.0, variant_level=1),
        'Energy': dbus.Double(percentage, variant_level=1),
        # UP_DEVICE_STATE_DISCHARGING
        'State': dbus.UInt32(2, variant_level=1),
        # UP_DEVICE_KIND_BATTERY
        'Type': dbus.UInt32(2, variant_level=1),
    }
    self.AddObject(path, DEVICE_IFACE, properties, [])
    self.EmitSignal(MAIN_IFACE, 'DeviceAdded', self.device_sig_type, [path])
    return path
|
def load(self, filename):
    """load data from a saved .lcopt file

    Appends the ``.lcopt`` suffix when missing, unpickles the saved model
    (trying ``filename`` directly, then the default model storage
    directory) and copies the known attributes onto ``self``.

    NOTE(review): ``pickle.load`` executes arbitrary code from the file;
    only load .lcopt files from trusted sources.
    """
    if filename[-6:] != ".lcopt":
        filename += ".lcopt"
    try:
        savedInstance = pickle.load(open("{}".format(filename), "rb"))
    except FileNotFoundError:
        # Fall back to the shared model storage directory.
        savedInstance = pickle.load(open(fix_mac_path_escapes(os.path.join(storage.model_dir, "{}".format(filename))), "rb"))
    # Only this known attribute list is transferred from the pickle;
    # anything else is ignored.
    attributes = ['name', 'database', 'params', 'production_params', 'allocation_params', 'ext_params', 'matrix', 'names', 'parameter_sets', 'model_matrices', 'technosphere_matrices', 'leontif_matrices', 'external_databases', 'parameter_map', 'sandbox_positions', 'ecoinventName', 'biosphereName', 'forwastName', 'analysis_settings', 'technosphere_databases', 'biosphere_databases', 'result_set', 'evaluated_parameter_sets', 'useForwast', 'base_project_name', 'save_option', 'allow_allocation', 'ecoinvent_version', 'ecoinvent_system_model', ]
    for attr in attributes:
        if hasattr(savedInstance, attr):
            setattr(self, attr, getattr(savedInstance, attr))
        else:
            pass
            # print("can't set {}".format(attr))
    # use legacy save option if this is missing from the model
    if not hasattr(savedInstance, 'save_option'):
        setattr(self, 'save_option', LEGACY_SAVE_OPTION)
    # figure out ecoinvent version and system model if these are missing from the model
    if not hasattr(savedInstance, 'ecoinvent_version') or not hasattr(savedInstance, 'ecoinvent_system_model'):
        # assumes ecoinventName is underscore-separated with the major
        # version as the last char of part 0, e.g. "Ecoinvent3_3_cutoff"
        # — TODO confirm against the naming used at model creation.
        parts = savedInstance.ecoinventName.split("_")
        main_version = parts[0][-1]
        sub_version = parts[1]
        system_model = parts[2]
        # print(parts)
        setattr(self, 'ecoinvent_version', '{}.{}'.format(main_version, sub_version))
        setattr(self, 'ecoinvent_system_model', system_model)
|
def source_uris(self):
    """The fully-qualified URIs that point to your data in Google Cloud Storage.

    Each URI can contain one '*' wildcard character and it must come after
    the 'bucket' name.
    """
    targets = luigi.task.flatten(self.input())
    return [target.path for target in targets]
|
def index_all_layers(self):
    """Index all layers in search engine."""
    from hypermap.aggregator.models import Layer
    if settings.REGISTRY_SKIP_CELERY:
        # Synchronous path: index every layer inline.
        for layer in Layer.objects.all():
            index_layer(layer.id)
    else:
        # Celery path: stash the valid/invalid layer id sets in the cache
        # for the asynchronous indexer to pick up.
        valid_ids = set(Layer.objects.filter(is_valid=True).values_list('id', flat=True))
        invalid_ids = set(Layer.objects.filter(is_valid=False).values_list('id', flat=True))
        cache.set('layers', valid_ids)
        cache.set('deleted_layers', invalid_ids)
|
def queryWorkitems(self, query_str, projectarea_id=None, projectarea_name=None, returned_properties=None, archived=False):
    """Query workitems with the query string in a certain
    :class:`rtcclient.project_area.ProjectArea`

    At least either of `projectarea_id` and `projectarea_name` is given.

    :param query_str: a valid query string
    :param projectarea_id: the :class:`rtcclient.project_area.ProjectArea` id
    :param projectarea_name: the
        :class:`rtcclient.project_area.ProjectArea` name
    :param returned_properties: the returned properties that you want.
        Refer to :class:`rtcclient.client.RTCClient` for more explanations
    :param archived: (default is False) whether the
        :class:`rtcclient.workitem.Workitem` is archived
    :return: a :class:`list` that contains the queried
        :class:`rtcclient.workitem.Workitem` objects
    :rtype: list
    """
    # Resolve the project area from whichever identifier was supplied.
    resolved_pa_id = self.rtc_obj._pre_get_resource(
        projectarea_id=projectarea_id,
        projectarea_name=projectarea_name)
    self.log.info("Start to query workitems with query string: %s", query_str)
    return self.rtc_obj._get_paged_resources(
        "Query",
        projectarea_id=resolved_pa_id,
        customized_attr=urlquote(query_str),
        page_size="100",
        returned_properties=returned_properties,
        archived=archived)
|
def attributes(self, **kwargs):  # pragma: no cover
    """Retrieve the attribute configuration object.

    Retrieves a mapping that identifies the custom directory attributes
    configured for the Directory SyncService instance, and the mapping
    of the custom attributes to standard directory attributes.

    Args:
        **kwargs: Supported :meth:`~pancloud.httpclient.HTTPClient.request` parameters.

    Returns:
        requests.Response: Requests Response() object.

    Examples:
        Refer to ``directory_attributes.py`` example.
    """
    endpoint = "/directory-sync-service/v1/attributes"
    return self._httpclient.request(
        method="GET", path=endpoint, url=self.url, **kwargs)
|
def __fire_callback(self, type_, *args, **kwargs):
    """Returns True if at least one callback was called"""
    fired_any = False
    plain_submit = self.__threadpool.submit
    with self.__callbacks:
        # CRUD callbacks may be routed to a dedicated serialising pool.
        serialised_submit = (self.__crud_threadpool.submit
                             if type_ in _CB_CRUD_TYPES else plain_submit)
        for func, serialised_if_crud in self.__callbacks[type_]:
            fired_any = True
            # allow CRUD callbacks to not be serialised if requested
            chosen = serialised_submit if serialised_if_crud else plain_submit
            chosen(func, *args, **kwargs)
    return fired_any
|
def create_variable(descriptor):
    """Creates a variable from a dictionary descriptor."""
    kind = descriptor['type']
    if kind == 'continuous':
        return ContinuousVariable(descriptor['name'], descriptor['domain'], descriptor.get('dimensionality', 1))
    if kind == 'bandit':
        # bandits variables cannot be repeated
        return BanditVariable(descriptor['name'], descriptor['domain'], descriptor.get('dimensionality', None))
    if kind == 'discrete':
        return DiscreteVariable(descriptor['name'], descriptor['domain'], descriptor.get('dimensionality', 1))
    if kind == 'categorical':
        return CategoricalVariable(descriptor['name'], descriptor['domain'], descriptor.get('dimensionality', 1))
    raise InvalidConfigError('Unknown variable type ' + kind)
|
def get_project(self, projectname):
    """Get the project details from Slurm."""
    results = self._read_output(
        ["list", "accounts", "where", "name=%s" % projectname])
    if not results:
        return None
    if len(results) > 1:
        message = "Command returned multiple results for '%s'." % projectname
        logger.error(message)
        raise RuntimeError(message)
    record = results[0]
    found_name = record["Account"]
    # Sanity-check that Slurm returned the project we asked for.
    if projectname.lower() != found_name.lower():
        message = ("We expected projectname '%s' "
                   "but got projectname '%s'." % (projectname, found_name))
        logger.error(message)
        raise RuntimeError(message)
    return record
|
def load_pdb(pdb, path=True, pdb_id='', ignore_end=False):
    """Converts a PDB file into an AMPAL object.

    Parameters
    ----------
    pdb : str
        Either a path to a PDB file or a string containing PDB
        format structural data.
    path : bool, optional
        If `true`, flags `pdb` as a path and not a PDB string.
    pdb_id : str, optional
        Identifier for the `Assembly`.
    ignore_end : bool, optional
        If `false`, parsing of the file will stop when an "END"
        record is encountered.

    Returns
    -------
    ampal : ampal.Assembly or ampal.AmpalContainer
        AMPAL object that contains the structural information from
        the PDB file provided. If the PDB file has a single state
        then an `Assembly` will be returned, otherwise an
        `AmpalContainer` will be returned.
    """
    parser = PdbParser(pdb, path=path, pdb_id=pdb_id, ignore_end=ignore_end)
    return parser.make_ampal()
|
def _pick_lead_item(items):
    """Choose lead item for a set of samples.

    Picks tumors for tumor/normal pairs and first sample for batch groups.
    """
    paired = vcfutils.get_paired(items)
    return paired.tumor_data if paired else list(items)[0]
|
def _save_or_delete_workflow(self):
    """Calls the real save method if we pass the beginning of the wf.

    Start* tasks are skipped entirely. When an End* task is reached
    outside of a subprocess, the workflow is flagged as finished and —
    for non-ephemeral, non-external workflows — its task invitations
    are deleted before the state is persisted to the cache.
    """
    if not self.current.task_type.startswith('Start'):
        if self.current.task_name.startswith('End') and not self.are_we_in_subprocess():
            # Mark the workflow finished and stamp the completion time.
            self.wf_state['finished'] = True
            self.wf_state['finish_date'] = datetime.now().strftime(settings.DATETIME_DEFAULT_FORMAT)
            if self.current.workflow_name not in settings.EPHEMERAL_WORKFLOWS and not self.wf_state['in_external']:
                # Remove pending invitations tied to this instance/role.
                wfi = WFCache(self.current).get_instance()
                TaskInvitation.objects.filter(instance=wfi, role=self.current.role, wf_name=wfi.wf.name).delete()
                self.current.log.info("Delete WFCache: %s %s" % (self.current.workflow_name, self.current.token))
        # NOTE(review): the cache is written even for finished workflows —
        # presumably to persist the final state; confirm intended.
        self.save_workflow_to_cache(self.serialize_workflow())
|
async def update(self, db=None, data=None):
    '''Update the entire document by replacing its content with new data,
    retaining its primary key.

    :param db: optional database handle; falls back to ``self.db``.
    :param data: optional dict of new values merged into the model before
        the replace; when omitted, the model's current data is written.
    :returns: a fresh model built from the post-replace document, or
        ``None`` when no document matched the primary key.
    :raises Exception: when the primary key is missing from the payload.
    '''
    db = db or self.db
    # NOTE(review): ``if data:`` treats an empty dict like None, so the
    # model's own data is exported instead — confirm that is intended.
    if data:  # update model explicitely with a new data structure
        # merge the current model's data with the new data
        self.import_data(data)
        # prepare data for database update
        data = self.prepare_data()
        # data = {x: ndata[x] for x in ndata if x in data or x == self.primary_key}
    else:
        data = self.export_data(native=True)
    if self.primary_key not in data or data[self.primary_key] is None:
        raise Exception('Missing object primary key')
    query = {self.primary_key: self.pk}
    # Retry the replace across transient connection failures.
    for i in self.connection_retries():
        try:
            result = await db[self.get_collection_name()].find_one_and_replace(filter=query, replacement=data, return_document=ReturnDocument.AFTER)
            if result:
                updated_obj = self.create_model(result)
                updated_obj._db = db
                # emit post save
                asyncio.ensure_future(post_save.send(sender=self.__class__, db=db, instance=updated_obj, created=False))
                return updated_obj
            return None
        except ConnectionFailure as ex:
            # Back off between attempts; re-raise once the retry budget
            # is exhausted.
            exceed = await self.check_reconnect_tries_and_wait(i, 'update')
            if exceed:
                raise ex
|
def channels_history(self, room_id, **kwargs):
    """Retrieves the messages from a channel.

    :param room_id: the channel's room id.
    :param kwargs: extra query options, forwarded to the API helper as a
        single ``kwargs`` argument (the helper's calling convention).
    """
    return self.__call_api_get('channels.history', roomId=room_id, kwargs=kwargs)
|
def parse_model(self, model):
    """Split the given model_name into controller and model parts.

    If the controller part is empty, the current controller will be used.
    If the model part is empty, the current model will be used for
    the controller.
    The returned model name will always be qualified with a username.

    :param model str: The model name to parse.
    :return (str, str): The controller and model names.
    :raises NoModelException: when no model name can be determined.
    :raises JujuError: when no account or username is known for the
        controller.
    """
    # TODO if model is empty, use $JUJU_MODEL environment variable.
    if model and ':' in model:
        # explicit controller given
        # NOTE(review): more than one ':' raises ValueError here —
        # confirm model names can never contain a colon.
        controller_name, model_name = model.split(':')
    else:
        # use the current controller if one isn't explicitly given
        controller_name = self.current_controller()
        model_name = model
    if not controller_name:
        controller_name = self.current_controller()
    if not model_name:
        model_name = self.current_model(controller_name, model_only=True)
        if not model_name:
            raise NoModelException('no current model')
    if '/' not in model_name:
        # model name doesn't include a user prefix, so add one
        # by using the current user for the controller.
        accounts = self.accounts().get(controller_name)
        if accounts is None:
            raise JujuError('No account found for controller {} '.format(controller_name))
        username = accounts.get('user')
        if username is None:
            raise JujuError('No username found for controller {}'.format(controller_name))
        model_name = username + "/" + model_name
    return controller_name, model_name
|
def next_fat(self, current):
    """Helper gives you seekable position of next FAT sector. Should not be
    called from external code.

    :param current: index of the current FAT sector entry.
    :returns: the FAT entry for ``current`` (i.e. the next sector in the
        chain).
    """
    # Number of 4-byte FAT entries that fit in one sector.
    sector_size = self.header.sector_size // 4
    # Which FAT sector holds the entry for `current`.
    block = current // sector_size
    # The first 109 FAT sector locations live in the header DIFAT at
    # file offset 76.
    difat_position = 76
    if block >= 109:
        # Entry lives beyond the header DIFAT: walk the chained DIFAT
        # sectors.
        block -= 109
        sector = self.header.difat_sector_start
        while block >= sector_size:
            # The last 4 bytes of a DIFAT sector point at the next
            # DIFAT sector.
            position = (sector + 1) << self.header.sector_shift
            position += self.header.sector_size - 4
            sector = self.get_long(position)
            # Each DIFAT sector holds (sector_size - 1) usable entries.
            block -= sector_size - 1
        difat_position = (sector + 1) << self.header.sector_shift
    # Resolve the FAT sector number, then read the entry for `current`
    # within it.
    fat_sector = self.get_long(difat_position + block * 4)
    fat_position = (fat_sector + 1) << self.header.sector_shift
    fat_position += (current % sector_size) * 4
    return self.get_long(fat_position)
|
def call(cmd, stdout=PIPE, stderr=PIPE, on_error='raise', **kwargs):
    """Call out to the shell using `subprocess.Popen`

    Parameters
    ----------
    stdout : `file-like`, optional
        stream for stdout
    stderr : `file-like`, optional
        stream for stderr
    on_error : `str`, optional
        what to do when the command fails, one of

        - 'ignore' - do nothing
        - 'warn' - print a warning
        - 'raise' - raise an exception

    **kwargs
        other keyword arguments to pass to `subprocess.Popen`

    Returns
    -------
    out : `str`
        the output stream of the command
    err : `str`
        the error stream from the command

    Raises
    ------
    OSError
        if `cmd` is a `str` (or `shell=True` is passed) and the
        executable is not found
    subprocess.CalledProcessError
        if the command fails otherwise
    """
    # Sequence commands run without a shell; plain strings go through one.
    if isinstance(cmd, (list, tuple)):
        cmdstr = ' '.join(cmd)
        kwargs.setdefault('shell', False)
    else:
        cmdstr = str(cmd)
        kwargs.setdefault('shell', True)
    proc = Popen(cmd, stdout=stdout, stderr=stderr, **kwargs)
    out, err = proc.communicate()
    if proc.returncode and on_error != 'ignore':
        error = CalledProcessError(proc.returncode, cmdstr)
        if on_error == 'warn':
            warnings.warn(str(error))
        else:
            raise error
    return out.decode('utf-8'), err.decode('utf-8')
|
def rows(self) -> List[List[str]]:
    """Returns the table rows (one list of cell values per data dict)."""
    return [[*record.values()] for record in self.data]
|
def get_status(self, instance):
    """Retrieves a status of a field from cache.

    Fields in state 'error' and 'complete' will not retain the status
    after the call.
    """
    status_key, status = self._get_status(instance)
    # Terminal states are one-shot: drop them from the cache on read.
    if status['state'] in ('complete', 'error'):
        cache.delete(status_key)
    return status
|
def _gettype ( self ) :
'''Return current type of this struct
: returns : a typedef object ( e . g . nstruct )'''
|
current = self
lastname = getattr ( current . _parser , 'typedef' , None )
while hasattr ( current , '_sub' ) :
current = current . _sub
tn = getattr ( current . _parser , 'typedef' , None )
if tn is not None :
lastname = tn
return lastname
|
def extend_regex2(regexpr, reflags=0):
    """also preprocesses flags"""
    regexpr = extend_regex(regexpr)
    # vim-like ignore-case hack: a leading "\c" is stripped and turned
    # into re.IGNORECASE.
    prefix = '\\c'
    if regexpr.startswith(prefix):
        regexpr = regexpr[len(prefix):]
        reflags |= re.IGNORECASE
    return regexpr, reflags
|
def get_numpy_array(self):
    """Dump this color into NumPy array."""
    # Build the spectral value list dynamically from the required VALUES
    # attributes; missing ones default to 0.0, which does not affect the
    # outcome of the downstream XYZ-conversion matrix math.
    values = [getattr(self, field, 0.0) for field in self.VALUES]
    # Wrap in an extra dimension to form a 1xN matrix.
    return numpy.array([values])
|
def detect_missing_relations(self, obj, exc):
    """Parse error messages and collect the missing-relationship errors
    as a dict of Resource -> {id set}"""
    missing = defaultdict(set)
    # check if it was a relationship that doesnt exist locally
    pattern = r".+ with id (\d+) does not exist.+"
    for name, err in exc.error_dict.items():
        match = re.match(pattern, str(err))
        if not match:
            continue
        field = obj._meta.get_field(name)
        resource = self.get_resource(field.related_model)
        missing[resource].add(int(match.group(1)))
    return missing
|
def local_fehdist(feh):
    """feh PDF based on local SDSS distribution

    From Jo Bovy:
    https://github.com/jobovy/apogee/blob/master/apogee/util/__init__.py#L3
    2D gaussian fit based on Casagrande (2011)
    """
    # Two-component Gaussian mixture: amplitudes 0.8/0.15 and 0.2/0.22,
    # means 0.016 and -0.15, sigmas 0.15 and 0.22.
    narrow = 0.8 / 0.15 * np.exp(-0.5 * (feh - 0.016) ** 2. / 0.15 ** 2.)
    broad = 0.2 / 0.22 * np.exp(-0.5 * (feh + 0.15) ** 2. / 0.22 ** 2.)
    return narrow + broad
|
def get_data(self, path, **params):
    """Giving a service path and optional specific arguments, returns
    the XML data from the API parsed as a dict structure.

    :param path: service path to request.
    :param params: extra query parameters forwarded to the request.
    :raises: re-raises whatever the XML parser raised, after printing
        the request context for debugging.
    """
    xml = self.get_response(path, **params)
    try:
        return parse(xml)
    except Exception as err:
        # Dump the request context before re-raising to aid debugging.
        print(path)
        print(params)
        print(err)
        raise
|
def on_edited_dataframe_sync(cell_renderer, iter, new_value, column, df_py_dtypes, list_store, df_data):
    '''Handle the `'edited'` signal from a `gtk.CellRenderer` to:

     * Update the corresponding entry in the list store.
     * Update the corresponding entry in the provided data frame instance.

    The callback can be connected to the cell renderer as follows:

        cell_renderer.connect('edited', on_edited_dataframe_sync, column,
                              list_store, df_py_dtypes, df_data)

    where `column` is the `gtk.TreeViewColumn` the cell renderer belongs to,
    and `df_py_dtypes` and `list_store` are the return values from calling
    `get_list_store` on the `df_data` data frame.

    Args:

        cell_renderer (gtk.CellRenderer)
        iter (str) : Gtk TreeView iterator
        new_value (str) : New value resulting from edit operation.
        column (gtk.TreeViewColumn) : Column containing edited cell.
        df_py_dtypes (pandas.DataFrame) : Data frame containing type
            information for columns in tree view (and `list_store`).
        list_store (gtk.ListStore) : Model containing data bound to tree view.
        df_data (pandas.DataFrame) : Data frame containing data in `list_store`.

    Returns:

        (bool): ``True`` if the value changed and both stores were updated,
            ``False`` if the edited value equals the current one.
    '''
    # Extract name of column (name of TreeView column must match data frame
    # column name).
    column_name = column.get_name()
    # Look up the list store column index and data type for column.
    # (.loc replaces the long-deprecated .ix indexer removed in pandas 1.0.)
    i, dtype = df_py_dtypes.loc[column_name]
    # Coerce the edited text to the column's dtype.
    if dtype == float:
        value = si_parse(new_value)
    elif dtype == bool:
        # Checkbox columns toggle their current value.
        value = not list_store[iter][i]
    else:
        # NOTE(review): other dtypes previously hit a NameError here;
        # fall back to storing the raw edited string.
        value = new_value
    if value == list_store[iter][i]:
        # Value has not changed.
        return False
    list_store[iter][i] = value
    # Update the data frame with the new value.
    df_data[column_name].values[int(iter)] = value
    return True
|
def _makeTextWidgets(self):
    """Makes a text widget.

    Builds a urwid Text widget from ``self.text``, stores it on the
    instance, and returns it wrapped in a single-element list.
    """
    self.textWidget = urwid.Text(self.text)
    return [self.textWidget]
|
def message_text(self, m_data):
    '''Raises ValueError if a value doesn't work out, and TypeError if
    this isn't a message type.

    :param m_data: dict-like message payload; expected to carry a
        ``type`` key and text under ``text`` or ``message.text``.
    :returns: the extracted message text.
    '''
    if m_data.get('type') != 'message':
        raise TypeError('This is not a message')
    # Edited messages have text in message
    _text = m_data.get('text', None) or m_data.get('message', {}).get('text', None)
    try:
        log.info('Message is %s', _text)
    # this can violate the ascii codec
    except UnicodeEncodeError as uee:
        log.warning('Got a message that I could not log. The reason is: %s', uee)
        # Convert UTF to string
        # NOTE(review): round-tripping through json.dumps then
        # yaml.safe_load appears intended to normalise the encoding —
        # confirm this is still needed on Python 3.
        _text = salt.utils.json.dumps(_text)
        _text = salt.utils.yaml.safe_load(_text)
    if not _text:
        raise ValueError('_text has no value')
    return _text
|
def generate_patches(self):
    """Generates a list of patches for each file underneath
    self.root_directory that satisfy the given query conditions, where
    patches for each file are suggested by self.suggestor.

    Yields:
        Patch objects with their ``path`` attribute filled in.
    """
    start_pos = self.start_position or Position(None, None)
    end_pos = self.end_position or Position(None, None)
    path_list = Query._walk_directory(self.root_directory)
    path_list = Query._sublist(path_list, start_pos.path, end_pos.path)
    # A path is eligible when it looks like code AND (it passes the path
    # filter OR extensionless files are explicitly included).
    # Parenthesization fixed: 'and' previously bound tighter than 'or',
    # which let non-code extensionless files through.
    path_list = (
        path for path in path_list
        if Query._path_looks_like_code(path)
        and (self.path_filter(path)
             or (self.inc_extensionless and helpers.is_extensionless(path))))
    for path in path_list:
        try:
            # 'with' ensures the handle is closed (original leaked it).
            with open(path) as f:
                lines = list(f)
        except (IOError, UnicodeDecodeError):
            # If we can't open the file -- perhaps it's a symlink whose
            # destination no longer exists -- then short-circuit.
            continue
        for patch in self.suggestor(lines):
            if path == start_pos.path:
                if patch.start_line_number < start_pos.line_number:
                    continue  # suggestion is pre-start_pos
            if path == end_pos.path:
                if patch.end_line_number >= end_pos.line_number:
                    break  # suggestion is post-end_pos
            old_lines = lines[patch.start_line_number:patch.end_line_number]
            if patch.new_lines is None or patch.new_lines != old_lines:
                patch.path = path
                yield patch
                # re-open file, in case contents changed
                with open(path) as f:
                    lines[:] = list(f)
|
def get_disk_usage(self, path=None):
    """Return the total, used and free disk space in bytes as named tuple,
    or placeholder values simulating unlimited space if not set.

    .. note:: This matches the return value of shutil.disk_usage().

    Args:
        path: The disk space is returned for the file system device where
            `path` resides.
            Defaults to the root path (e.g. '/' on Unix systems).
    """
    DiskUsage = namedtuple('usage', 'total, used, free')
    if path is None:
        mount_point = self.mount_points[self.root.name]
    else:
        mount_point = self._mount_point_for_path(path)
    if mount_point and mount_point['total_size'] is not None:
        total = mount_point['total_size']
        used = mount_point['used_size']
        return DiskUsage(total, used, total - used)
    # No size configured: pretend there is 1 TiB of completely free space.
    unlimited = 1024 * 1024 * 1024 * 1024
    return DiskUsage(unlimited, 0, unlimited)
|
def false_negatives(links_true, links_pred):
    """Count the number of False Negatives.

    Returns the number of incorrect predictions of true links (true
    links, but predicted as non-links). This value is known as the
    number of False Negatives (FN).

    Parameters
    ----------
    links_true : pandas.MultiIndex, pandas.DataFrame, pandas.Series
        The true (or actual) links.
    links_pred : pandas.MultiIndex, pandas.DataFrame, pandas.Series
        The predicted links.

    Returns
    -------
    int
        The number of false negatives.
    """
    true_index = _get_multiindex(links_true)
    pred_index = _get_multiindex(links_pred)
    # True links absent from the predictions are the false negatives.
    return len(true_index.difference(pred_index))
|
def force_bytes(bytes_or_unicode, encoding='utf-8', errors='backslashreplace'):
    """Convert passed string type to bytes, if necessary.

    ``bytes`` input is returned untouched; text input is encoded with
    the given encoding/error handler.
    """
    already_bytes = isinstance(bytes_or_unicode, bytes)
    return bytes_or_unicode if already_bytes else bytes_or_unicode.encode(encoding, errors)
|
def det_curve(self, cost_miss=100, cost_fa=1, prior_target=0.01, return_latency=False):
    """DET curve

    Parameters
    ----------
    cost_miss : float, optional
        Cost of missed detections. Defaults to 100.
    cost_fa : float, optional
        Cost of false alarms. Defaults to 1.
    prior_target : float, optional
        Target trial prior. Defaults to 0.01.
    return_latency : bool, optional
        Set to True to return latency.
        Has no effect when latencies are given at initialization time.

    Returns
    -------
    thresholds : numpy array
        Detection thresholds
    fpr : numpy array
        False alarm rate
    fnr : numpy array
        False rejection rate
    eer : float
        Equal error rate
    cdet : numpy array
        Cdet cost function
    speaker_latency : numpy array
    absolute_latency : numpy array
        Speaker and absolute latency when return_latency is set to True.

    Notes
    -----
    When ``self.latencies`` is set, the return value is instead a dict
    mapping 'speaker'/'absolute' to per-latency
    ``(thresholds, fpr, fnr, eer, cdet)`` tuples.
    """
    if self.latencies is None:
        # collect ground truth and scores from the trials yielded by iterating self
        y_true = np.array([trial['target'] for _, trial in self])
        scores = np.array([trial['score'] for _, trial in self])
        # NOTE: this calls the module-level det_curve helper, not this method
        fpr, fnr, thresholds, eer = det_curve(y_true, scores, distances=False)
        # reversed so thresholds end up in ascending order (required by
        # searchsorted below) — assumes the helper returns them descending
        fpr, fnr, thresholds = fpr[::-1], fnr[::-1], thresholds[::-1]
        # detection cost: prior-weighted sum of miss and false-alarm rates
        cdet = cost_miss * fnr * prior_target + cost_fa * fpr * (1. - prior_target)
        if return_latency:
            # needed to align the thresholds used in the DET curve
            # with (self.)thresholds used to compute latencies.
            indices = np.searchsorted(thresholds, self.thresholds, side='left')
            thresholds = np.take(thresholds, indices, mode='clip')
            fpr = np.take(fpr, indices, mode='clip')
            fnr = np.take(fnr, indices, mode='clip')
            cdet = np.take(cdet, indices, mode='clip')
            return thresholds, fpr, fnr, eer, cdet, self.speaker_latency, self.absolute_latency
        else:
            return thresholds, fpr, fnr, eer, cdet
    else:
        y_true = np.array([trial['target'] for _, trial in self])
        spk_scores = np.array([trial['spk_score'] for _, trial in self])
        abs_scores = np.array([trial['abs_score'] for _, trial in self])
        result = {}
        # one DET curve per score type ('speaker'/'absolute') and per latency
        # (scores has one column per latency in self.latencies)
        for key, scores in {'speaker': spk_scores, 'absolute': abs_scores}.items():
            result[key] = {}
            for i, latency in enumerate(self.latencies):
                fpr, fnr, theta, eer = det_curve(y_true, scores[:, i], distances=False)
                fpr, fnr, theta = fpr[::-1], fnr[::-1], theta[::-1]
                cdet = cost_miss * fnr * prior_target + cost_fa * fpr * (1. - prior_target)
                result[key][latency] = theta, fpr, fnr, eer, cdet
        return result
|
def return_future(fn):
    """Decorator that turns a synchronous function into one returning a future.

    This should only be applied to non-blocking functions. Will do
    set_result() with the return value, or set_exc_info() if an exception
    is raised.
    """
    @wraps(fn)
    def wrapper(*args, **kwargs):
        outcome = fn(*args, **kwargs)
        return gen.maybe_future(outcome)
    return wrapper
|
def __get_state_by_id(cls, job_id):
    """Get job state by id.

    Args:
        job_id: job id.

    Returns:
        model.MapreduceState for the job.

    Raises:
        ValueError: if the job state is missing.
    """
    job_state = model.MapreduceState.get_by_job_id(job_id)
    if job_state is None:
        raise ValueError("Job state for job %s is missing." % job_id)
    return job_state
|
def require_condition(cls, expr, message, *format_args, **format_kwds):
    """Assert a certain state: if *expr* evaluates falsy, raise this
    exception type with the supplied message.

    :param: message: The failure message to attach to the raised Buzz
    :param: expr: A boolean value indicating an evaluated expression
    :param: format_args: Format arguments. Follows str.format convention
    :param: format_kwds: Format keyword args. Follows str.format convention
    """
    if expr:
        return
    raise cls(message, *format_args, **format_kwds)
|
def get_next_action(self, request, application, roles):
    """Retrieve the next state."""
    # Check for serious errors in submission.
    # Should only happen in rare circumstances.
    validation_errors = application.check_valid()
    if len(validation_errors) > 0:
        for problem in validation_errors:
            messages.error(request, problem)
        return 'error'
    # approve application
    created_person, created_account = application.approve(request.user)
    # send email
    application.extend()
    link, is_secret = base.get_email_link(application)
    emails.send_approved_email(
        application, created_person, created_account, link, is_secret)
    if created_person or created_account:
        return 'password_needed'
    return 'password_ok'
|
def removeProdable(self, prodable: Prodable = None, name: str = None) -> Optional[Prodable]:
    """Remove the specified Prodable object from this Looper's list of Prodables.

    :param prodable: the Prodable to remove (takes precedence over ``name``)
    :param name: alternatively, remove the first prodable whose ``name``
        attribute equals this value
    :return: the removed Prodable, or None if nothing was removed
    """
    if prodable:
        self.prodables.remove(prodable)
        return prodable
    elif name:
        for p in self.prodables:
            if getattr(p, "name", None) == name:
                prodable = p
                break
        if prodable:
            self.prodables.remove(prodable)
            return prodable
        else:
            # BUG FIX: previously this formatted `prodable`, which is always
            # None in this branch; report the requested name instead.
            logger.warning(
                "Trying to remove a prodable {} which is not present".format(name))
    else:
        logger.error("Provide a prodable object or a prodable name")
|
def where(self, other, cond, align=True, errors='raise', try_cast=False, axis=0, transpose=False):
    """evaluate the block; return result block(s) from the result

    Parameters
    ----------
    other : a ndarray/object
        replacement values (used where ``cond`` does not hold)
    cond : the condition to respect
    align : boolean, perform alignment on other/cond
    errors : str, {'raise', 'ignore'}, default 'raise'
        - ``raise`` : allow exceptions to be raised
        - ``ignore`` : suppress exceptions. On error return original object
    try_cast : boolean
        attempt to cast the result back to the input dtype
    axis : int
    transpose : boolean
        Set to True if self is stored with axes reversed

    Returns
    -------
    a new block(s), the result of the func
    """
    import pandas.core.computation.expressions as expressions
    assert errors in ['raise', 'ignore']
    values = self.values
    # keep the un-extracted `other` around for the coerce-and-retry path below
    orig_other = other
    if transpose:
        values = values.T
    # extract the underlying ndarray from Series/Index-like inputs
    other = getattr(other, '_values', getattr(other, 'values', other))
    cond = getattr(cond, 'values', cond)
    # If the default broadcasting would go in the wrong direction, then
    # explicitly reshape other instead
    if getattr(other, 'ndim', 0) >= 1:
        if values.ndim - 1 == other.ndim and axis == 1:
            other = other.reshape(tuple(other.shape + (1,)))
        elif transpose and values.ndim == self.ndim - 1:
            cond = cond.T
    if not hasattr(cond, 'shape'):
        raise ValueError("where must have a condition that is ndarray " "like")

    # our where function
    def func(cond, values, other):
        # fast path: condition holds everywhere, nothing to replace
        if cond.ravel().all():
            return values
        values, other = self._try_coerce_args(values, other)
        try:
            return self._try_coerce_result(
                expressions.where(cond, values, other))
        except Exception as detail:
            if errors == 'raise':
                raise TypeError(
                    'Could not operate [{other!r}] with block values '
                    '[{detail!s}]'.format(other=other, detail=detail))
            else:
                # return the values
                # (errors == 'ignore': fall back to an all-NaN float result)
                result = np.empty(values.shape, dtype='float64')
                result.fill(np.nan)
                return result

    # see if we can operate on the entire block, or need item-by-item
    # or if we are a single block (ndim == 1)
    try:
        result = func(cond, values, other)
    except TypeError:
        # we cannot coerce, return a compat dtype
        # we are explicitly ignoring errors
        # retry on a block coerced to a dtype that can hold `other`,
        # using the original (un-extracted) `other`
        block = self.coerce_to_target_dtype(other)
        blocks = block.where(orig_other, cond, align=align, errors=errors,
                             try_cast=try_cast, axis=axis, transpose=transpose)
        return self._maybe_downcast(blocks, 'infer')

    if self._can_hold_na or self.ndim == 1:
        if transpose:
            result = result.T
        # try to cast if requested
        if try_cast:
            result = self._try_cast_result(result)
        return self.make_block(result)

    # might need to separate out blocks
    # split rows into those fully covered by cond and the rest, so each
    # group can be cast independently
    axis = cond.ndim - 1
    cond = cond.swapaxes(axis, 0)
    mask = np.array([cond[i].all() for i in range(cond.shape[0])], dtype=bool)
    result_blocks = []
    for m in [mask, ~mask]:
        if m.any():
            r = self._try_cast_result(result.take(m.nonzero()[0], axis=axis))
            result_blocks.append(
                self.make_block(r.T, placement=self.mgr_locs[m]))
    return result_blocks
|
def _server_connect(self, s):
    """Sets up a TCP connection to the server.

    :param s: server descriptor; only ``s.uri.hostname`` and
        ``s.uri.port`` are read here.

    This is a generator-based tornado coroutine (note the ``yield``) —
    presumably decorated with ``gen.coroutine`` at its definition site
    outside this view; verify.
    """
    self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    # NOTE(review): settimeout(1.0) switches the socket back into timeout
    # (blocking-with-deadline) mode, overriding setblocking(0) on the
    # previous line — confirm this is intended; tornado's IOStream sets
    # the socket non-blocking itself when it takes ownership.
    self._socket.setblocking(0)
    self._socket.settimeout(1.0)
    if self.options["tcp_nodelay"]:
        # disable Nagle's algorithm to reduce latency of small writes
        self._socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
    self.io = tornado.iostream.IOStream(
        self._socket,
        max_buffer_size=self._max_read_buffer_size,
        max_write_buffer_size=self._max_write_buffer_size,
        read_chunk_size=self._read_chunk_size)
    # Connect to server with a deadline
    future = self.io.connect((s.uri.hostname, s.uri.port))
    yield tornado.gen.with_timeout(
        timedelta(seconds=self.options["connect_timeout"]), future)
    # Called whenever disconnected from the server.
    self.io.set_close_callback(self._process_op_err)
|
def from_env(parser_modules: t.Optional[t.Union[t.List[str], t.Tuple[str]]] = DEFAULT_PARSER_MODULES,
             env: t.Optional[t.Dict[str, str]] = None,
             silent: bool = False,
             suppress_logs: bool = False,
             extra: t.Optional[dict] = None) -> 'ConfigLoader':
    """Create a :class:`~django_docker_helpers.config.ConfigLoader` with
    parsers initialized from environment variables.

    By default all bundled parsers are initialized.  The parser list may
    be customized via the ``parser_modules`` argument or the
    ``CONFIG__PARSERS`` environment variable; the environment variable
    takes priority over the method argument.

    :param parser_modules: a list of dot-separated module paths
    :param env: a dict with environment variables, default is ``os.environ``
    :param silent: passed to :class:`~django_docker_helpers.config.ConfigLoader`
    :param suppress_logs: passed to :class:`~django_docker_helpers.config.ConfigLoader`
    :param extra: pass extra arguments to *every* parser that accepts them
    :return: an instance of :class:`~django_docker_helpers.config.ConfigLoader`
    """
    env = env or os.environ
    extra = extra or {}
    env_reader = EnvironmentParser(scope='config', env=env)
    silent = env_reader.get('silent', silent, coerce_type=bool)
    suppress_logs = env_reader.get('suppress_logs', suppress_logs, coerce_type=bool)
    requested = env_reader.get('parsers', None, coercer=comma_str_to_list)
    if not requested and not parser_modules:
        raise ValueError('Must specify `CONFIG__PARSERS` env var or `parser_modules`')
    # env var wins over the method argument
    parser_classes = ConfigLoader.import_parsers(requested or parser_modules)
    parsers = []
    for parser_class in parser_classes:
        options = ConfigLoader.load_parser_options_from_env(parser_class, env=env)
        accepted = inspect.getfullargspec(parser_class.__init__).args
        # forward env and any applicable extras to parsers that accept them
        if 'env' in accepted:
            options['env'] = env
        options.update({k: v for k, v in extra.items() if k in accepted})
        parsers.append(parser_class(**options))
    return ConfigLoader(parsers=parsers, silent=silent, suppress_logs=suppress_logs)
|
def outlook(self, qs):
    """CSV format suitable for importing into outlook"""
    out = writer(sys.stdout)
    header = ['Name', 'E-mail Address', 'Notes', 'E-mail 2 Address',
              'E-mail 3 Address', 'Mobile Phone', 'Pager', 'Company',
              'Job Title', 'Home Phone', 'Home Phone 2', 'Home Fax',
              'Home Address', 'Business Phone', 'Business Phone 2',
              'Business Fax', 'Business Address', 'Other Phone',
              'Other Fax', 'Other Address']
    out.writerow(header)
    # only the first two columns carry data; the rest stay blank
    blanks = [''] * (len(header) - 2)
    for entry in qs:
        out.writerow([full_name(**entry), entry['email']] + blanks)
|
def traverse(self):
    """Traverse proposal kernel.

    Proposes a new value for ``self.stochastic`` by moving the masked
    entries of the current point relative to its paired point, scaled by a
    randomly drawn factor ``beta``; the matching Hastings correction is
    stored in ``self.hastings_factor``.
    """
    if self.verbose > 1:
        print_('\t' + self._id + ' Running Traverse proposal kernel')
    # Mask for values to move
    phi = self.phi
    theta = self.traverse_theta
    # Calculate beta
    # beta is drawn from a two-branch power-law mixture controlled by theta
    if (random() < (theta - 1) / (2 * theta)):
        beta = exp(1 / (theta + 1) * log(random()))
    else:
        beta = exp(1 / (1 - theta) * log(random()))
    # choose which of the paired values is "current" vs "partner" depending
    # on whether this sampler is the prime chain
    if self._prime:
        xp, x = self.values
    else:
        x, xp = self.values
    if self.verbose > 1:
        print_('\t' + 'Current value = ' + str(x))
    # masked entries (phi True) move to xp + beta*(xp - x); unmasked entries
    # keep their old value — (phi == False) selects the mask complement
    x = (xp + beta * (xp - x)) * phi + x * (phi == False)
    if self.verbose > 1:
        print_('\t' + 'Proposed value = ' + str(x))
    self.stochastic.value = x
    # Set proposal adjustment factor
    # sum(phi) is the number of moved dimensions; the factor corrects for
    # the asymmetry of the beta-scaled proposal
    self.hastings_factor = (sum(phi) - 2) * log(beta)
|
def get_tabular_rows(self, url, dict_rows=False, **kwargs):
    # type: (str, bool, Any) -> Iterator[Dict]
    """Get iterator for reading rows from tabular data.

    Args:
        url (str): URL to download
        dict_rows (bool): Return dict (requires headers parameter) or list
            for each row. Defaults to False (list).
        **kwargs:
            headers (Union[int, List[int], List[str]]): Number of row(s)
                containing headers or list of headers
            file_type (Optional[str]): Type of file. Defaults to inferring.
            delimiter (Optional[str]): Delimiter used for values in each
                row. Defaults to inferring.

    Returns:
        Iterator[Union[List, Dict]]: Iterator where each row is returned
        as a list or dictionary.
    """
    stream = self.get_tabular_stream(url, **kwargs)
    return stream.iter(keyed=dict_rows)
|
def params(self, **kwargs):
    """Specify query params to be used when executing the search.

    All keyword arguments override the current values on a clone of this
    search; the clone is returned and ``self`` is left untouched. See
    https://elasticsearch-py.readthedocs.io/en/master/api.html#elasticsearch.Elasticsearch.search
    for all available parameters.

    Example::

        s = Search()
        s = s.params(routing='user-1', preference='local')
    """
    clone = self._clone()
    clone._params.update(kwargs)
    return clone
|
def join_where(self, table, one, operator, two, type='inner'):
    """Add a "join where" clause to the query.

    Delegates to :meth:`join` with the ``where`` flag set.

    :param table: The table to join with, can also be a JoinClause instance
    :type table: str or JoinClause
    :param one: The first column of the join condition
    :type one: str
    :param operator: The operator of the join condition
    :type operator: str
    :param two: The second column of the join condition
    :type two: str
    :param type: The join type
    :type type: str

    :return: The current QueryBuilder instance
    :rtype: QueryBuilder
    """
    return self.join(table, one, operator, two, type, True)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.