signature | implementation
---|---
def add_response_headers(headers):
    """Add headers passed in to the response.

    Usage:

    .. code:: py

        @app.route('/')
        @add_response_headers({'X-Robots-Tag': 'noindex'})
        def not_indexed():
            # This will set ``X-Robots-Tag: noindex`` in the response headers
            return "Check my headers!"
    """
|
def decorator(func):
    @wraps(func)
    def _(*args, **kwargs):
        rsp = make_response(func(*args, **kwargs))
        rsp_headers = rsp.headers
        for header, value in headers.items():
            rsp_headers[header] = value
        return rsp
    return _
return decorator
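# --- Illustration (not part of the original source) ---------------------
# A minimal, Flask-free sketch of the same decorator pattern. `FakeResponse`
# is a hypothetical stand-in for what Flask's make_response() would return;
# only the header-copying behaviour is demonstrated here.
from functools import wraps

class FakeResponse:
    def __init__(self, body):
        self.body = body
        self.headers = {}

def add_headers(headers):
    def decorator(func):
        @wraps(func)
        def _(*args, **kwargs):
            rsp = FakeResponse(func(*args, **kwargs))  # make_response() in the real code
            for header, value in headers.items():
                rsp.headers[header] = value
            return rsp
        return _
    return decorator

@add_headers({'X-Robots-Tag': 'noindex'})
def not_indexed():
    return "Check my headers!"

print(not_indexed().headers)  # {'X-Robots-Tag': 'noindex'}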
|
def validate_env(self, envname):
    """Check the name of the environment against the blacklist and the
    whitelist. If a whitelist is specified, only it is checked."""
|
if self.whitelist_envs and envname in self.whitelist_envs:
    return True
elif self.whitelist_envs:
    return False
if self.blacklist_envs and envname not in self.blacklist_envs:
    return True
elif self.blacklist_envs:
    # If there is just a True, all envs are blacklisted
    return False
else:
    return True
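# --- Illustration (not part of the original source) ---------------------
# A self-contained restatement of the whitelist/blacklist precedence above,
# using a hypothetical holder class so the behaviour can be exercised directly.
class _EnvFilter:
    def __init__(self, whitelist_envs=None, blacklist_envs=None):
        self.whitelist_envs = whitelist_envs
        self.blacklist_envs = blacklist_envs

    def validate_env(self, envname):
        if self.whitelist_envs:                    # a whitelist, when present, is the only list consulted
            return envname in self.whitelist_envs
        if self.blacklist_envs:                    # otherwise the blacklist excludes
            return envname not in self.blacklist_envs
        return True                                # no lists: every environment passes

assert _EnvFilter(whitelist_envs=['prod']).validate_env('prod') is True
assert _EnvFilter(whitelist_envs=['prod']).validate_env('dev') is False
assert _EnvFilter(blacklist_envs=['dev']).validate_env('dev') is False
assert _EnvFilter().validate_env('anything') is True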
|
def read_tlv(data):
    """Parse TLV8 bytes into a dict.

    If a value is larger than 255 bytes, it is split up into multiple chunks,
    so the same tag might occur several times."""
|
def _parse(data, pos, size, result=None):
    if result is None:
        result = {}
    if pos >= size:
        return result
    tag = str(data[pos])
    length = data[pos + 1]
    value = data[pos + 2:pos + 2 + length]
    if tag in result:
        result[tag] += value  # value > 255 is split up
    else:
        result[tag] = value
    return _parse(data, pos + 2 + length, size, result)
return _parse(data, 0, len(data))
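# --- Illustration (not part of the original source) ---------------------
# Calling read_tlv() on a hand-built TLV8 byte string: two chunks with the
# same tag (0x01) are concatenated, a second tag (0x02) stays separate.
data = bytes([0x01, 0x02, 0x61, 0x62,   # tag 1, length 2, b"ab"
              0x01, 0x01, 0x63,         # tag 1, length 1, b"c" (continuation chunk)
              0x02, 0x01, 0x64])        # tag 2, length 1, b"d"
print(read_tlv(data))  # {'1': b'abc', '2': b'd'}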
|
def copy(self):
    '''Returns a copy of this query.'''
|
if self.object_getattr is Query.object_getattr:
    other = Query(self.key)
else:
    other = Query(self.key, object_getattr=self.object_getattr)
other.limit = self.limit
other.offset = self.offset
other.offset_key = self.offset_key
other.filters = self.filters
other.orders = self.orders
return other
|
def make_application(debug=None, apps_dir='apps', project_dir=None,
                     include_apps=None, debug_console=True, settings_file=None,
                     local_settings_file=None, start=True, default_settings=None,
                     dispatcher_cls=None, dispatcher_kwargs=None, debug_cls=None,
                     debug_kwargs=None, reuse=True, verbose=False,
                     pythonpath=None, trace_print=False):
    """Make an application object"""
|
from uliweb.utils.common import import_attr
from uliweb.utils.whocallme import print_frame
from werkzeug.debug import DebuggedApplication

# if reuse, then create the application only once
if reuse and hasattr(SimpleFrame.__global__, 'application') and SimpleFrame.__global__.application:
    return SimpleFrame.__global__.application

# process settings and local_settings
settings_file = settings_file or os.environ.get('SETTINGS', 'settings.ini')
local_settings_file = local_settings_file or os.environ.get('LOCAL_SETTINGS', 'local_settings.ini')

dispatcher_cls = dispatcher_cls or SimpleFrame.Dispatcher
dispatcher_kwargs = dispatcher_kwargs or {}

if project_dir:
    apps_dir = os.path.abspath(os.path.normpath(os.path.join(project_dir, 'apps')))
if not project_dir:
    project_dir = os.path.abspath(os.path.normpath(os.path.abspath(os.path.join(apps_dir, '..'))))

if pythonpath:
    if isinstance(pythonpath, str):
        pythonpath = pythonpath.split(';')
    for x in pythonpath:
        if x not in sys.path:
            sys.path.insert(0, x)

if project_dir not in sys.path:
    sys.path.insert(0, project_dir)
if apps_dir not in sys.path:
    sys.path.insert(0, apps_dir)

install_config(apps_dir)

if trace_print:
    output = sys.stdout

    class MyOut(object):
        def write(self, s):
            output.write(s)
            output.write('\n')
            print_frame(output)
            if hasattr(output, 'flush'):
                output.flush()

    sys.stdout = MyOut()

application = app = dispatcher_cls(apps_dir=apps_dir,
                                   include_apps=include_apps,
                                   settings_file=settings_file,
                                   local_settings_file=local_settings_file,
                                   start=start,
                                   default_settings=default_settings,
                                   reset=True,
                                   **dispatcher_kwargs)

if verbose:
    log.info(' * settings file is "%s"' % settings_file)
    log.info(' * local settings file is "%s"' % local_settings_file)

# set the global application object
SimpleFrame.__global__.application = app

# process WSGI middlewares
middlewares = []
parameters = {}
for name, v in uliweb.settings.get('WSGI_MIDDLEWARES', {}).iteritems():
    order, kwargs = 500, {}
    if not v:
        continue
    if isinstance(v, (list, tuple)):
        if len(v) > 3:
            logging.error('WSGI_MIDDLEWARE %s definition is not right' % name)
            raise uliweb.UliwebError('WSGI_MIDDLEWARE %s definition is not right' % name)
        cls = v[0]
        if len(v) == 2:
            if isinstance(v[1], int):
                order = v[1]
            else:
                kwargs = v[1]
        else:
            order, kwargs = v[1], v[2]
    else:
        cls = v
    middlewares.append((order, name))
    parameters[name] = cls, kwargs

middlewares.sort(cmp=lambda x, y: cmp(x[0], y[0]))

for name in reversed([x[1] for x in middlewares]):
    clspath, kwargs = parameters[name]
    cls = import_attr(clspath)
    app = cls(app, **kwargs)

debug_flag = uliweb.settings.GLOBAL.DEBUG
if debug or (debug is None and debug_flag):
    if not debug_cls:
        debug_cls = DebuggedApplication
    log.setLevel(logging.DEBUG)
    log.info(' * Loading DebuggedApplication...')
    app.debug = True
    app = debug_cls(app, uliweb.settings.GLOBAL.get('DEBUG_CONSOLE', False))

return app
|
def user_lookup(self, ids, id_type="user_id"):
    """A generator that returns users for supplied user ids, screen_names,
    or an iterator of user_ids of either. Use the id_type to indicate
    which you are supplying (user_id or screen_name)."""
|
if id_type not in ['user_id', 'screen_name']:
    raise RuntimeError("id_type must be user_id or screen_name")

if not isinstance(ids, types.GeneratorType):
    ids = iter(ids)

# TODO: this is similar to hydrate, maybe they could share code?
lookup_ids = []

def do_lookup():
    ids_str = ",".join(lookup_ids)
    log.info("looking up users %s", ids_str)
    url = 'https://api.twitter.com/1.1/users/lookup.json'
    params = {id_type: ids_str}
    try:
        resp = self.get(url, params=params, allow_404=True)
    except requests.exceptions.HTTPError as e:
        if e.response.status_code == 404:
            log.warning("no users matching %s", ids_str)
        raise e
    return resp.json()

for id in ids:
    lookup_ids.append(id.strip())
    if len(lookup_ids) == 100:
        for u in do_lookup():
            yield u
        lookup_ids = []

if len(lookup_ids) > 0:
    for u in do_lookup():
        yield u
|
def ParseAgeSpecification(cls, age):
    """Parses an aff4 age and returns a datastore age specification."""
|
try:
    return (0, int(age))
except (ValueError, TypeError):
    pass

if age == NEWEST_TIME:
    return data_store.DB.NEWEST_TIMESTAMP
elif age == ALL_TIMES:
    return data_store.DB.ALL_TIMESTAMPS
elif len(age) == 2:
    start, end = age
    return (int(start), int(end))

raise ValueError("Unknown age specification: %s" % age)
|
def create(self, asset_versions=values.unset, function_versions=values.unset,
           dependencies=values.unset):
    """Create a new BuildInstance

    :param unicode asset_versions: The asset_versions
    :param unicode function_versions: The function_versions
    :param unicode dependencies: The dependencies

    :returns: Newly created BuildInstance
    :rtype: twilio.rest.serverless.v1.service.build.BuildInstance"""
|
data = values.of({
    'AssetVersions': serialize.map(asset_versions, lambda e: e),
    'FunctionVersions': serialize.map(function_versions, lambda e: e),
    'Dependencies': dependencies,
})
payload = self._version.create('POST', self._uri, data=data)
return BuildInstance(
    self._version,
    payload,
    service_sid=self._solution['service_sid'],
)
|
def _check_cooling_parameters(radiuscooling, scalecooling):
    """Helper function to verify the cooling parameters of the training."""
|
if radiuscooling != "linear" and radiuscooling != "exponential":
    raise Exception("Invalid parameter for radiuscooling: " + radiuscooling)
if scalecooling != "linear" and scalecooling != "exponential":
    raise Exception("Invalid parameter for scalecooling: " + scalecooling)
|
def write_exon_children(self, db, exon_id):
    """Write out the children records of the exon given by
    the ID (not including the exon record itself)."""
|
exon_children = db.children(exon_id, order_by='start')
for exon_child in exon_children:
    self.write_rec(exon_child)
|
def _delete_record(self, identifier=None, rtype=None, name=None, content=None):
    """Delete record(s) matching the provided params. If there is no match, do
    nothing."""
|
ids = []
if identifier:
    ids.append(identifier)
elif not identifier and rtype and name:
    records = self._list_records(rtype, name, content)
    if records:
        ids = [record["id"] for record in records]

if ids:
    LOGGER.debug("delete_records: %s", ids)
    with localzone.manage(self.filename, self.origin, autosave=True) as zone:
        for hashid in ids:
            zone.remove_record(hashid)  # pylint: disable=no-member
            LOGGER.debug("delete_record: %s", hashid)
return True
|
def set_verified(self, msg_info):
    """Expects "msg_info" to have the field 'files_containers_id'.

    This call already executes "update_last_checked_time" so it doesn't need
    to be called separately."""
|
assert hasattr(msg_info, 'files_containers_id')
with self._session_resource as session:
    session.execute(
        update(FilesDestinations)
        .where(FilesDestinations.file_containers_id == msg_info.files_containers_id)
        .values(verification_info=msg_info.msg_id)
    )
self.update_last_checked_time(msg_info)
|
def done(self):
    """Check if we should stop returning objects."""
|
if self._done:
    return self._done
if self._limit is None:
    self._done = False
elif self.itemcount >= self._limit:
    self._done = True
return self._done
|
def need_deployment():
    '''Salt thin needs to be deployed - prep the target directory and emit the
    delimiter and exit code that signals a required deployment.'''
|
if os.path.exists(OPTIONS.saltdir):
    shutil.rmtree(OPTIONS.saltdir)
old_umask = os.umask(0o077)  # pylint: disable=blacklisted-function
try:
    os.makedirs(OPTIONS.saltdir)
finally:
    os.umask(old_umask)  # pylint: disable=blacklisted-function

# Verify perms on saltdir
if not is_windows():
    euid = os.geteuid()
    dstat = os.stat(OPTIONS.saltdir)
    if dstat.st_uid != euid:
        # Attack detected, try again
        need_deployment()
    if dstat.st_mode != 16832:
        # Attack detected
        need_deployment()
    # If SUDOing then also give the super user group write permissions
    sudo_gid = os.environ.get('SUDO_GID')
    if sudo_gid:
        try:
            os.chown(OPTIONS.saltdir, -1, int(sudo_gid))
            stt = os.stat(OPTIONS.saltdir)
            os.chmod(OPTIONS.saltdir, stt.st_mode | stat.S_IWGRP | stat.S_IRGRP | stat.S_IXGRP)
        except OSError:
            sys.stdout.write('\n\nUnable to set permissions on thin directory.\nIf sudo_user is set '
                             'and is not root, be certain the user is in the same group\nas the login user')
            sys.exit(1)

# Delimiter emitted on stdout *only* to indicate shim message to master.
sys.stdout.write("{0}\ndeploy\n".format(OPTIONS.delimiter))
sys.exit(EX_THIN_DEPLOY)
|
def model_counts_spectrum(self, name, logemin, logemax, weighted=False):
    """Return the model counts spectrum of a source.

    Parameters
    ----------
    name : str
        Source name."""
|
# EAC, we need this b/c older versions of the ST don't have the right signature
try:
    cs = np.array(self.like.logLike.modelCountsSpectrum(str(name), weighted))
except (TypeError, NotImplementedError):
    cs = np.array(self.like.logLike.modelCountsSpectrum(str(name)))

imin = utils.val_to_edge(self.log_energies, logemin)[0]
imax = utils.val_to_edge(self.log_energies, logemax)[0]
if imax <= imin:
    raise Exception('Invalid energy range.')
return cs[imin:imax]
|
def all(self, components_in_and=True):
    '''Return all of the results of a query in a list.'''
|
self.components_in_and = components_in_and
return [obj for obj in iter(self)]
|
def _replace(self, data, replacements):
    """Given a list of 2-tuples (find, repl) this function performs all
    replacements on the input and returns the result."""
|
for find, repl in replacements:
    data = data.replace(find, repl)
return data
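# --- Illustration (not part of the original source) ---------------------
# The pairs are applied sequentially, so later replacements see the output of
# earlier ones; a standalone version of the same loop makes that visible.
def _replace_standalone(data, replacements):
    for find, repl in replacements:
        data = data.replace(find, repl)
    return data

# 'a' -> 'b' first, then every 'b' (including the freshly created ones) -> 'c'
print(_replace_standalone("abc", [("a", "b"), ("b", "c")]))  # ccc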
|
def handle_finish(self, obj):
    """Handle an incoming ``Data`` finished processing request.

    :param obj: The Channels message object. Command object format:

        .. code-block:: none

            'command': 'finish',
            'data_id': [id of the :class:`~resolwe.flow.models.Data` object
                        this command changes],
            'process_rc': [exit status of the processing]
            'spawn_processes': [optional; list of spawn dictionaries],
            'exported_files_mapper': [if spawn_processes present]
    """
|
data_id = obj[ExecutorProtocol.DATA_ID]
logger.debug(
    __("Finishing Data with id {} (handle_finish).", data_id),
    extra={'data_id': data_id, 'packet': obj}
)

spawning_failed = False
with transaction.atomic():
    # Spawn any new jobs in the request.
    spawned = False
    if ExecutorProtocol.FINISH_SPAWN_PROCESSES in obj:
        if is_testing():
            # NOTE: This is a work-around for Django issue #10827
            # (https://code.djangoproject.com/ticket/10827), same as in
            # TestCaseHelpers._pre_setup(). Because the listener is running
            # independently, it must clear the cache on its own.
            ContentType.objects.clear_cache()

        spawned = True
        exported_files_mapper = obj[ExecutorProtocol.FINISH_EXPORTED_FILES]
        logger.debug(
            __("Spawning new Data objects for Data with id {} (handle_finish).", data_id),
            extra={'data_id': data_id}
        )

        try:
            # This transaction is needed because we're running
            # asynchronously with respect to the main Django code
            # here; the manager can get nudged from elsewhere.
            with transaction.atomic():
                parent_data = Data.objects.get(pk=data_id)

                # Spawn processes.
                for d in obj[ExecutorProtocol.FINISH_SPAWN_PROCESSES]:
                    d['contributor'] = parent_data.contributor
                    d['process'] = Process.objects.filter(slug=d['process']).latest()
                    d['tags'] = parent_data.tags

                    for field_schema, fields in iterate_fields(d.get('input', {}), d['process'].input_schema):
                        type_ = field_schema['type']
                        name = field_schema['name']
                        value = fields[name]

                        if type_ == 'basic:file:':
                            fields[name] = self.hydrate_spawned_files(exported_files_mapper, value, data_id)
                        elif type_ == 'list:basic:file:':
                            fields[name] = [
                                self.hydrate_spawned_files(exported_files_mapper, fn, data_id)
                                for fn in value
                            ]

                    with transaction.atomic():
                        d = Data.objects.create(**d)
                        DataDependency.objects.create(
                            parent=parent_data,
                            child=d,
                            kind=DataDependency.KIND_SUBPROCESS,
                        )

                        # Copy permissions.
                        copy_permissions(parent_data, d)

                        # Entity is added to the collection only when it is
                        # created - when it only contains 1 Data object.
                        entities = Entity.objects.filter(data=d).annotate(num_data=Count('data')).filter(num_data=1)

                        # Copy collections.
                        for collection in parent_data.collection_set.all():
                            collection.data.add(d)

                            # Add entities to which data belongs to the collection.
                            for entity in entities:
                                entity.collections.add(collection)
        except Exception:  # pylint: disable=broad-except
            logger.error(
                __(
                    "Error while preparing spawned Data objects of process '{}' (handle_finish):\n\n{}",
                    parent_data.process.slug,
                    traceback.format_exc()
                ),
                extra={'data_id': data_id}
            )
            spawning_failed = True

    # Data wrap up happens last, so that any triggered signals
    # already see the spawned children. What the children themselves
    # see is guaranteed by the transaction we're in.
    if ExecutorProtocol.FINISH_PROCESS_RC in obj:
        process_rc = obj[ExecutorProtocol.FINISH_PROCESS_RC]
        try:
            d = Data.objects.get(pk=data_id)
        except Data.DoesNotExist:
            logger.warning(
                "Data object does not exist (handle_finish).",
                extra={'data_id': data_id}
            )
            async_to_sync(self._send_reply)(obj, {ExecutorProtocol.RESULT: ExecutorProtocol.RESULT_ERROR})
            return

        changeset = {
            'process_progress': 100,
            'finished': now(),
        }
        if spawning_failed:
            changeset['status'] = Data.STATUS_ERROR
            changeset['process_error'] = ["Error while preparing spawned Data objects"]
        elif process_rc == 0 and not d.status == Data.STATUS_ERROR:
            changeset['status'] = Data.STATUS_DONE
        else:
            changeset['status'] = Data.STATUS_ERROR
            changeset['process_rc'] = process_rc

        obj[ExecutorProtocol.UPDATE_CHANGESET] = changeset
        self.handle_update(obj, internal_call=True)

        if not getattr(settings, 'FLOW_MANAGER_KEEP_DATA', False):
            # Purge worker is not running in test runner, so we should skip triggering it.
            if not is_testing():
                channel_layer = get_channel_layer()
                try:
                    async_to_sync(channel_layer.send)(
                        CHANNEL_PURGE_WORKER,
                        {
                            'type': TYPE_PURGE_RUN,
                            'location_id': d.location.id,
                            'verbosity': self._verbosity,
                        }
                    )
                except ChannelFull:
                    logger.warning(
                        "Cannot trigger purge because channel is full.",
                        extra={'data_id': data_id}
                    )

# Notify the executor that we're done.
async_to_sync(self._send_reply)(obj, {ExecutorProtocol.RESULT: ExecutorProtocol.RESULT_OK})

# Now nudge the main manager to perform final cleanup. This is
# needed even if there was no spawn baggage, since the manager
# may need to know when executors have finished, to keep count
# of them and manage synchronization.
async_to_sync(consumer.send_event)({
    WorkerProtocol.COMMAND: WorkerProtocol.FINISH,
    WorkerProtocol.DATA_ID: data_id,
    WorkerProtocol.FINISH_SPAWNED: spawned,
    WorkerProtocol.FINISH_COMMUNICATE_EXTRA: {
        'executor': getattr(settings, 'FLOW_EXECUTOR', {}).get('NAME', 'resolwe.flow.executors.local'),
    },
})
|
def project_create_event(self, proj_info):
    """Create project."""
|
LOG.debug("Processing create %(proj)s event.", {'proj': proj_info})
proj_id = proj_info.get('resource_info')
self.project_create_func(proj_id)
|
def reading(self, service):
    """Get the data from the service and put them in cache.

    :param service: service object to read
    :type service: object"""
|
# counting the new data to store, to display them in the log
# provider - the service that offers data
provider_token = service.provider.token
default_provider.load_services()
service_provider = default_provider.get_service(str(service.provider.name.name))

date_triggered = service.date_triggered if service.date_triggered else service.date_created

# get the data from the provider service
kwargs = {'token': provider_token, 'trigger_id': service.id, 'date_triggered': date_triggered}
data = self.provider(service_provider, **kwargs)

if len(data) > 0:
    logger.info("{} - {} new data".format(service, len(data)))
elif data is False:
    # if data is False, something went wrong
    self.is_ceil_reached(service)
|
def answer(self, signatory):
    """Respond to this request.

    Given a L{Signatory}, I can check the validity of the signature and
    the X{C{invalidate_handle}}.

    @param signatory: The L{Signatory} to use to check the signature.
    @type signatory: L{Signatory}

    @returns: A response with an X{C{is_valid}} (and, if
        appropriate X{C{invalidate_handle}}) field.
    @returntype: L{OpenIDResponse}"""
|
is_valid = signatory.verify(self.assoc_handle, self.signed)

# Now invalidate that assoc_handle so this checkAuth message cannot
# be replayed.
signatory.invalidate(self.assoc_handle, dumb=True)
response = OpenIDResponse(self)
valid_str = (is_valid and "true") or "false"
response.fields.setArg(OPENID_NS, 'is_valid', valid_str)

if self.invalidate_handle:
    assoc = signatory.getAssociation(self.invalidate_handle, dumb=False)
    if not assoc:
        response.fields.setArg(OPENID_NS, 'invalidate_handle', self.invalidate_handle)
return response
|
def loglevel(level):
    """Convert any representation of `level` to an int appropriately.

    :type level: int or str
    :rtype: int

    >>> loglevel('DEBUG') == logging.DEBUG
    True
    >>> loglevel(10)
    10
    >>> loglevel(None)
    Traceback (most recent call last):
    ValueError: None is not a proper log level."""
|
if isinstance(level, str):
    level = getattr(logging, level.upper())
elif isinstance(level, int):
    pass
else:
    raise ValueError('{0!r} is not a proper log level.'.format(level))
return level
|
def raw_imu_send(self, time_usec, xacc, yacc, zacc, xgyro, ygyro, zgyro,
                 xmag, ymag, zmag, force_mavlink1=False):
    '''The RAW IMU readings for the usual 9DOF sensor setup. This message
    should always contain the true raw values without any
    scaling to allow data capture and system debugging.

    time_usec : Timestamp (microseconds since UNIX epoch or microseconds since system boot) (uint64_t)
    xacc      : X acceleration (raw) (int16_t)
    yacc      : Y acceleration (raw) (int16_t)
    zacc      : Z acceleration (raw) (int16_t)
    xgyro     : Angular speed around X axis (raw) (int16_t)
    ygyro     : Angular speed around Y axis (raw) (int16_t)
    zgyro     : Angular speed around Z axis (raw) (int16_t)
    xmag      : X Magnetic field (raw) (int16_t)
    ymag      : Y Magnetic field (raw) (int16_t)
    zmag      : Z Magnetic field (raw) (int16_t)'''
|
return self.send(
    self.raw_imu_encode(time_usec, xacc, yacc, zacc, xgyro, ygyro, zgyro, xmag, ymag, zmag),
    force_mavlink1=force_mavlink1
)
|
def get_accept_list(self, request):
    """Given the incoming request, return a tokenised list of media
    type strings."""
|
header = request.META.get('HTTP_ACCEPT', '*/*')
return [token.strip() for token in header.split(',')]
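# --- Illustration (not part of the original source) ---------------------
# Plain string handling only; no Django request object involved. The Accept
# header is split on commas and each token is stripped; quality parameters
# stay attached to their token.
header = "text/html, application/json;q=0.9 , */*"
print([token.strip() for token in header.split(',')])
# ['text/html', 'application/json;q=0.9', '*/*']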
|
def reverse_sequences(records):
    """Reverse the order of sites in sequences."""
|
logging.info('Applying _reverse_sequences generator: '
             'reversing the order of sites in sequences.')
for record in records:
    rev_record = SeqRecord(record.seq[::-1], id=record.id,
                           name=record.name,
                           description=record.description)
    # Copy the annotations over
    _reverse_annotations(record, rev_record)
    yield rev_record
|
def parse_components(text, field_datatype='ST', version=None, encoding_chars=None,
                     validation_level=None, references=None):
    """Parse the given ER7-encoded components and return a list of
    :class:`Component <hl7apy.core.Component>` instances.

    :type text: ``str``
    :param text: the ER7-encoded string containing the components to be parsed

    :type field_datatype: ``str``
    :param field_datatype: the datatype of the components (e.g. ST)

    :type version: ``str``
    :param version: the HL7 version (e.g. "2.5"), or ``None`` to use the default
        (see :func:`set_default_version <hl7apy.set_default_version>`)

    :type encoding_chars: ``dict``
    :param encoding_chars: a dictionary containing the encoding chars or None to use the default
        (see :func:`set_default_encoding_chars <hl7apy.set_default_encoding_chars>`)

    :type validation_level: ``int``
    :param validation_level: the validation level. Possible values are those defined in
        :class:`VALIDATION_LEVEL <hl7apy.consts.VALIDATION_LEVEL>` class or ``None`` to use the default
        validation level (see :func:`set_default_validation_level <hl7apy.set_default_validation_level>`)

    :type references: ``list``
    :param references: A list of the references of the :class:`Component <hl7apy.core.Component>`'s children

    :return: a list of :class:`Component <hl7apy.core.Component>` instances

    >>> components = "NUCLEAR^NELDA^W^^TEST"
    >>> xpn = parse_components(components, field_datatype="XPN")
    >>> print(xpn)
    [<Component XPN_1 (FAMILY_NAME) of type FN>, <Component XPN_2 (GIVEN_NAME) of type ST>, <Component XPN_3 (SECOND_AND_FURTHER_GIVEN_NAMES_OR_INITIALS_THEREOF) of type ST>, <Component XPN_5 (PREFIX_E_G_DR) of type ST>]
    >>> print(parse_components(components))
    [<Component ST (None) of type ST>, <Component ST (None) of type ST>, <Component ST (None) of type ST>, <Component ST (None) of type ST>, <Component ST (None) of type ST>]"""
|
version = _get_version(version)
encoding_chars = _get_encoding_chars(encoding_chars, version)
validation_level = _get_validation_level(validation_level)

component_sep = encoding_chars['COMPONENT']
components = []
for index, component in enumerate(text.split(component_sep)):
    if is_base_datatype(field_datatype, version):
        component_datatype = field_datatype
        component_name = None
    elif field_datatype is None or field_datatype == 'varies':
        component_datatype = None
        component_name = 'VARIES_{0}'.format(index + 1)
    else:
        component_name = "{0}_{1}".format(field_datatype, index + 1)
        component_datatype = None
    try:
        reference = references[component_name]['ref'] if None not in (references, component_name) else None
    except KeyError:
        reference = None
    if component.strip() or component_name is None or component_name.startswith("VARIES_"):
        components.append(parse_component(component, component_name, component_datatype,
                                          version, encoding_chars, validation_level, reference))
return components
|
def _construct_instance(cls, values):
    """Method used to construct instances from query results;
    this is where polymorphic deserialization occurs."""
|
# we're going to take the values, which is from the DB as a dict
# and translate that into our local fields
# the db_map is a db_field -> model field map
if cls._db_map:
    values = dict((cls._db_map.get(k, k), v) for k, v in values.items())

if cls._is_polymorphic:
    disc_key = values.get(cls._discriminator_column_name)
    if disc_key is None:
        raise PolymorphicModelException('discriminator value was not found in values')

    poly_base = cls if cls._is_polymorphic_base else cls._polymorphic_base
    klass = poly_base._get_model_by_discriminator_value(disc_key)
    if klass is None:
        poly_base._discover_polymorphic_submodels()
        klass = poly_base._get_model_by_discriminator_value(disc_key)
        if klass is None:
            raise PolymorphicModelException(
                'unrecognized discriminator column {0} for class {1}'.format(disc_key, poly_base.__name__)
            )

    if not issubclass(klass, cls):
        raise PolymorphicModelException(
            '{0} is not a subclass of {1}'.format(klass.__name__, cls.__name__)
        )

    values = dict((k, v) for k, v in values.items() if k in klass._columns.keys())
else:
    klass = cls

instance = klass(**values)
instance._set_persisted(force=True)
return instance
|
def repo_id(self, repo: str) -> str:
    """Returns a unique identifier from a repo URL for the folder the repo is going to be pulled in."""
|
if repo.startswith("http"):
    repo_id = re.sub(r"https?://(.www)?", "", repo)
    repo_id = re.sub(r"\.git/?$", "", repo_id)
else:
    repo_id = repo.replace("file://", "")
    repo_id = re.sub(r"\.git/?$", "", repo_id)
    if repo_id.startswith("~"):
        repo_id = str(Path(repo_id).resolve())

# replace everything that isn't alphanumeric, a dot or an underscore
# to make sure it's a valid folder name and to keep it readable;
# multiple consecutive invalid characters are replaced with a single underscore
repo_id = re.sub(r"[^a-zA-Z0-9._]+", "_", repo_id)

# and add a hash of the original to make it absolutely unique
return repo_id + hashlib.sha256(repo.encode("utf-8")).hexdigest()
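# --- Illustration (not part of the original source) ---------------------
# A standalone restatement of the naming pipeline for the http branch, so the
# resulting folder name can be inspected without the surrounding class.
import hashlib
import re

def _repo_folder_name(repo: str) -> str:
    cleaned = re.sub(r"https?://(.www)?", "", repo)     # drop the scheme
    cleaned = re.sub(r"\.git/?$", "", cleaned)          # drop a trailing .git
    cleaned = re.sub(r"[^a-zA-Z0-9._]+", "_", cleaned)  # collapse invalid chars
    return cleaned + hashlib.sha256(repo.encode("utf-8")).hexdigest()

name = _repo_folder_name("https://github.com/user/project.git")
print(name[:23])   # github.com_user_project
print(len(name))   # 87: the readable prefix plus 64 hex characters of the digest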
|
def setControl(self, request_type, request, value, index, buffer_or_len,
               callback=None, user_data=None, timeout=0):
    """Setup transfer for control use.

    request_type, request, value, index
        See USBDeviceHandle.controlWrite.
        request_type defines transfer direction (see ENDPOINT_OUT and ENDPOINT_IN).
    buffer_or_len
        Either a string (when sending data), or expected data length (when
        receiving data).
    callback
        Callback function to be invoked on transfer completion.
        Called with transfer as parameter, return value ignored.
    user_data
        User data to pass to callback function.
    timeout
        Transfer timeout in milliseconds. 0 to disable."""
|
if self.__submitted:
    raise ValueError('Cannot alter a submitted transfer')
if self.__doomed:
    raise DoomedTransferError('Cannot reuse a doomed transfer')
if isinstance(buffer_or_len, (int, long)):
    length = buffer_or_len
    # pylint: disable=undefined-variable
    string_buffer, transfer_py_buffer = create_binary_buffer(
        length + CONTROL_SETUP_SIZE,
    )
    # pylint: enable=undefined-variable
else:
    length = len(buffer_or_len)
    string_buffer, transfer_py_buffer = create_binary_buffer(
        CONTROL_SETUP + buffer_or_len,
    )
self.__initialized = False
self.__transfer_buffer = string_buffer
# pylint: disable=undefined-variable
self.__transfer_py_buffer = integer_memoryview(
    transfer_py_buffer,
)[CONTROL_SETUP_SIZE:]
# pylint: enable=undefined-variable
self.__user_data = user_data
libusb1.libusb_fill_control_setup(
    string_buffer, request_type, request, value, index, length)
libusb1.libusb_fill_control_transfer(
    self.__transfer, self.__handle, string_buffer,
    self.__ctypesCallbackWrapper, None, timeout)
self.__callback = callback
self.__initialized = True
|
def is_relative(modname, from_file):
    """Return true if the given module name is relative to the given file name.

    :type modname: str
    :param modname: name of the module we are interested in

    :type from_file: str
    :param from_file:
        path of the module from which modname has been imported

    :rtype: bool
    :return:
        true if the module has been imported relatively to `from_file`"""
|
if not os.path.isdir(from_file):
    from_file = os.path.dirname(from_file)
if from_file in sys.path:
    return False
try:
    stream, _, _ = imp.find_module(modname.split(".")[0], [from_file])
    # Close the stream to avoid ResourceWarnings.
    if stream:
        stream.close()
    return True
except ImportError:
    return False
|
def Jobs():
    """Get the number of jobs that are running and finished, and the number of
    total tools running and finished for those jobs."""
|
jobs = [0, 0, 0, 0]

# get running jobs
try:
    d_client = docker.from_env()
    c = d_client.containers.list(all=False, filters={'label': 'vent-plugin'})
    files = []
    for container in c:
        jobs[1] += 1
        if 'file' in container.attrs['Config']['Labels']:
            if container.attrs['Config']['Labels']['file'] not in files:
                files.append(container.attrs['Config']['Labels']['file'])
    jobs[0] = len(files)
except Exception as e:  # pragma: no cover
    logger.error('Could not get running jobs ' + str(e))

# get finished jobs
try:
    d_client = docker.from_env()
    c = d_client.containers.list(all=True, filters={'label': 'vent-plugin', 'status': 'exited'})

    file_names = []
    tool_names = []
    finished_jobs = []
    path_dirs = PathDirs()
    manifest = join(path_dirs.meta_dir, 'status.json')

    if exists(manifest):
        file_status = 'a'
    else:
        file_status = 'w'

    # get a list of past jobs' file names if status.json exists
    if file_status == 'a':
        with open(manifest, 'r') as infile:
            for line in infile:
                finished_jobs.append(json.loads(line))

        # get a list of file names so we can check against each container
        file_names = [d['FileName'] for d in finished_jobs]

        # multiple tools can run on 1 file. Use a tuple to status check
        tool_names = [(d['FileName'], d['VentPlugin']) for d in finished_jobs]

    for container in c:
        jobs[3] += 1
        if 'file' in container.attrs['Config']['Labels']:
            # make sure the file name and the tool tuple exist because
            # multiple tools can run on 1 file.
            if (container.attrs['Config']['Labels']['file'],
                    container.attrs['Config']['Labels']['vent.name']) not in tool_names:
                # TODO figure out a nicer way of getting desired values
                # from containers.attrs.
                new_file = {}
                new_file['FileName'] = container.attrs['Config']['Labels']['file']
                new_file['VentPlugin'] = container.attrs['Config']['Labels']['vent.name']
                new_file['StartedAt'] = container.attrs['State']['StartedAt']
                new_file['FinishedAt'] = container.attrs['State']['FinishedAt']
                new_file['ID'] = container.attrs['Id'][:12]

                # create/append a json file with all wanted information
                with open(manifest, file_status) as outfile:
                    json.dump(new_file, outfile)
                    outfile.write('\n')

        # delete any containers with 'vent-plugin' in the groups
        if 'vent-plugin' in container.attrs['Config']['Labels']:
            container.remove()

    # add an extra one to account for the file that just finished, if the file
    # was just created, since file_names is processed near the beginning
    if file_status == 'w' and len(file_names) == 1:
        jobs[2] = len(set(file_names)) + 1
    else:
        jobs[2] = len(set(file_names))

    jobs[3] = jobs[3] - jobs[1]
except Exception as e:  # pragma: no cover
    logger.error('Could not get finished jobs ' + str(e))

return tuple(jobs)
|
def rst_to_pypi(contents):
    """Convert the given GitHub RST contents to PyPI RST contents (since some
    RST directives are not available on PyPI).

    Args:
        contents (str): The GitHub compatible RST contents.

    Returns:
        str: The PyPI compatible RST contents."""
|
# The PyPI description does not support the SVG file type.
contents = contents.replace(".svg?pypi=png.from.svg", ".png")

# Convert ``<br class="title">`` to an H1 title
asterisks_length = len(PackageHelper.get_name())
asterisks = "*" * asterisks_length
title = asterisks + "\n" + PackageHelper.get_name() + "\n" + asterisks
contents = re.sub(r"(\.\. raw\:\: html\n)(\n {2,4})(\<br class=\"title\"\>)", title, contents)

# The PyPI description does not support raw HTML
contents = re.sub(r"(\.\. raw\:\: html\n)((\n {2,4})([A-Za-z0-9<>\ =\"\/])*)*", "", contents)

return contents
|
def build_journals_re_kb(fpath):
    """Load journals regexps knowledge base.

    @see build_journals_kb"""
|
def make_tuple(match):
    regexp = match.group('seek')
    repl = match.group('repl')
    return regexp, repl

kb = []
with file_resolving(fpath) as fh:
    for rawline in fh:
        if rawline.startswith('#'):
            continue
        # Extract the seek->replace terms from this KB line:
        m_kb_line = re_kb_line.search(rawline)
        kb.append(make_tuple(m_kb_line))
return kb
|
def del_node(self, char, node):
    """Remove a node from a character."""
|
del self._real.character[char].node[node]
for cache in (
        self._char_nodes_rulebooks_cache,
        self._node_stat_cache,
        self._node_successors_cache
):
    try:
        del cache[char][node]
    except KeyError:
        pass
if char in self._char_nodes_cache and node in self._char_nodes_cache[char]:
    self._char_nodes_cache[char] = self._char_nodes_cache[char] - frozenset([node])
if char in self._portal_stat_cache:
    portal_stat_cache_char = self._portal_stat_cache[char]
    if node in portal_stat_cache_char:
        del portal_stat_cache_char[node]
    for charo in portal_stat_cache_char.values():
        if node in charo:
            del charo[node]
if char in self._char_portals_rulebooks_cache:
    portal_rulebook_cache_char = self._char_portals_rulebooks_cache[char]
    if node in portal_rulebook_cache_char:
        del portal_rulebook_cache_char[node]
    for porto in portal_rulebook_cache_char.values():
        if node in porto:
            del porto[node]
|
def prompt_for_project(ctx, entity):
    """Ask the user for a project, creating one if necessary."""
|
result = ctx.invoke(projects, entity=entity, display=False)
try:
    if len(result) == 0:
        project = click.prompt("Enter a name for your first project")
        # description = editor()
        project = api.upsert_project(project, entity=entity)["name"]
    else:
        project_names = [project["name"] for project in result]
        question = {
            'type': 'list',
            'name': 'project_name',
            'message': "Which project should we use?",
            'choices': project_names + ["Create New"]
        }
        result = whaaaaat.prompt([question])
        if result:
            project = result['project_name']
        else:
            project = "Create New"
        # TODO: check with the server if the project exists
        if project == "Create New":
            project = click.prompt("Enter a name for your new project", value_proc=api.format_project)
            # description = editor()
            project = api.upsert_project(project, entity=entity)["name"]
except wandb.apis.CommError as e:
    raise ClickException(str(e))
return project
|
def decode(value: str) -> Union[str, None, bool, int, float]:
    """Decode encoded credential attribute value.

    :param value: numeric string to decode
    :return: decoded value, stringified if original was neither str, bool, int, nor float"""
|
assert value.isdigit() or value[0] == '-' and value[1:].isdigit()

if -I32_BOUND <= int(value) < I32_BOUND:  # it's an i32: it is its own encoding
    return int(value)
elif int(value) == I32_BOUND:
    return None

(prefix, value) = (int(value[0]), int(value[1:]))
ival = int(value) - I32_BOUND
if ival == 0:
    return ''  # special case: empty string encodes as 2**31
elif ival == 1:
    return False  # sentinel for bool False
elif ival == 2:
    return True  # sentinel for bool True

blen = ceil(log(ival, 16) / 2)
ibytes = unhexlify(ival.to_bytes(blen, 'big'))
return DECODE_PREFIX.get(prefix, str)(ibytes.decode())
|
def to_python(self, value):
    """Strips any dodgy HTML tags from the input."""
|
if value in self.empty_values:
    try:
        return self.empty_value
    except AttributeError:
        # CharField.empty_value was introduced in Django 1.11; in prior
        # versions a unicode string was returned for empty values in
        # all cases.
        return u''
return bleach.clean(value, **self.bleach_options)
|
def fit(self, X, y=None):
    """Determine the categorical columns to be dummy encoded.

    Parameters
    ----------
    X : pandas.DataFrame or dask.dataframe.DataFrame
    y : ignored

    Returns
    -------
    self"""
|
self.columns_ = X.columns
columns = self.columns
if columns is None:
    columns = X.select_dtypes(include=["category"]).columns
else:
    for column in columns:
        assert is_categorical_dtype(X[column]), "Must be categorical"

self.categorical_columns_ = columns
self.non_categorical_columns_ = X.columns.drop(self.categorical_columns_)

if _HAS_CTD:
    self.dtypes_ = {col: X[col].dtype for col in self.categorical_columns_}
else:
    self.dtypes_ = {
        col: (X[col].cat.categories, X[col].cat.ordered)
        for col in self.categorical_columns_
    }

left = len(self.non_categorical_columns_)
self.categorical_blocks_ = {}
for col in self.categorical_columns_:
    right = left + len(X[col].cat.categories)
    if self.drop_first:
        right -= 1
    self.categorical_blocks_[col], left = slice(left, right), right

if isinstance(X, pd.DataFrame):
    sample = X.iloc[:1]
else:
    sample = X._meta_nonempty

self.transformed_columns_ = pd.get_dummies(sample, drop_first=self.drop_first).columns
return self
|
def save(self, commit=True):
    """Save model to database."""
|
db.session.add(self)
if commit:
    db.session.commit()
return self
|
def batch(byte_array, funcs):
    """Converts a batch to a list of values.

    :param byte_array: a byte array of length n * item_length + 8
    :return: a list of uuid objects"""
|
result = []
length = bytes_to_int(byte_array[0:4])
item_size = bytes_to_int(byte_array[4:8])
for i in range(0, length):
    chunk = byte_array[8 + i * item_size:8 + (i + 1) * item_size]
    for f in funcs:
        f(chunk)
return result
|
def password(name, default=None):
    """Grabs hidden (password) input from the command line.

    :param name: prompt text
    :param default: default value if no input provided."""
|
prompt = name + (default and ' [%s]' % default or '')
prompt += name.endswith('?') and ' ' or ': '
while True:
    rv = getpass.getpass(prompt)
    if rv:
        return rv
    if default is not None:
        return default
|
def read(self, num_bytes=None):
    """Read and return the specified bytes from the buffer."""
|
res = self.get_next(num_bytes)
self.skip(len(res))
return res
|
def _at_dump_options(self, calculator, rule, scope, block):
    """Implements @dump_options."""
|
sys.stderr.write("%s\n" % repr(rule.options))
|
def build_wxsfile(target, source, env):
    """Compiles a .wxs file from the keywords given in env['msi_spec'] and
    by analyzing the tree of source nodes and their tags."""
|
file = open(target[0].get_abspath(), 'w')
try:
    # Create a document with the Wix root tag
    doc = Document()
    root = doc.createElement('Wix')
    root.attributes['xmlns'] = 'http://schemas.microsoft.com/wix/2003/01/wi'
    doc.appendChild(root)

    filename_set = []  # this is to circumvent duplicates in the shortnames
    id_set = {}        # this is to circumvent duplicates in the ids

    # Create the content
    build_wxsfile_header_section(root, env)
    build_wxsfile_file_section(root, source, env['NAME'], env['VERSION'], env['VENDOR'], filename_set, id_set)
    generate_guids(root)
    build_wxsfile_features_section(root, source, env['NAME'], env['VERSION'], env['SUMMARY'], id_set)
    build_wxsfile_default_gui(root)
    build_license_file(target[0].get_dir(), env)

    # write the xml to a file
    file.write(doc.toprettyxml())

    # call a user specified function
    if 'CHANGE_SPECFILE' in env:
        env['CHANGE_SPECFILE'](target, source)
except KeyError as e:
    raise SCons.Errors.UserError('"%s" package field for MSI is missing.' % e.args[0])
|
def fill(h1: Histogram1D, ax: Axes, **kwargs):
    """Fill plot of 1D histogram."""
|
show_stats = kwargs.pop("show_stats", False)
# show_values = kwargs.pop("show_values", False)
density = kwargs.pop("density", False)
cumulative = kwargs.pop("cumulative", False)
kwargs["label"] = kwargs.get("label", h1.name)

data = get_data(h1, cumulative=cumulative, density=density)
_apply_xy_lims(ax, h1, data, kwargs)
_add_ticks(ax, h1, kwargs)
_add_labels(ax, h1, kwargs)

ax.fill_between(h1.bin_centers, 0, data, **kwargs)

if show_stats:
    _add_stats_box(h1, ax, stats=show_stats)
# if show_values:
#     _add_values(ax, h1, data)
return ax
|
def eval_master_func(opts):
    '''Evaluate the master function if the master type is 'func'
    and save its result in opts['master'].'''
|
if '__master_func_evaluated' not in opts:
    # split module and function and try loading the module
    mod_fun = opts['master']
    mod, fun = mod_fun.split('.')
    try:
        master_mod = salt.loader.raw_mod(opts, mod, fun)
        if not master_mod:
            raise KeyError
        # we take whatever the module returns as the master address
        opts['master'] = master_mod[mod_fun]()
        # Check for valid types
        if not isinstance(opts['master'], (six.string_types, list)):
            raise TypeError
        opts['__master_func_evaluated'] = True
    except KeyError:
        log.error('Failed to load module %s', mod_fun)
        sys.exit(salt.defaults.exitcodes.EX_GENERIC)
    except TypeError:
        log.error('%s returned from %s is not a string', opts['master'], mod_fun)
        sys.exit(salt.defaults.exitcodes.EX_GENERIC)
    log.info('Evaluated master from module: %s', mod_fun)
|
def due(self):
    """The amount due for this invoice. Takes into account all entities in the invoice.

    Can be < 0 if the invoice was overpaid."""
|
invoice_charges = Charge.objects.filter(invoice=self)
invoice_transactions = Transaction.successful.filter(invoice=self)
return total_amount(invoice_charges) - total_amount(invoice_transactions)
|
def get_list_connections(self, environment, product, unique_name_list=None, is_except=False):
    """Gets the list of connections that satisfy the filter by environment, product
    and (optionally) unique DB names.

    :param environment: Environment name
    :param product: Product name
    :param unique_name_list: list of unique db aliases
    :param is_except: take the connections with the aliases provided or, the other way
        around, take all the rest
    :return: list of dictionaries with connections"""
|
return_list = []
for item in self.connection_sets:
    if unique_name_list:
        if item['unique_name']:
            if is_except:
                if item['environment'] == environment and item['product'] == product \
                        and (item['unique_name'] not in unique_name_list):
                    return_list.append(item)
            elif not is_except:
                if item['environment'] == environment and item['product'] == product \
                        and (item['unique_name'] in unique_name_list):
                    return_list.append(item)
    else:
        if item['environment'] == environment and item['product'] == product:
            return_list.append(item)
return return_list
|
def read_frame(self):
    """Reads a frame and converts the color if needed.

    In case no frame is available, i.e. self.capture.read() returns False
    as the first return value, the event_source of the TimedAnimation is
    stopped, and if possible the capture source released.

    Returns:
        None if stopped, otherwise the color converted source image."""
|
ret, frame = self.capture.read()
if not ret:
    self.event_source.stop()
    try:
        self.capture.release()
    except AttributeError:
        # has no release method, thus just pass
        pass
    return None
if self.convert_color != -1 and is_color_image(frame):
    return cv2.cvtColor(frame, self.convert_color)
return frame
|
def render(self, **kwargs):
    """Renders the HTML representation of the element."""
|
return self._template.render(this=self, kwargs=kwargs)
|
def get(key, default=None):
    '''Get a (list of) value(s) from the minion datastore.

    .. versionadded:: 2015.8.0

    CLI Example:

    .. code-block:: bash

        salt '*' data.get key
        salt '*' data.get '["key1", "key2"]'
    '''
|
store = load()
if isinstance(key, six.string_types):
    return store.get(key, default)
elif default is None:
    return [store[k] for k in key if k in store]
else:
    return [store.get(k, default) for k in key]
|
def determine_plasma_store_config(object_store_memory=None, plasma_directory=None, huge_pages=False):
    """Figure out how to configure the plasma object store.

    This will determine which directory to use for the plasma store (e.g.,
    /tmp or /dev/shm) and how much memory to start the store with. On Linux,
    we will try to use /dev/shm unless the shared memory file system is too
    small, in which case we will fall back to /tmp. If any of the object store
    memory or plasma directory parameters are specified by the user, then those
    values will be preserved.

    Args:
        object_store_memory (int): The user-specified object store memory
            parameter.
        plasma_directory (str): The user-specified plasma directory parameter.
        huge_pages (bool): The user-specified huge pages parameter.

    Returns:
        A tuple of the object store memory to use and the plasma directory to
        use. If either of these values is specified by the user, then that
        value will be preserved."""
|
system_memory = ray.utils.get_system_memory()

# Choose a default object store size.
if object_store_memory is None:
    object_store_memory = int(system_memory * 0.3)
    # Cap memory to avoid memory waste and perf issues on large nodes
    if object_store_memory > ray_constants.DEFAULT_OBJECT_STORE_MAX_MEMORY_BYTES:
        logger.warning(
            "Warning: Capping object memory store to {}GB. ".format(
                ray_constants.DEFAULT_OBJECT_STORE_MAX_MEMORY_BYTES // 1e9)
            + "To increase this further, specify `object_store_memory` "
            "when calling ray.init() or ray start.")
        object_store_memory = ray_constants.DEFAULT_OBJECT_STORE_MAX_MEMORY_BYTES

# Determine which directory to use. By default, use /tmp on MacOS and
# /dev/shm on Linux, unless the shared-memory file system is too small,
# in which case we default to /tmp on Linux.
if plasma_directory is None:
    if sys.platform == "linux" or sys.platform == "linux2":
        shm_avail = ray.utils.get_shared_memory_bytes()
        # Compare the requested memory size to the memory available in
        # /dev/shm.
        if shm_avail > object_store_memory:
            plasma_directory = "/dev/shm"
        else:
            plasma_directory = "/tmp"
            logger.warning(
                "WARNING: The object store is using /tmp instead of "
                "/dev/shm because /dev/shm has only {} bytes available. "
                "This may slow down performance! You may be able to free "
                "up space by deleting files in /dev/shm or terminating "
                "any running plasma_store_server processes. If you are "
                "inside a Docker container, you may need to pass an "
                "argument with the flag '--shm-size' to 'docker run'.".format(shm_avail))
    else:
        plasma_directory = "/tmp"

    # Do some sanity checks.
    if object_store_memory > system_memory:
        raise Exception("The requested object store memory size is greater "
                        "than the total available memory.")
else:
    plasma_directory = os.path.abspath(plasma_directory)
    logger.warning("WARNING: object_store_memory is not verified when "
                   "plasma_directory is set.")

if not os.path.isdir(plasma_directory):
    raise Exception("The file {} does not exist or is not a directory.".format(plasma_directory))

return object_store_memory, plasma_directory
|
def find_problems(cls):
    """Checks for problems in the tree structure. Problems can occur when:

    1. your code breaks and you get incomplete transactions (always
       use transactions!)
    2. changing the ``steplen`` value in a model (you must
       :meth:`dump_bulk` first, change ``steplen`` and then
       :meth:`load_bulk`)

    :returns: A tuple of five lists:

        1. a list of ids of nodes with characters not found in the
           ``alphabet``
        2. a list of ids of nodes with a wrong ``path`` length
           according to ``steplen``
        3. a list of ids of orphaned nodes
        4. a list of ids of nodes with the wrong depth value for
           their path
        5. a list of ids of nodes that report a wrong number of children"""
|
cls = get_result_class(cls)
vendor = cls.get_database_vendor('write')
evil_chars, bad_steplen, orphans = [], [], []
wrong_depth, wrong_numchild = [], []
for node in cls.objects.all():
    found_error = False
    for char in node.path:
        if char not in cls.alphabet:
            evil_chars.append(node.pk)
            found_error = True
            break
    if found_error:
        continue
    if len(node.path) % cls.steplen:
        bad_steplen.append(node.pk)
        continue
    try:
        node.get_parent(True)
    except cls.DoesNotExist:
        orphans.append(node.pk)
        continue
    if node.depth != int(len(node.path) / cls.steplen):
        wrong_depth.append(node.pk)
        continue
    real_numchild = cls.objects.filter(
        path__range=cls._get_children_path_interval(node.path)
    ).extra(
        where=[(sql_length("path", vendor=vendor) + '/%d=%d') % (cls.steplen, node.depth + 1)]
    ).count()
    if real_numchild != node.numchild:
        wrong_numchild.append(node.pk)
        continue
return evil_chars, bad_steplen, orphans, wrong_depth, wrong_numchild
|
def msg2subjective(msg, processor, subject, **config):
    """Return a human-readable text representation of a dict-like
    fedmsg message from the subjective perspective of a user.

    For example, if the subject viewing the message is "oddshocks"
    and the message would normally translate into "oddshocks commented on
    ticket #174", it would instead translate into "you commented on ticket
    #174"."""
|
text = processor.subjective(msg, subject, **config)
if not text:
    text = processor.subtitle(msg, **config)
return text
|
def get_nameid_format(self):
    """Gets the NameID Format provided by the SAML Response from the IdP.

    :returns: NameID Format
    :rtype: string|None"""
|
nameid_format = None
nameid_data = self.get_nameid_data()
if nameid_data and 'Format' in nameid_data.keys():
    nameid_format = nameid_data['Format']
return nameid_format
|
async def filter_commands(self, commands, *, sort=False, key=None):
    """|coro|

    Returns a filtered list of commands and optionally sorts them.

    This takes into account the :attr:`verify_checks` and :attr:`show_hidden`
    attributes.

    Parameters
    ----------
    commands: Iterable[:class:`Command`]
        An iterable of commands that are getting filtered.
    sort: :class:`bool`
        Whether to sort the result.
    key: Optional[Callable[:class:`Command`, Any]]
        An optional key function to pass to :func:`py:sorted` that
        takes a :class:`Command` as its sole parameter. If ``sort`` is
        passed as ``True`` then this will default as the command name.

    Returns
    -------
    List[:class:`Command`]
        A list of commands that passed the filter."""
|
if sort and key is None:
    key = lambda c: c.name

iterator = commands if self.show_hidden else filter(lambda c: not c.hidden, commands)

if not self.verify_checks:
    # if we do not need to verify the checks then we can just
    # run it straight through normally without using await.
    return sorted(iterator, key=key) if sort else list(iterator)

# if we're here then we need to check every command if it can run
async def predicate(cmd):
    try:
        return await cmd.can_run(self.context)
    except CommandError:
        return False

ret = []
for cmd in iterator:
    valid = await predicate(cmd)
    if valid:
        ret.append(cmd)

if sort:
    ret.sort(key=key)
return ret
|
def main(arguments=None):
    """The main function used when ``yaml_to_database.py`` is installed as a cl tool."""
|
# setup the command-line util settings
su = tools(
    arguments=arguments,
    docString=__doc__,
    logLevel="WARNING",
    options_first=False,
    projectName=False
)
arguments, settings, log, dbConn = su.setup()

# unpack remaining cl arguments using `exec` to setup the variable names
# automatically
for arg, val in arguments.iteritems():
    if arg[0] == "-":
        varname = arg.replace("-", "") + "Flag"
    else:
        varname = arg.replace("<", "").replace(">", "")
    if isinstance(val, str) or isinstance(val, unicode):
        exec(varname + " = '%s'" % (val,))
    else:
        exec(varname + " = %s" % (val,))
    if arg == "--dbConn":
        dbConn = val
    log.debug('%s = %s' % (varname, val,))

from fundamentals.mysql import sqlite2mysql
converter = sqlite2mysql(
    log=log,
    settings=settings,
    pathToSqlite=pathToSqliteDB,
    tablePrefix=tablePrefix,
    dbConn=dbConn
)
converter.convert_sqlite_to_mysql()

return
|
def decode(self, encoded):
    """Decodes a tensor into a sequence.

    Args:
        encoded (torch.Tensor): Encoded sequence.

    Returns:
        str: Sequence decoded from ``encoded``."""
|
encoded = super().decode(encoded)
return self.tokenizer.decode([self.itos[index] for index in encoded])
|
def extract_public_key(args):
    """Load an ECDSA private key and extract the embedded public key as raw binary data."""
|
sk = _load_ecdsa_signing_key(args)
vk = sk.get_verifying_key()
args.public_keyfile.write(vk.to_string())
print("%s public key extracted to %s" % (args.keyfile.name, args.public_keyfile.name))
|
def get_unique_connection_configs(config=None):
    """Returns a list of unique Redis connections from config."""
|
if config is None:
    from .settings import QUEUES
    config = QUEUES

connection_configs = []
for key, value in config.items():
    value = filter_connection_params(value)
    if value not in connection_configs:
        connection_configs.append(value)
return connection_configs
|
def nonce ( ) :
"""Returns a new nonce to be used with the Piazza API ."""
|
nonce_part1 = _int2base ( int ( _time ( ) * 1000 ) , 36 )
nonce_part2 = _int2base ( round ( _random ( ) * 1679616 ) , 36 )
return "{}{}" . format ( nonce_part1 , nonce_part2 )
|
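For illustration, a self-contained approximation of what the helpers used above presumably do (_time as time.time, _random as random.random, and _int2base as a base-36 encoder); the helper body below is an assumption, not the library's code.

import random
import string
import time

def int2base(x, base, digits=string.digits + string.ascii_lowercase):
    # Minimal base-N encoder assumed to mirror the _int2base helper.
    x = int(x)
    out = ''
    while x:
        x, rem = divmod(x, base)
        out = digits[rem] + out
    return out or '0'

part1 = int2base(int(time.time() * 1000), 36)            # millisecond timestamp
part2 = int2base(round(random.random() * 1679616), 36)   # 1679616 == 36 ** 4
print("{}{}".format(part1, part2))                       # e.g. 'lwz3k5f0q2hx'
|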
def convert_to_ns ( self , value ) :
'''converts a value to the prefixed rdf ns equivalent . If not found
returns the value as is
args :
value : the value to convert'''
|
parsed = self . parse_uri ( value )
try :
rtn_val = "%s_%s" % ( self . uri_dict [ parsed [ 0 ] ] , parsed [ 1 ] )
except KeyError :
rtn_val = self . pyhttp ( value )
return rtn_val
|
def Stop ( self , join_timeout = 600 ) :
"""This stops all the worker threads ."""
|
if not self . started :
logging . warning ( "Tried to stop a thread pool that was not running." )
return
# Remove all workers from the pool .
workers = list ( itervalues ( self . _workers ) )
self . _workers = { }
self . _workers_ro_copy = { }
# Send a stop message to all the workers . We need to be careful here to not
# send messages while we are still counting . If threads that haven ' t been
# counted yet pick up a message and exit , the count will be off and the
# shutdown process will deadlock .
stop_messages_needed = 0
for worker in workers :
if worker . isAlive ( ) :
stop_messages_needed += 1
for _ in range ( stop_messages_needed ) :
self . _queue . put ( STOP_MESSAGE )
self . started = False
self . Join ( )
# Wait for the threads to all exit now .
for worker in workers :
worker . join ( join_timeout )
if worker . isAlive ( ) :
raise RuntimeError ( "Threadpool worker did not finish in time." )
|
def _prep_clients ( self , clients ) :
"""Prep a client by tagging it with and id and wrapping methods .
Methods are wrapper to catch ConnectionError so that we can remove
it from the pool until the instance comes back up .
: returns : patched clients"""
|
for pool_id , client in enumerate ( clients ) : # Tag it with an id we ' ll use to identify it in the pool
if hasattr ( client , "pool_id" ) :
raise ValueError ( "%r is already part of a pool." % client )
setattr ( client , "pool_id" , pool_id )
# Wrap all public functions
self . _wrap_functions ( client )
return clients
|
def to_graphml ( graph : BELGraph , path : Union [ str , BinaryIO ] ) -> None :
"""Write this graph to GraphML XML file using : func : ` networkx . write _ graphml ` .
The . graphml file extension is suggested so Cytoscape can recognize it ."""
|
rv = nx . MultiDiGraph ( )
for node in graph :
rv . add_node ( node . as_bel ( ) , function = node . function )
for u , v , key , edge_data in graph . edges ( data = True , keys = True ) :
rv . add_edge ( u . as_bel ( ) , v . as_bel ( ) , interaction = edge_data [ RELATION ] , bel = graph . edge_to_bel ( u , v , edge_data ) , key = key , )
nx . write_graphml ( rv , path )
|
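A short usage sketch, assuming pybel is installed; the graph contents and the output path are illustrative.

import pybel

graph = pybel.BELGraph(name="example graph")
# ... nodes and edges would be added to the graph here ...
to_graphml(graph, "example_graph.graphml")  # the .graphml file opens in Cytoscape
|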
def fit_creatine ( self , reject_outliers = 3.0 , fit_lb = 2.7 , fit_ub = 3.5 ) :
"""Fit a model to the portion of the summed spectra containing the
creatine and choline signals .
Parameters
reject _ outliers : float or bool
If set to a float , this is the z score threshold for rejection ( on
any of the parameters ) . If set to False , no outlier rejection
fit _ lb , fit _ ub : float
What part of the spectrum ( in ppm ) contains the creatine peak .
Default ( 2.7 , 3.5)
Note
We use upper and lower bounds that are a variation on the bounds
mentioned on the GANNET ISMRM2013 poster [ 1 ] _ .
[1 ] RAE Edden et al ( 2013 ) . Gannet GABA analysis toolkit . ISMRM
conference poster ."""
|
# We fit a two - lorentz function to this entire chunk of the spectrum ,
# to catch both choline and creatine
model , signal , params = ana . fit_two_lorentzian ( self . sum_spectra , self . f_ppm , lb = fit_lb , ub = fit_ub )
# Use an array of ones to index everything but the outliers and nans :
ii = np . ones ( signal . shape [ 0 ] , dtype = bool )
# Reject outliers :
if reject_outliers :
model , signal , params , ii = self . _outlier_rejection ( params , model , signal , ii )
# We ' ll keep around a private attribute to tell us which transients
# were good ( this is for both creatine and choline ) :
self . _cr_transients = np . where ( ii )
# Now we separate choline and creatine params from each other ( remember
# that they both share offset and drift ! ) :
self . choline_params = params [ : , ( 0 , 2 , 4 , 6 , 8 , 9 ) ]
self . creatine_params = params [ : , ( 1 , 3 , 5 , 7 , 8 , 9 ) ]
self . cr_idx = ut . make_idx ( self . f_ppm , fit_lb , fit_ub )
# We ' ll need to generate the model predictions from these parameters ,
# because what we ' re holding in ' model ' is for both together :
self . choline_model = np . zeros ( ( self . creatine_params . shape [ 0 ] , np . abs ( self . cr_idx . stop - self . cr_idx . start ) ) )
self . creatine_model = np . zeros ( ( self . choline_params . shape [ 0 ] , np . abs ( self . cr_idx . stop - self . cr_idx . start ) ) )
for idx in range ( self . creatine_params . shape [ 0 ] ) :
self . creatine_model [ idx ] = ut . lorentzian ( self . f_ppm [ self . cr_idx ] , * self . creatine_params [ idx ] )
self . choline_model [ idx ] = ut . lorentzian ( self . f_ppm [ self . cr_idx ] , * self . choline_params [ idx ] )
self . creatine_signal = signal
self . creatine_auc = self . _calc_auc ( ut . lorentzian , self . creatine_params , self . cr_idx )
self . choline_auc = self . _calc_auc ( ut . lorentzian , self . choline_params , self . cr_idx )
|
def create_tag ( self , version , params ) :
"""Create VCS tag
: param version :
: param params :
: return :"""
|
cmd = self . _command . tag ( version , params )
( code , stdout , stderr ) = self . _exec ( cmd )
if code :
raise errors . VCSError ( 'Can\'t create VCS tag %s. Process exited with code %d and message: %s' % ( version , code , stderr or stdout ) )
|
def _create_vxr ( self , f , recStart , recEnd , currentVDR , priorVXR , vvrOffset ) :
'''Create a VXR AND use a VXR
Parameters :
f : file
The open CDF file
recStart : int
The start record of this block
recEnd : int
The ending record of this block
currentVDR : int
The byte location of the variables VDR
priorVXR : int
The byte location of the previous VXR
vvrOffset : int
The byte location of the VVR
Returns :
vxroffset : int
The byte location of the created vxr'''
|
# add a VXR , use an entry , and link it to the prior VXR if it exists
vxroffset = self . _write_vxr ( f )
self . _use_vxrentry ( f , vxroffset , recStart , recEnd , vvrOffset )
if ( priorVXR == 0 ) : # VDR ' s VXRhead
self . _update_offset_value ( f , currentVDR + 28 , 8 , vxroffset )
else : # VXR ' s next
self . _update_offset_value ( f , priorVXR + 12 , 8 , vxroffset )
# VDR ' s VXRtail
self . _update_offset_value ( f , currentVDR + 36 , 8 , vxroffset )
return vxroffset
|
def arducopter_arm ( self ) :
'''arm motors ( arducopter only )'''
|
if self . mavlink10 ( ) :
self . mav . command_long_send ( self . target_system , # target _ system
self . target_component , mavlink . MAV_CMD_COMPONENT_ARM_DISARM , # command
0 , # confirmation
1 , # param1 ( 1 to indicate arm )
0 , # param2 ( all other params meaningless )
0 , # param3
0 , # param4
0 , # param5
0 , # param6
0 )
|
def _dispatch ( self , input_batch : List [ SingleQuery ] ) :
"""Helper method to dispatch a batch of input to self . serve _ method ."""
|
method = getattr ( self , self . serve_method )
if hasattr ( method , "ray_serve_batched_input" ) :
batch = [ inp . data for inp in input_batch ]
result = _execute_and_seal_error ( method , batch , self . serve_method )
for res , inp in zip ( result , input_batch ) :
ray . worker . global_worker . put_object ( inp . result_object_id , res )
else :
for inp in input_batch :
result = _execute_and_seal_error ( method , inp . data , self . serve_method )
ray . worker . global_worker . put_object ( inp . result_object_id , result )
|
def add ( queue_name , payload = None , content_type = None , source = None , task_id = None , build_id = None , release_id = None , run_id = None ) :
"""Adds a work item to a queue .
Args :
queue _ name : Name of the queue to add the work item to .
payload : Optional . Payload that describes the work to do as a string .
If not a string and content _ type is not provided , then this
function assumes the payload is a JSON - able Python object .
content _ type : Optional . Content type of the payload .
source : Optional . Who or what originally created the task .
task _ id : Optional . When supplied , only enqueue this task if a task
with this ID does not already exist . If a task with this ID already
exists , then this function will do nothing .
build _ id : Build ID to associate with this task . May be None .
release _ id : Release ID to associate with this task . May be None .
run _ id : Run ID to associate with this task . May be None .
Returns :
ID of the task that was added ."""
|
if task_id :
task = WorkQueue . query . filter_by ( task_id = task_id ) . first ( )
if task :
return task . task_id
else :
task_id = uuid . uuid4 ( ) . hex
if payload and not content_type and not isinstance ( payload , basestring ) :
payload = json . dumps ( payload )
content_type = 'application/json'
now = datetime . datetime . utcnow ( )
task = WorkQueue ( task_id = task_id , queue_name = queue_name , eta = now , source = source , build_id = build_id , release_id = release_id , run_id = run_id , payload = payload , content_type = content_type )
db . session . add ( task )
return task . task_id
|
def build ( self , filename , bytecode_compile = True ) :
"""Package the PEX into a zipfile .
: param filename : The filename where the PEX should be stored .
: param bytecode _ compile : If True , precompile . py files into . pyc files .
If the PEXBuilder is not yet frozen , it will be frozen by ` ` build ` ` . This renders the
PEXBuilder immutable ."""
|
if not self . _frozen :
self . freeze ( bytecode_compile = bytecode_compile )
try :
os . unlink ( filename + '~' )
self . _logger . warn ( 'Previous binary unexpectedly exists, cleaning: %s' % ( filename + '~' ) )
except OSError : # The expectation is that the file does not exist , so continue
pass
if os . path . dirname ( filename ) :
safe_mkdir ( os . path . dirname ( filename ) )
with open ( filename + '~' , 'ab' ) as pexfile :
assert os . path . getsize ( pexfile . name ) == 0
pexfile . write ( to_bytes ( '%s\n' % self . _shebang ) )
self . _chroot . zip ( filename + '~' , mode = 'a' )
if os . path . exists ( filename ) :
os . unlink ( filename )
os . rename ( filename + '~' , filename )
chmod_plus_x ( filename )
|
def add_data ( self , new_cols = None ) :
"""Adds a column with the requested data .
If you want to see for example the mass , the colormap used in
jmol and the block of the element , just use : :
[ ' mass ' , ' jmol _ color ' , ' block ' ]
The underlying ` ` pd . DataFrame ` ` can be accessed with
` ` constants . elements ` ` .
To see all available keys use ` ` constants . elements . info ( ) ` ` .
The data comes from the module ` mendeleev
< http : / / mendeleev . readthedocs . org / en / latest / > ` _ written
by Lukasz Mentel .
Please note that I added four columns to the mendeleev data : :
[ ' atomic _ radius _ cc ' , ' atomic _ radius _ gv ' , ' gv _ color ' ,
' valency ' ]
The ` ` atomic _ radius _ cc ` ` is used by default by this module
for determining bond lengths .
The three others are taken from the MOLCAS grid viewer written
by Valera Veryazov .
Args :
new _ cols ( str ) : You can also pass just one value .
E . g . ` ` ' mass ' ` ` is equivalent to ` ` [ ' mass ' ] ` ` . If
` ` new _ cols ` ` is ` ` None ` ` all available data
is returned .
inplace ( bool ) :
Returns :
Cartesian :"""
|
atoms = self [ 'atom' ]
data = constants . elements
if pd . api . types . is_list_like ( new_cols ) :
new_cols = set ( new_cols )
elif new_cols is None :
new_cols = set ( data . columns )
else :
new_cols = [ new_cols ]
new_frame = data . loc [ atoms , set ( new_cols ) - set ( self . columns ) ]
new_frame . index = self . index
return self . __class__ ( pd . concat ( [ self . _frame , new_frame ] , axis = 1 ) )
|
def AsRegEx ( self ) :
"""Return the current glob as a simple regex .
Note : No interpolation is performed .
Returns :
A RegularExpression ( ) object ."""
|
parts = self . __class__ . REGEX_SPLIT_PATTERN . split ( self . _value )
result = u"" . join ( self . _ReplaceRegExPart ( p ) for p in parts )
return rdf_standard . RegularExpression ( u"(?i)\\A%s\\Z" % result )
|
def add_issue_status ( self , name , ** attrs ) :
"""Add a Issue status to the project and returns a
: class : ` IssueStatus ` object .
: param name : name of the : class : ` IssueStatus `
: param attrs : optional attributes for : class : ` IssueStatus `"""
|
return IssueStatuses ( self . requester ) . create ( self . id , name , ** attrs )
|
def find_unique_values ( input_file , property_name ) :
'''Find unique values of a given property in a geojson file .
Args
input _ file ( str ) : File name .
property _ name ( str ) : Property name .
Returns
List of distinct values of property . If property does not exist , it returns None .'''
|
with open ( input_file ) as f :
feature_collection = geojson . load ( f )
features = feature_collection [ 'features' ]
values = np . array ( [ feat [ 'properties' ] . get ( property_name ) for feat in features ] )
return np . unique ( values )
|
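A hypothetical call to the function above; the file name and the property are illustrative placeholders.

# List the distinct zoning labels found in a GeoJSON file.
zones = find_unique_values('parcels.geojson', 'zoning')
print(zones)  # e.g. array(['commercial', 'industrial', 'residential'], dtype=object)
|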
def ximshow_file ( singlefile , args_cbar_label = None , args_cbar_orientation = None , args_z1z2 = None , args_bbox = None , args_firstpix = None , args_keystitle = None , args_ds9reg = None , args_geometry = "0,0,640,480" , pdf = None , show = True , debugplot = None , using_jupyter = False ) :
"""Function to execute ximshow ( ) as called from command line .
Parameters
singlefile : string
Name of the FITS file to be displayed .
args _ cbar _ label : string
Color bar label .
args _ cbar _ orientation : string
Color bar orientation : valid options are ' horizontal ' or
' vertical ' .
args _ z1z2 : string or None
String providing the image cuts tuple : z1 , z2 , minmax or None
args _ bbox : string or None
String providing the bounding box tuple : nc1 , nc2 , ns1 , ns2
args _ firstpix : string or None
String providing the coordinates of lower left pixel .
args _ keystitle : string or None
String of FITS keywords and format , of the form : key1 , key2 , . . . , keyn . format
args _ ds9reg : file handler
Ds9 region file to be overplotted .
args _ geometry : string or None
Tuple x , y , dx , dy to define the window geometry . This
information is ignored if pdf is not None .
pdf : PdfFile object or None
If not None , output is sent to PDF file .
show : bool
If True , the function shows the displayed image . Otherwise
the function just invoke the plt . imshow ( ) function and
plt . show ( ) is expected to be executed outside .
debugplot : integer or None
Determines whether intermediate computations and / or plots
are displayed . The valid codes are defined in
numina . array . display . pause _ debugplot .
using _ jupyter : bool
If True , this function is called from a jupyter notebook .
Returns
ax : axes object
Matplotlib axes instance . This value is returned only when
' show ' is False ."""
|
# read z1 , z2
if args_z1z2 is None :
z1z2 = None
elif args_z1z2 == "minmax" :
z1z2 = "minmax"
else :
tmp_str = args_z1z2 . split ( "," )
z1z2 = float ( tmp_str [ 0 ] ) , float ( tmp_str [ 1 ] )
# read geometry
if args_geometry is None :
geometry = None
else :
tmp_str = args_geometry . split ( "," )
x_geom = int ( tmp_str [ 0 ] )
y_geom = int ( tmp_str [ 1 ] )
dx_geom = int ( tmp_str [ 2 ] )
dy_geom = int ( tmp_str [ 3 ] )
geometry = x_geom , y_geom , dx_geom , dy_geom
# read input FITS file
hdulist = fits . open ( singlefile )
image_header = hdulist [ 0 ] . header
image2d = hdulist [ 0 ] . data
hdulist . close ( )
naxis1 = image_header [ 'naxis1' ]
if 'naxis2' in image_header :
naxis2 = image_header [ 'naxis2' ]
else :
naxis2 = 1
# read wavelength calibration
if 'crpix1' in image_header :
crpix1 = image_header [ 'crpix1' ]
else :
crpix1 = None
if 'crval1' in image_header :
crval1 = image_header [ 'crval1' ]
else :
crval1 = None
if 'cdelt1' in image_header :
cdelt1 = image_header [ 'cdelt1' ]
else :
cdelt1 = None
# title for plot
title = singlefile
if args_keystitle is not None :
keystitle = args_keystitle
keysformat = "." . join ( keystitle . split ( "." ) [ 1 : ] )
keysnames = keystitle . split ( "." ) [ 0 ]
tuple_of_keyval = ( )
for key in keysnames . split ( "," ) :
keyval = image_header [ key ]
tuple_of_keyval += ( keyval , )
title += "\n" + str ( keysformat % tuple_of_keyval )
if len ( image2d . shape ) == 1 :
if image2d . shape != ( naxis1 , ) :
raise ValueError ( "Unexpected error with NAXIS1" )
image2d = np . reshape ( image2d , ( 1 , naxis1 ) )
elif len ( image2d . shape ) == 2 :
if image2d . shape != ( naxis2 , naxis1 ) :
raise ValueError ( "Unexpected error with NAXIS1, NAXIS2" )
else :
raise ValueError ( "Unexpected number of dimensions > 2" )
print ( '>>> File..:' , singlefile )
print ( '>>> NAXIS1:' , naxis1 )
print ( '>>> NAXIS2:' , naxis2 )
# read bounding box
if args_bbox is None :
nc1 = 1
nc2 = naxis1
ns1 = 1
ns2 = naxis2
else :
tmp_bbox = args_bbox . split ( "," )
nc1 = int ( tmp_bbox [ 0 ] )
nc2 = int ( tmp_bbox [ 1 ] )
ns1 = int ( tmp_bbox [ 2 ] )
ns2 = int ( tmp_bbox [ 3 ] )
if nc1 < 1 :
nc1 = 1
if nc2 > naxis1 :
nc2 = naxis1
if ns1 < 1 :
ns1 = 1
if ns2 > naxis2 :
ns2 = naxis2
# read coordinates of lower left pixel
if args_firstpix is None :
nc0 = 1
ns0 = 1
else :
tmp_firstpix = args_firstpix . split ( "," )
nc0 = int ( tmp_firstpix [ 0 ] )
ns0 = int ( tmp_firstpix [ 1 ] )
# display image
ax = ximshow ( image2d = image2d , show = False , cbar_label = args_cbar_label , cbar_orientation = args_cbar_orientation , title = title , z1z2 = z1z2 , image_bbox = ( nc1 , nc2 , ns1 , ns2 ) , first_pixel = ( nc0 , ns0 ) , crpix1 = crpix1 , crval1 = crval1 , cdelt1 = cdelt1 , ds9regfile = args_ds9reg , geometry = geometry , debugplot = debugplot , using_jupyter = using_jupyter )
if pdf is not None :
if show :
pdf . savefig ( )
else :
return ax
else :
if show :
pause_debugplot ( debugplot , pltshow = True )
else : # return axes
return ax
|
def walk ( self , dag , walk_func ) :
"""Walks each node of the graph , in parallel if it can .
The walk _ func is only called when the node ' s dependencies have been
satisfied"""
|
# First , we ' ll topologically sort all of the nodes , with nodes that
# have no dependencies first . We do this to ensure that we don ' t call
# . join on a thread that hasn ' t yet been started .
# TODO ( ejholmes ) : An alternative would be to ensure that Thread . join
# blocks if the thread has not yet been started .
nodes = dag . topological_sort ( )
nodes . reverse ( )
# This maps a node name to a thread of execution .
threads = { }
# Blocks until all of the given nodes have completed execution ( whether
# successfully , or errored ) . Returns True if all nodes returned True .
def wait_for ( nodes ) :
for node in nodes :
thread = threads [ node ]
while thread . is_alive ( ) :
threads [ node ] . join ( 0.5 )
# For each node in the graph , we ' re going to allocate a thread to
# execute . The thread will block executing walk _ func , until all of the
# nodes dependencies have executed .
for node in nodes :
def fn ( n , deps ) :
if deps :
logger . debug ( "%s waiting for %s to complete" , n , ", " . join ( deps ) )
# Wait for all dependencies to complete .
wait_for ( deps )
logger . debug ( "%s starting" , n )
self . semaphore . acquire ( )
try :
return walk_func ( n )
finally :
self . semaphore . release ( )
deps = dag . all_downstreams ( node )
threads [ node ] = Thread ( target = fn , args = ( node , deps ) , name = node )
# Start up all of the threads .
for node in nodes :
threads [ node ] . start ( )
# Wait for all threads to complete executing .
wait_for ( nodes )
|
def login ( access_code : str , client_id : str = CLIENT_ID , client_secret : str = CLIENT_SECRET , headers : dict = HEADERS , redirect_uri : str = REDIRECT_URI ) :
"""Get access _ token fron an user authorized code , the client id and the client secret key .
( See https : / / developer . google . com / v3 / oauth / # web - application - flow ) ."""
|
if not CLIENT_ID or not CLIENT_SECRET :
raise GoogleApiError ( { "error_message" : _ ( "Login with google account is disabled. Contact " "with the sysadmins. Maybe they're snoozing in a " "secret hideout of the data center." ) } )
url = _build_url ( "login" , "access-token" )
params = { "code" : access_code , "client_id" : client_id , "client_secret" : client_secret , "grant_type" : "authorization_code" , "redirect_uri" : redirect_uri }
data = _post ( url , params = params , headers = headers )
return AuthInfo ( access_token = data . get ( "access_token" , None ) )
|
def app_stop ( device_id , app_id ) :
"""stops an app with corresponding package name"""
|
if not is_valid_app_id ( app_id ) :
abort ( 403 )
if not is_valid_device_id ( device_id ) :
abort ( 403 )
if device_id not in devices :
abort ( 404 )
success = devices [ device_id ] . stop_app ( app_id )
return jsonify ( success = success )
|
def _get_rating ( self , entry ) :
"""Get the rating and share for a specific row"""
|
r_info = ''
for string in entry [ 2 ] . strings :
r_info += string
rating , share = r_info . split ( '/' )
return ( rating , share . strip ( '*' ) )
|
def code_constants ( self ) :
"""All of the constants that are used by this functions ' s code ."""
|
# TODO : remove link register values
return [ const . value for block in self . blocks for const in block . vex . constants ]
|
def write_csvs ( dirname : PathLike , adata : AnnData , skip_data : bool = True , sep : str = ',' ) :
"""See : meth : ` ~ anndata . AnnData . write _ csvs ` ."""
|
dirname = Path ( dirname )
if dirname . suffix == '.csv' :
dirname = dirname . with_suffix ( '' )
logger . info ( "writing '.csv' files to %s" , dirname )
if not dirname . is_dir ( ) :
dirname . mkdir ( parents = True , exist_ok = True )
dir_uns = dirname / 'uns'
if not dir_uns . is_dir ( ) :
dir_uns . mkdir ( parents = True , exist_ok = True )
d = dict ( obs = adata . _obs , var = adata . _var , obsm = adata . _obsm . to_df ( ) , varm = adata . _varm . to_df ( ) , )
if not skip_data :
d [ 'X' ] = pd . DataFrame ( adata . _X . toarray ( ) if issparse ( adata . _X ) else adata . _X )
d_write = { ** d , ** adata . _uns }
not_yet_raised_sparse_warning = True
for key , value in d_write . items ( ) :
if issparse ( value ) :
if not_yet_raised_sparse_warning :
warnings . warn ( 'Omitting to write sparse annotation.' )
not_yet_raised_sparse_warning = False
continue
filename = dirname
if key not in { 'X' , 'var' , 'obs' , 'obsm' , 'varm' } :
filename = dir_uns
filename /= '{}.csv' . format ( key )
df = value
if not isinstance ( value , pd . DataFrame ) :
value = np . array ( value )
if np . ndim ( value ) == 0 :
value = value [ None ]
try :
df = pd . DataFrame ( value )
except Exception as e :
warnings . warn ( 'Omitting to write {!r}.' . format ( key ) , type ( e ) )
continue
df . to_csv ( filename , sep = sep , header = key in { 'obs' , 'var' , 'obsm' , 'varm' } , index = key in { 'obs' , 'var' } , )
|
def get_all_context_names ( context_num ) :
"""Based on the nucleotide base context number , return
a list of strings representing each context .
Parameters
context _ num : int
number representing the amount of nucleotide base context to use .
Returns
a list of strings containing the names of the base contexts"""
|
if context_num == 0 :
return [ 'None' ]
elif context_num == 1 :
return [ 'A' , 'C' , 'T' , 'G' ]
elif context_num == 1.5 :
return [ 'C*pG' , 'CpG*' , 'TpC*' , 'G*pA' , 'A' , 'C' , 'T' , 'G' ]
elif context_num == 2 :
dinucs = list ( set ( [ d1 + d2 for d1 in 'ACTG' for d2 in 'ACTG' ] ) )
return dinucs
elif context_num == 3 :
trinucs = list ( set ( [ t1 + t2 + t3 for t1 in 'ACTG' for t2 in 'ACTG' for t3 in 'ACTG' ] ) )
return trinucs
|
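A quick sanity check of the sizes these contexts produce; the 1.5 case layers four special CpG/TpC contexts on top of the four single bases.

assert len(get_all_context_names(1)) == 4
assert len(get_all_context_names(1.5)) == 8   # 4 special contexts + 4 bases
assert len(get_all_context_names(2)) == 16    # 4 ** 2 dinucleotides
assert len(get_all_context_names(3)) == 64    # 4 ** 3 trinucleotides
|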
def get_content_string ( self ) :
"""Ge thet Clusterpoint response ' s content as a string ."""
|
return '' . join ( [ ET . tostring ( element , encoding = "utf-8" , method = "xml" ) for element in list ( self . _content ) ] )
|
def get_l ( self ) :
"""Get Galactic Longitude ( l ) corresponding to the current position
: return : Galactic Longitude"""
|
try :
return self . l . value
except AttributeError : # Transform from L , B to R . A . , Dec
return self . sky_coord . transform_to ( 'galactic' ) . l . value
|
def optimize ( self , angles0 , target ) :
"""Calculate an optimum argument of an objective function ."""
|
def new_objective ( angles ) :
a = angles - angles0
if isinstance ( self . smooth_factor , ( np . ndarray , list ) ) :
if len ( a ) == len ( self . smooth_factor ) :
return ( self . f ( angles , target ) + np . sum ( self . smooth_factor * np . power ( a , 2 ) ) )
else :
raise ValueError ( 'len(smooth_factor) != number of joints' )
else :
return ( self . f ( angles , target ) + self . smooth_factor * np . sum ( np . power ( a , 2 ) ) )
return scipy . optimize . minimize ( new_objective , angles0 , ** self . optimizer_opt ) . x
|
def declared_symbols ( self ) :
"""Return all local symbols here , and also of the parents"""
|
return self . local_declared_symbols | ( self . parent . declared_symbols if self . parent else set ( ) )
|
def refresh ( self ) :
"""Refresh an existing lock to prevent it from expiring .
Uses a LUA ( EVAL ) script to ensure only a lock which we own is being overwritten .
Returns True if refresh succeeded , False if not ."""
|
keys = [ self . name ]
args = [ self . value , self . timeout ]
# Redis docs claim EVALs are atomic , and I ' m inclined to believe it .
if hasattr ( self , '_refresh_script' ) :
return self . _refresh_script ( keys = keys , args = args ) == 1
else :
keys_and_args = keys + args
return self . redis . eval ( self . lua_refresh , len ( keys ) , * keys_and_args )
|
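The body of lua_refresh is defined elsewhere in the class and is not shown here; a compare-and-refresh script of the usual shape would look roughly like the sketch below (an assumption about the script's content, not the library's exact source).

# KEYS[1] is the lock name, ARGV[1] our token, ARGV[2] the timeout in seconds:
# the expiry is only extended if the lock still holds our token.
lua_refresh = """
if redis.call('get', KEYS[1]) == ARGV[1] then
    return redis.call('expire', KEYS[1], ARGV[2])
else
    return 0
end
"""
|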
def camel_to_title ( name ) :
"""Takes a camelCaseFieldName and returns an Title Case Field Name
Args :
name ( str ) : E . g . camelCaseFieldName
Returns :
str : Title Case converted name . E . g . Camel Case Field Name"""
|
split = re . findall ( r"[A-Z]?[a-z0-9]+|[A-Z]+(?=[A-Z]|$)" , name )
ret = " " . join ( split )
ret = ret [ 0 ] . upper ( ) + ret [ 1 : ]
return ret
|
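Two illustrative conversions with the regular expression above; a trailing run of capitals is kept together as an acronym.

print(camel_to_title('camelCaseFieldName'))  # 'Camel Case Field Name'
print(camel_to_title('userID'))              # 'User ID'
|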
def handle_event ( self , package ) :
'''Handle an event from the epull _ sock ( all local minion events )'''
|
if not self . ready :
raise tornado . gen . Return ( )
tag , data = salt . utils . event . SaltEvent . unpack ( package )
log . debug ( 'Minion of \'%s\' is handling event tag \'%s\'' , self . opts [ 'master' ] , tag )
tag_functions = { 'beacons_refresh' : self . _handle_tag_beacons_refresh , 'environ_setenv' : self . _handle_tag_environ_setenv , 'fire_master' : self . _handle_tag_fire_master , 'grains_refresh' : self . _handle_tag_grains_refresh , 'matchers_refresh' : self . _handle_tag_matchers_refresh , 'manage_schedule' : self . _handle_tag_manage_schedule , 'manage_beacons' : self . _handle_tag_manage_beacons , '_minion_mine' : self . _handle_tag_minion_mine , 'module_refresh' : self . _handle_tag_module_refresh , 'pillar_refresh' : self . _handle_tag_pillar_refresh , 'salt/auth/creds' : self . _handle_tag_salt_auth_creds , '_salt_error' : self . _handle_tag_salt_error , '__schedule_return' : self . _handle_tag_schedule_return , master_event ( type = 'disconnected' ) : self . _handle_tag_master_disconnected_failback , master_event ( type = 'failback' ) : self . _handle_tag_master_disconnected_failback , master_event ( type = 'connected' ) : self . _handle_tag_master_connected , }
# Run the appropriate function
for tag_function in tag_functions :
if tag . startswith ( tag_function ) :
tag_functions [ tag_function ] ( tag , data )
|
def multi ( children , quiet_exceptions = ( ) ) :
"""Runs multiple asynchronous operations in parallel .
` ` children ` ` may either be a list or a dict whose values are
yieldable objects . ` ` multi ( ) ` ` returns a new yieldable
object that resolves to a parallel structure containing their
results . If ` ` children ` ` is a list , the result is a list of
results in the same order ; if it is a dict , the result is a dict
with the same keys .
That is , ` ` results = yield multi ( list _ of _ futures ) ` ` is equivalent
to : :
results = [ ]
for future in list _ of _ futures :
results . append ( yield future )
If any children raise exceptions , ` ` multi ( ) ` ` will raise the first
one . All others will be logged , unless they are of types
contained in the ` ` quiet _ exceptions ` ` argument .
If any of the inputs are ` YieldPoints < YieldPoint > ` , the returned
yieldable object is a ` YieldPoint ` . Otherwise , returns a ` . Future ` .
This means that the result of ` multi ` can be used in a native
coroutine if and only if all of its children can be .
In a ` ` yield ` ` - based coroutine , it is not normally necessary to
call this function directly , since the coroutine runner will
do it automatically when a list or dict is yielded . However ,
it is necessary in ` ` await ` ` - based coroutines , or to pass
the ` ` quiet _ exceptions ` ` argument .
This function is available under the names ` ` multi ( ) ` ` and ` ` Multi ( ) ` `
for historical reasons .
. . versionchanged : : 4.2
If multiple yieldables fail , any exceptions after the first
( which is raised ) will be logged . Added the ` ` quiet _ exceptions ` `
argument to suppress this logging for selected exception types .
. . versionchanged : : 4.3
Replaced the class ` ` Multi ` ` and the function ` ` multi _ future ` `
with a unified function ` ` multi ` ` . Added support for yieldables
other than ` YieldPoint ` and ` . Future ` ."""
|
if _contains_yieldpoint ( children ) :
return MultiYieldPoint ( children , quiet_exceptions = quiet_exceptions )
else :
return multi_future ( children , quiet_exceptions = quiet_exceptions )
|
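A small usage sketch in an await-based coroutine; the URLs and the run_sync driver are illustrative.

from tornado.gen import multi
from tornado.httpclient import AsyncHTTPClient
from tornado.ioloop import IOLoop

async def fetch_both():
    client = AsyncHTTPClient()
    # Both fetches run concurrently; the result keeps the dict keys.
    responses = await multi({
        'python': client.fetch('https://www.python.org/'),
        'tornado': client.fetch('https://www.tornadoweb.org/'),
    })
    return {name: response.code for name, response in responses.items()}

print(IOLoop.current().run_sync(fetch_both))  # e.g. {'python': 200, 'tornado': 200}
|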
def udf_signature ( input_type , pin , klass ) :
"""Compute the appropriate signature for a
: class : ` ~ ibis . expr . operations . Node ` from a list of input types
` input _ type ` .
Parameters
input _ type : List [ ibis . expr . datatypes . DataType ]
A list of : class : ` ~ ibis . expr . datatypes . DataType ` instances representing
the signature of a UDF / UDAF .
pin : Optional [ int ]
If this is not None , pin the ` pin ` - th argument type to ` klass `
klass : Union [ Type [ pd . Series ] , Type [ SeriesGroupBy ] ]
The pandas object that every argument type should contain
Returns
Tuple [ Type ]
A tuple of types appropriate for use in a multiple dispatch signature .
Examples
> > > from pprint import pprint
> > > import pandas as pd
> > > from pandas . core . groupby import SeriesGroupBy
> > > import ibis . expr . datatypes as dt
> > > input _ type = [ dt . string , dt . double ]
> > > sig = udf _ signature ( input _ type , pin = None , klass = pd . Series )
> > > pprint ( sig ) # doctest : + ELLIPSIS
( ( < class ' . . . Series ' > , < . . . ' . . . str . . . ' > , < . . . ' NoneType ' > ) ,
( < class ' . . . Series ' > ,
< . . . ' float ' > ,
< . . . ' numpy . floating ' > ,
< . . . ' NoneType ' > ) )
> > > not _ nullable _ types = [
. . . dt . String ( nullable = False ) , dt . Double ( nullable = False ) ]
> > > sig = udf _ signature ( not _ nullable _ types , pin = None , klass = pd . Series )
> > > pprint ( sig ) # doctest : + ELLIPSIS
( ( < class ' . . . Series ' > , < . . . ' . . . str . . . ' > ) ,
( < class ' . . . Series ' > ,
< . . . ' float ' > ,
< . . . ' numpy . floating ' > ) )
> > > sig0 = udf _ signature ( input _ type , pin = 0 , klass = SeriesGroupBy )
> > > sig1 = udf _ signature ( input _ type , pin = 1 , klass = SeriesGroupBy )
> > > pprint ( sig0 ) # doctest : + ELLIPSIS
( < class ' . . . SeriesGroupBy ' > ,
( < class ' . . . SeriesGroupBy ' > ,
< . . . ' float ' > ,
< . . . ' numpy . floating ' > ,
< . . . ' NoneType ' > ) )
> > > pprint ( sig1 ) # doctest : + ELLIPSIS
( ( < class ' . . . SeriesGroupBy ' > ,
< . . . ' . . . str . . . ' > ,
< . . . ' NoneType ' > ) ,
< class ' . . . SeriesGroupBy ' > )"""
|
nargs = len ( input_type )
if not nargs :
return ( )
if nargs == 1 :
r , = input_type
result = ( klass , ) + rule_to_python_type ( r ) + nullable ( r )
return ( result , )
return tuple ( klass if pin is not None and pin == i else ( ( klass , ) + rule_to_python_type ( r ) + nullable ( r ) ) for i , r in enumerate ( input_type ) )
|
def ExtractEvents ( self , parser_mediator , registry_key , ** kwargs ) :
"""Extracts events from a Windows Registry key .
Args :
parser _ mediator ( ParserMediator ) : mediates interactions between parsers
and other components , such as storage and dfvfs .
registry _ key ( dfwinreg . WinRegistryKey ) : Windows Registry key ."""
|
version_value = registry_key . GetValueByName ( 'Version' )
count_subkey = registry_key . GetSubkeyByName ( 'Count' )
if not version_value :
parser_mediator . ProduceExtractionWarning ( 'missing version value' )
return
if not version_value . DataIsInteger ( ) :
parser_mediator . ProduceExtractionWarning ( 'unsupported version value data type' )
return
format_version = version_value . GetDataAsObject ( )
if format_version not in ( 3 , 5 ) :
parser_mediator . ProduceExtractionWarning ( 'unsupported format version: {0:d}' . format ( format_version ) )
return
if not count_subkey :
parser_mediator . ProduceExtractionWarning ( 'missing count subkey' )
return
userassist_entry_index = 0
for registry_value in count_subkey . GetValues ( ) :
try : # Note that Python 2 codecs . decode ( ) does not support keyword arguments
# such as encodings = ' rot - 13 ' .
value_name = codecs . decode ( registry_value . name , 'rot-13' )
except UnicodeEncodeError as exception :
logger . debug ( ( 'Unable to decode UserAssist string: {0:s} with error: {1!s}.\n' 'Attempting piecewise decoding.' ) . format ( registry_value . name , exception ) )
characters = [ ]
for char in registry_value . name :
if ord ( char ) < 128 :
try :
characters . append ( char . decode ( 'rot-13' ) )
except UnicodeEncodeError :
characters . append ( char )
else :
characters . append ( char )
value_name = '' . join ( characters )
if format_version == 5 :
path_segments = value_name . split ( '\\' )
for segment_index , path_segment in enumerate ( path_segments ) : # Remove the { } from the path segment to get the GUID .
guid = path_segments [ segment_index ] [ 1 : - 1 ]
path_segments [ segment_index ] = known_folder_ids . PATHS . get ( guid , path_segment )
value_name = '\\' . join ( path_segments )
# Check if we might need to substitute values .
if '%' in value_name : # TODO : fix missing self . _ knowledge _ base
# pylint : disable = no - member
environment_variables = self . _knowledge_base . GetEnvironmentVariables ( )
value_name = path_helper . PathHelper . ExpandWindowsPath ( value_name , environment_variables )
if value_name == 'UEME_CTLSESSION' :
continue
if format_version == 3 :
entry_map = self . _GetDataTypeMap ( 'user_assist_entry_v3' )
elif format_version == 5 :
entry_map = self . _GetDataTypeMap ( 'user_assist_entry_v5' )
else :
parser_mediator . ProduceExtractionWarning ( 'unsupported format version: {0:d}' . format ( format_version ) )
continue
if not registry_value . DataIsBinaryData ( ) :
parser_mediator . ProduceExtractionWarning ( 'unsupported value data type: {0:s}' . format ( registry_value . data_type_string ) )
continue
entry_data_size = entry_map . GetByteSize ( )
value_data_size = len ( registry_value . data )
if entry_data_size != value_data_size :
parser_mediator . ProduceExtractionWarning ( 'unsupported value data size: {0:d}' . format ( value_data_size ) )
continue
try :
user_assist_entry = self . _ReadStructureFromByteStream ( registry_value . data , 0 , entry_map )
except ( ValueError , errors . ParseError ) as exception :
parser_mediator . ProduceExtractionWarning ( 'unable to parse UserAssist entry value with error: {0!s}' . format ( exception ) )
continue
event_data = UserAssistWindowsRegistryEventData ( )
event_data . key_path = count_subkey . path
event_data . number_of_executions = user_assist_entry . number_of_executions
event_data . value_name = value_name
if format_version == 3 :
if event_data . number_of_executions > 5 :
event_data . number_of_executions -= 5
elif format_version == 5 :
userassist_entry_index += 1
event_data . application_focus_count = ( user_assist_entry . application_focus_count )
event_data . application_focus_duration = ( user_assist_entry . application_focus_duration )
event_data . entry_index = userassist_entry_index
timestamp = user_assist_entry . last_execution_time
if not timestamp :
date_time = dfdatetime_semantic_time . SemanticTime ( 'Not set' )
else :
date_time = dfdatetime_filetime . Filetime ( timestamp = timestamp )
event = time_events . DateTimeValuesEvent ( date_time , definitions . TIME_DESCRIPTION_LAST_RUN )
parser_mediator . ProduceEventWithEventData ( event , event_data )
|
def package_version ( ) :
"""Get the package version via Git Tag ."""
|
version_path = os . path . join ( os . path . dirname ( __file__ ) , 'version.py' )
version = read_version ( version_path )
write_version ( version_path , version )
return version
|
def sign ( self , data : bytes , v : int = 27 ) -> Signature :
"""Sign data hash with local private key"""
|
assert v in ( 0 , 27 ) , 'Raiden is only signing messages with v in (0, 27)'
_hash = eth_sign_sha3 ( data )
signature = self . private_key . sign_msg_hash ( message_hash = _hash )
sig_bytes = signature . to_bytes ( )
# adjust last byte to v
return sig_bytes [ : - 1 ] + bytes ( [ sig_bytes [ - 1 ] + v ] )
|
def create_on_demand ( self , instance_type = 'default' , tags = None , root_device_type = 'ebs' , size = 'default' , vol_type = 'gp2' , delete_on_termination = False ) :
"""Create one or more EC2 on - demand instances .
: param size : Size of root device
: type size : int
: param delete _ on _ termination :
: type delete _ on _ termination : boolean
: param vol _ type :
: type vol _ type : str
: param root _ device _ type : The type of the root device .
: type root _ device _ type : str
: param instance _ type : A section name in amazon . json
: type instance _ type : str
: param tags :
: type tags : dict
: return : List of instances created
: rtype : list"""
|
name , size = self . _get_default_name_size ( instance_type , size )
if root_device_type == 'ebs' :
self . images [ instance_type ] [ 'block_device_map' ] = self . _configure_ebs_volume ( vol_type , name , size , delete_on_termination )
reservation = self . ec2 . run_instances ( ** self . images [ instance_type ] )
logger . info ( 'Creating requested tags...' )
for i in reservation . instances :
self . retry_on_ec2_error ( self . ec2 . create_tags , [ i . id ] , tags or { } )
instances = [ ]
logger . info ( 'Waiting for instances to become ready...' )
while len ( reservation . instances ) : # pylint : disable = len - as - condition
for i in reservation . instances :
if i . state == 'running' :
instances . append ( i )
reservation . instances . pop ( reservation . instances . index ( i ) )
logger . info ( '%s is %s at %s (%s)' , i . id , i . state , i . public_dns_name , i . ip_address )
else :
self . retry_on_ec2_error ( i . update )
return instances
|
def validate_pathname ( filepath ) :
'''Check whether the path contains special characters .
Baidu Pan imposes strict rules on paths and file names :
1 . The path length is limited to 1000 characters
2 . The path must not contain any of these characters : \\ ? | " > < : *
3 . A file or path name must not start or end with " . " or a whitespace character ; whitespace includes : \r , \n , \t , space , \0 , \x0B
@ return , the returned status code : 0 means OK'''
|
if filepath == '/' :
return ValidatePathState . OK
if len ( filepath ) > 1000 :
return ValidatePathState . LENGTH_ERROR
filter2 = '\\?|"><:*'
for c in filter2 :
if c in filepath :
return ValidatePathState . CHAR_ERROR2
paths = rec_split_path ( filepath )
filter3 = '.\r\n\t \0\x0b'
for path in paths :
if path [ 0 ] in filter3 or path [ - 1 ] in filter3 :
return ValidatePathState . CHAR_ERROR3
return ValidatePathState . OK
|
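Three calls whose outcomes follow directly from the checks above (ValidatePathState and rec_split_path come from the surrounding module); the example paths are made up.

print(validate_pathname('/'))                # ValidatePathState.OK (explicit fast path)
print(validate_pathname('/bad|name.txt'))    # CHAR_ERROR2: '|' is a forbidden character
print(validate_pathname('/' + 'a' * 1200))   # LENGTH_ERROR: longer than 1000 characters
|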
def distribute_batches ( self , indices ) :
"""Assigns batches to workers .
Consecutive ranks get consecutive batches .
: param indices : torch . tensor with batch indices"""
|
assert len ( indices ) == self . num_samples
indices = indices . view ( - 1 , self . batch_size )
indices = indices [ self . rank : : self . world_size ] . contiguous ( )
indices = indices . view ( - 1 )
indices = indices . tolist ( )
assert len ( indices ) == self . num_samples // self . world_size
return indices
|
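A standalone rerun of the same slicing, assuming torch is available, with batch_size=2 and world_size=2 over eight sample indices; it shows how consecutive ranks receive consecutive batches.

import torch

indices = torch.arange(8)        # tensor([0, 1, 2, 3, 4, 5, 6, 7])
batches = indices.view(-1, 2)    # four batches of two
rank0 = batches[0::2].contiguous().view(-1).tolist()   # [0, 1, 4, 5] -> batches 0 and 2
rank1 = batches[1::2].contiguous().view(-1).tolist()   # [2, 3, 6, 7] -> batches 1 and 3
|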
def raxisa ( matrix ) :
"""Compute the axis of the rotation given by an input matrix
and the angle of the rotation about that axis .
http : / / naif . jpl . nasa . gov / pub / naif / toolkit _ docs / C / cspice / raxisa _ c . html
: param matrix : Rotation matrix .
: type matrix : 3x3 - Element Array of floats
: return : Axis of the rotation , Angle through which the rotation is performed
: rtype : tuple"""
|
matrix = stypes . toDoubleMatrix ( matrix )
axis = stypes . emptyDoubleVector ( 3 )
angle = ctypes . c_double ( )
libspice . raxisa_c ( matrix , axis , ctypes . byref ( angle ) )
return stypes . cVectorToPython ( axis ) , angle . value
|