signature
stringlengths 29
44.1k
| implementation
stringlengths 0
85.2k
|
---|---|
def _parse(batch_cmd):
    """Split a batch command string into a shell command and batch I/O specs.

    :param batch_cmd: command string possibly containing batch placeholders,
        e.g. ``'diff IN_BATCH0 IN_BATCH1 > OUT_BATCH'``
    :rtype: (sh_cmd, batch_to_file_s, batch_from_file)
    :returns: e.g. for the command above::

            ('diff /tmp/relshell-AbCDeF /tmp/relshell-uVwXyz',
             (<instance of BatchToFile>, <instance of BatchToFile>),
             'STDOUT')
    """
    tokens = shlex.split(batch_cmd)
    # resolve IN_BATCH* placeholders first, then the OUT_BATCH placeholder
    tokens, batch_to_file_s = BatchCommand._parse_in_batches(tokens)
    tokens, batch_from_file = BatchCommand._parse_out_batch(tokens)
    return (list2cmdline(tokens), batch_to_file_s, batch_from_file)
|
def baseDomain(domain, includeScheme=True):
    """Return the network location portion of the given URL/domain.

    :param domain: URL or bare domain string to reduce
    :param includeScheme: when True, prefix the result with ``<scheme>://``
    :return: the network location (netloc), optionally prefixed with the
        scheme.  Scheme-less input such as ``'example.com'`` parses with an
        empty netloc, in which case the path component is used instead.
    """
    result = ''
    url = urlparse(domain)
    if includeScheme:
        result = '%s://' % url.scheme
    # urlparse puts scheme-less input like 'example.com' in .path, not .netloc
    if len(url.netloc) == 0:
        result += url.path
    else:
        result += url.netloc
    return result
|
def _check_sensor_platform_consistency(self, sensor):
    """Make sure sensor and platform are consistent.

    Args:
        sensor (str): Sensor name from YAML dataset definition

    Raises:
        ValueError: if the sensor is not a payload of ``self.platform``
    """
    ref_sensor = SENSORS.get(self.platform, None)
    if ref_sensor and sensor != ref_sensor:
        msg = ('Sensor-Platform mismatch: {} is not a payload '
               'of {}. Did you choose the correct reader?'
               .format(sensor, self.platform))
        logger.error(msg)
        # The docstring promised a ValueError but the original only logged,
        # silently letting inconsistent configs through; raise as documented.
        raise ValueError(msg)
|
def main():
    """Command-line entry point: Github within the Command Line.

    Builds the Github API URL for the selected (mutually exclusive) option,
    fetches the data and pretty-prints it, or downloads tar/zip archives.
    """
    parser = argparse.ArgumentParser(description='Github within the Command Line')
    group = parser.add_mutually_exclusive_group()
    group.add_argument('-n', '--url', type=str,
                       help="Get repos from the user profile's URL")
    group.add_argument('-r', '--recursive', type=str,
                       help="Get the file structure from the repo link")
    group.add_argument('-R', '--readme', type=str,
                       help="Get the raw version of the repo readme from repo link")
    group.add_argument('-re', '--releases', type=str,
                       help="Get the list of releases from repo link")
    group.add_argument('-dt', '--tarball', type=str,
                       help="Download the tarball of the given repo")
    group.add_argument('-dz', '--zipball', type=str,
                       help="Download the zipball of the given repo")
    group.add_argument('-op', '--openfile', type=str,
                       help="Show the contents of the given file in a repo")
    group.add_argument('-f', '--followers', type=str,
                       help="Get followers of the user")
    group.add_argument('-fo', '--following', type=str,
                       help="Get people following the user")
    group.add_argument('-c', '--contributors', type=str,
                       help="Get contributors of a repo")
    if len(sys.argv) == 1:
        # no arguments at all: show usage instead of failing later
        parser.print_help()
        return
    args = parser.parse_args()
    # -------- build the API URL for the chosen option --------
    # URL
    if args.url:
        name = url_parse(args.url)
        url = GITHUB_API + 'users/' + name + '/repos'
    # TREE
    if args.recursive:
        name = url_parse(args.recursive)
        url = GITHUB_API + 'repos/' + name + '/branches/master'
        response = get_req(url)
        jsondata = json.loads(response)
        # need the tree SHA of master's HEAD commit to list it recursively
        sha = jsondata['commit']['commit']['tree']['sha']
        url = GITHUB_API + 'repos/' + name + '/git/trees/' + sha + '?recursive=1'
    # README
    if args.readme:
        name = url_parse(args.readme)
        url = GITHUB_API + 'repos/' + name + '/readme'
    # RELEASES
    if args.releases:
        name = url_parse(args.releases)
        url = GITHUB_API + 'repos/' + name + '/releases'
    # TARBALL / ZIPBALL
    if args.tarball or args.zipball:
        if args.tarball:
            key = '/tarball/'
            name = url_parse(args.tarball)
        if args.zipball:
            key = '/zipball/'
            name = url_parse(args.zipball)
        url = GITHUB_API + 'repos/' + name + key + 'master'
    # OPEN ONE FILE
    if args.openfile:
        name = url_parse(args.openfile)
        # split "user/repo/path..." into its parts; the slashes are kept on
        # user/repo so the URL can be rebuilt by plain concatenation
        position = name.find('/')
        user = name[:position + 1]
        rest = name[position + 1:]
        position = rest.find('/')
        repo = rest[:position + 1]
        rest = rest[position + 1:]
        url = GITHUB_API + 'repos/' + user + repo + 'contents/' + rest
    # -------- perform requests and render results --------
    # TARBALL / ZIPBALL: download then exit early
    if args.tarball or args.zipball:
        response_url = geturl_req(url)
        position = name.find('/')
        name = name[position + 1:]
        if args.tarball:
            name = name + '.tar.gz'
        if args.zipball:
            name = name + '.zip'
        print("\nDownloading " + name + '...\n')
        urllib.request.urlretrieve(response_url, name)
        print(name + ' has been saved\n')
        return
    # FOLLOWERS
    if args.followers:
        name = url_parse(args.followers)
        url = GITHUB_API + 'users/' + name + '/followers'
    # FOLLOWING
    if args.following:
        name = url_parse(args.following)
        url = GITHUB_API + 'users/' + name + '/following'
    # CONTRIBUTORS
    if args.contributors:
        name = url_parse(args.contributors)
        url = GITHUB_API + 'repos/' + name + '/contributors'
    # OTHER OPTIONS
    response = get_req(url)
    jsondata = json.loads(response)
    # USERNAME and URL
    if args.url:
        table = PrettyTable([" Repository ", "★ Star"])
        table.align[" Repository "] = "l"
        for i in jsondata:
            table.add_row([i['name'], i['stargazers_count']])
        print(table)
    # RECURSIVE TREE
    if args.recursive:
        table = PrettyTable([" File/Folder ", " Size (Bytes) "])
        table.align[" File/Folder "] = "l"
        for i in jsondata['tree']:
            size = '-'
            path = i['path'] + '/'
            if i['type'] == 'blob':
                size = i['size']
                path = path[:-1]
            table.add_row([path, size])
        print(table)
    # README
    if args.readme:
        print(base64.b64decode(jsondata['content']).decode('utf-8'))
    # RELEASES
    if args.releases:
        table = PrettyTable([" Release name ", " Release Date ", " Release Time "])
        for i in jsondata:
            time = str(dateutil.parser.parse(i['published_at']))
            date = time[:10]
            time = time[11:]
            time = time[:5]
            time = time + ' UTC'
            table.add_row([i['tag_name'], date, time])
        print(table)
    # OPEN ONE FILE
    if args.openfile:
        try:
            print(base64.b64decode(jsondata['content']).decode('utf-8'))
            return
        except (TypeError, KeyError):
            # A directory URL returns a JSON *list* (no 'content' key), which
            # raises TypeError/KeyError above.  The original bare `except:`
            # also hid real errors such as decode failures.
            print("\nDirectory URL was given, hence its contents will be displayed\n")
        table = PrettyTable(["Folder Contents"])
        for i in jsondata:
            table.add_row([i['name']])
        print(table)
    # GET FOLLOWERS
    if args.followers:
        table = PrettyTable([" FOLLOWERS "])
        table.align[" FOLLOWERS "] = "l"
        for i in jsondata:
            table.add_row([i['login']])
        print("Number of followers:" + str(len(jsondata)))
        print(table)
    # GET FOLLOWING
    if args.following:
        table = PrettyTable([" FOLLOWING "])
        table.align[" FOLLOWING "] = "l"
        for i in jsondata:
            table.add_row([i['login']])
        print("Number of following:" + str(len(jsondata)))
        print(table)
    # GET CONTRIBUTORS
    if args.contributors:
        table = PrettyTable([" CONTRIBUTORS "])
        table.align[" CONTRIBUTORS "] = "l"
        for i in jsondata:
            table.add_row([i['login']])
        print("Number of contributors:" + str(len(jsondata)))
        print(table)
|
def get_picture(self, login=None, **kwargs):
    """Get a user's picture.

    :param str login: Login of the user to check (a ``login`` keyword in
        ``kwargs`` takes precedence; falls back to ``self._login``)
    :return: JSON
    """
    target = kwargs.get('login', login or self._login)
    picture_url = PICTURE_URL.format(login=target)
    return self._request_api(url=picture_url).content
|
def frameify(self, state, data):
    """Yield chunk data as a single frame, and buffer the rest.

    Uses two fields on ``state``: ``chunk_remaining`` (bytes left in the
    current chunk) and ``recv_buf`` (carry-over of unframed data).
    """
    # If we've pulled in all the chunk data, buffer the data
    if state.chunk_remaining <= 0:
        state.recv_buf += data
        return
    # Pull in any partially-processed data
    data = state.recv_buf + data
    # Determine how much belongs to the chunk
    if len(data) <= state.chunk_remaining:
        chunk = data
        data = ''
    else:
        # Pull out only what's part of the chunk
        chunk = data[:state.chunk_remaining]
        data = data[state.chunk_remaining:]
    # Update the state
    state.recv_buf = data
    state.chunk_remaining -= len(chunk)
    # Yield the chunk; a FrameSwitch thrown in by the consumer is the
    # protocol for changing framing mode, not an error, so swallow it.
    try:
        yield chunk
    except FrameSwitch:
        pass
|
def move(self, new_path, replication=None):
    """Move current path to a new location.

    :param new_path: target location of current file/directory
    :param replication: number of replication
    """
    # relative targets are resolved against the current directory
    if new_path.startswith('/'):
        target = self._normpath(new_path)
    else:
        target = self._normpath(self.dirname + '/' + new_path)
    if target == self.path:
        raise ValueError('New path should be different from the original one.')
    update_def = self.UpdateRequestXML(path=target)
    if replication:
        update_def.replication = replication
    headers = {
        'Content-Type': 'application/xml',
        'x-odps-volume-fs-path': self.path,
    }
    self._client.put(self.parent.resource(), params={'meta': ''},
                     headers=headers, data=update_def.serialize())
    # invalidate the cache entry for the old location before rebinding
    self._del_cache(self.path)
    self.path = target
    self.reload()
|
def _extract_from_object(self, selector):
    """Extract all values addressed by *selector* from ``self.obj``.

    A ``slice`` selector is expanded manually so it also applies to mapping
    types and general objects (e.g. ``4::2`` picks every even numerical
    key/attr >= 4); any other hashable selector is treated as a single key.
    Missing keys contribute nothing, so the result may be an empty list.
    """
    if not isinstance(selector, slice):
        keys = [selector]
    else:
        start = selector.start or 0
        step = selector.step or 1
        if selector.stop is not None:
            keys = xrange(start, selector.stop, step)
        elif hasattr(self.obj, "keys"):
            # open-ended slice over a mapping: keep numeric keys matching
            # the slice's start/step
            keys = [k for k in self.obj.keys()
                    if isinstance(k, baseinteger)
                    and k >= start and (k - start) % step == 0]
        elif hasattr(self.obj, "__len__"):
            # open-ended slice over a sized object
            keys = xrange(start, len(self.obj), step)
        else:
            # no way to know how to slice; silently extract nothing
            keys = []
    res = []
    for key in keys:
        self._append(self.obj, key, res)
    return res
|
def publish(self, key, data):
    '''Publishes the data to the event stream.'''
    # frame the payload with a blank line so the SSE client can split events
    payload = salt.utils.json.dumps({key: data}) + str('\n\n')
    # future lint: disable=blacklisted-function
    self.handler.write_message(payload)
|
def get_initial(self):
    """Returns the initial data to use for forms on this view."""
    initial = super(ProjectCopy, self).get_initial()
    source = self.copy_object
    if source:
        # pre-fill the form from the project being copied
        initial.update({
            'name': '%s copy' % source.name,
            'description': source.description,
            'use_repo_fabfile': source.use_repo_fabfile,
            'fabfile_requirements': source.fabfile_requirements,
            'repo_url': source.repo_url,
        })
    return initial
|
def get_workspaces(self):
    """Get a list of workspaces. Returns JSON-like data, not a Con instance.

    You might want to try :meth:`Con.workspaces` instead if the info
    contained here is too little.

    :rtype: List of :class:`WorkspaceReply`.
    """
    raw = self.message(MessageType.GET_WORKSPACES, '')
    # object_hook wraps each decoded JSON object in a WorkspaceReply
    return json.loads(raw, object_hook=WorkspaceReply)
|
def choose_key(gpg_private_keys):
    """Displays gpg key choice and returns key.

    :param gpg_private_keys: list of key dicts (each with 'fingerprint' and
        'uids' entries) as provided by the gpg wrapper
    :return: the chosen key dict, or None if the dialog was cancelled
    """
    uid_strings_fp = []
    uid_string_fp2key = {}
    current_key_index = None
    for i, key in enumerate(gpg_private_keys):
        fingerprint = key['fingerprint']
        # remember which entry matches the configured key so it can be
        # pre-selected in the dialog
        if fingerprint == config["gpg_key_fingerprint"]:
            current_key_index = i
        for uid_string in key['uids']:
            uid_string_fp = '"' + uid_string + ' (' + fingerprint + ')'
            uid_strings_fp.append(uid_string_fp)
            uid_string_fp2key[uid_string_fp] = key
    msg = _('Choose a GPG key for signing pyspread save files.\n'
            'The GPG key must not have a passphrase set.')
    dlg = wx.SingleChoiceDialog(None, msg, _('Choose key'), uid_strings_fp,
                                wx.CHOICEDLG_STYLE)
    # NOTE(review): relabels the dialog's stock buttons by child position --
    # fragile against wx layout changes; confirm indices on wx upgrades.
    childlist = list(dlg.GetChildren())
    childlist[-3].SetLabel(_("Use chosen key"))
    childlist[-2].SetLabel(_("Create new key"))
    if current_key_index is not None:
        # Set choice to current key
        dlg.SetSelection(current_key_index)
    if dlg.ShowModal() == wx.ID_OK:
        uid_string_fp = dlg.GetStringSelection()
        key = uid_string_fp2key[uid_string_fp]
    else:
        key = None
    dlg.Destroy()
    return key
|
async def section(self, sec=None):
    """Section / dict serialization.

    Writing mode serializes ``sec``; reading mode fills (and returns) a
    dict with the deserialized entries.

    :return: the section dict
    """
    if self.writing:
        await dump_varint(self.iobj, len(sec))
        for name in sec:
            await self.section_name(name)
            await self.storage_entry(sec[name])
        return sec
    # reading: deserialize into the provided dict (or a fresh one)
    sec = {} if sec is None else sec
    count = await load_varint(self.iobj)
    for _ in range(count):
        name = await self.section_name()
        sec[name] = await self.storage_entry()
    return sec
|
def _set_network_options(self):
    """Set up VMware networking by rewriting the VM's VMX key/value pairs."""
    # first some sanity checks
    for adapter_number in range(0, self._adapters):
        # we want the vmnet interface to be connected when starting the VM
        connected = "ethernet{}.startConnected".format(adapter_number)
        if self._get_vmx_setting(connected):
            del self._vmx_pairs[connected]
    # then configure VMware network adapters
    self.manager.refresh_vmnet_list()
    for adapter_number in range(0, self._adapters):
        # add/update the interface
        if self._adapter_type == "default":
            # force default to e1000 because some guest OS don't detect the
            # adapter (i.e. Windows 2012 server) when 'virtualdev' is not
            # set in the VMX file.
            adapter_type = "e1000"
        else:
            adapter_type = self._adapter_type
        ethernet_adapter = {"ethernet{}.present".format(adapter_number): "TRUE",
                            "ethernet{}.addresstype".format(adapter_number): "generated",
                            "ethernet{}.generatedaddressoffset".format(adapter_number): "0",
                            "ethernet{}.virtualdev".format(adapter_number): adapter_type}
        self._vmx_pairs.update(ethernet_adapter)
        connection_type = "ethernet{}.connectiontype".format(adapter_number)
        # keep a pre-existing nat/bridged/hostonly connection untouched
        # unless we are allowed to use any adapter
        if not self._use_any_adapter and connection_type in self._vmx_pairs and self._vmx_pairs[connection_type] in ("nat", "bridged", "hostonly"):
            continue
        self._vmx_pairs["ethernet{}.connectiontype".format(adapter_number)] = "custom"
        # make sure we have a vmnet per adapter if we use uBridge
        allocate_vmnet = False
        # first check if a vmnet is already assigned to the adapter
        vnet = "ethernet{}.vnet".format(adapter_number)
        if vnet in self._vmx_pairs:
            vmnet = os.path.basename(self._vmx_pairs[vnet])
            if self.manager.is_managed_vmnet(vmnet) or vmnet in ("vmnet0", "vmnet1", "vmnet8"):
                # vmnet already managed, try to allocate a new one
                allocate_vmnet = True
        else:
            # otherwise allocate a new one
            allocate_vmnet = True
        if allocate_vmnet:
            try:
                vmnet = self.manager.allocate_vmnet()
            except BaseException:
                # clear everything up in case of error (e.g. no enough vmnets)
                self._vmnets.clear()
                raise
        # mark the vmnet managed by us
        if vmnet not in self._vmnets:
            self._vmnets.append(vmnet)
        self._vmx_pairs["ethernet{}.vnet".format(adapter_number)] = vmnet
    # disable remaining network adapters
    for adapter_number in range(self._adapters, self._maximum_adapters):
        if self._get_vmx_setting("ethernet{}.present".format(adapter_number), "TRUE"):
            log.debug("disabling remaining adapter {}".format(adapter_number))
            self._vmx_pairs["ethernet{}.startconnected".format(adapter_number)] = "FALSE"
|
def add_url ( self , url , line = 0 , column = 0 , page = 0 , name = u"" , base = None ) :
"""If a local webroot directory is configured , replace absolute URLs
with it . After that queue the URL data for checking ."""
|
webroot = self . aggregate . config [ "localwebroot" ]
if webroot and url and url . startswith ( u"/" ) :
url = webroot + url [ 1 : ]
log . debug ( LOG_CHECK , "Applied local webroot `%s' to `%s'." , webroot , url )
super ( FileUrl , self ) . add_url ( url , line = line , column = column , page = page , name = name , base = base )
|
def _connect_syndic(self, opts):
    '''Create a syndic, and asynchronously connect it to a master.

    Retries forever with a growing back-off (capped at self.max_auth_wait)
    until the connection succeeds; returns the connected Syndic via
    tornado's coroutine-return mechanism.
    '''
    auth_wait = opts['acceptance_wait_time']
    failed = False
    while True:
        if failed:
            # grow the back-off a little on every failed attempt
            if auth_wait < self.max_auth_wait:
                auth_wait += self.auth_wait
            log.debug(
                "sleeping before reconnect attempt to %s [%d/%d]",
                opts['master'], auth_wait, self.max_auth_wait,
            )
            yield tornado.gen.sleep(auth_wait)
        # TODO: log?
        log.debug('Syndic attempting to connect to %s', opts['master'])
        try:
            syndic = Syndic(opts,
                            timeout=self.SYNDIC_CONNECT_TIMEOUT,
                            safe=False,
                            io_loop=self.io_loop,
                            )
            yield syndic.connect_master(failed=failed)
            # set up the syndic to handle publishes (specifically not event forwarding)
            syndic.tune_in_no_block()
            # Send an event to the master that the minion is live
            syndic.fire_master_syndic_start()
            log.info('Syndic successfully connected to %s', opts['master'])
            break
        except SaltClientError as exc:
            failed = True
            log.error('Error while bringing up syndic for multi-syndic. Is the '
                      'master at %s responding?', opts['master'])
        except (KeyboardInterrupt, SystemExit):
            # never retry past an explicit interrupt/exit
            raise
        except Exception:
            failed = True
            log.critical('Unexpected error while connecting to %s',
                         opts['master'], exc_info=True)
    raise tornado.gen.Return(syndic)
|
def execute_command(command):
    """Execute a command and return its decoded standard output.

    :param command: shell-style command string (split with ``shlex``)
    :return: the command's stdout decoded as UTF-8
    :raises RuntimeError: when the executable cannot be found
    :raises ValueError: when the command exits with a non-zero status
    """
    command = shlex.split(command)
    try:
        process = subprocess.Popen(
            command,
            stdin=subprocess.PIPE,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
        )
    except FileNotFoundError:
        raise RuntimeError("Command not found: {}".format(repr(command)))
    # communicate() drains both pipes while waiting; the original
    # process.wait() could deadlock once a pipe buffer filled up.
    stdout, stderr_data = process.communicate()
    # TODO: may use another codec to decode
    # != 0 also catches negative return codes (process killed by signal),
    # which the original `> 0` check silently ignored.
    if process.returncode != 0:
        stderr = stderr_data.decode("utf-8")
        raise ValueError("Error executing command: {}".format(repr(stderr)))
    return stdout.decode("utf-8")
|
def cases(self):
    """All test cases created by the user"""
    import nitrate
    # lazy-computed and cached on the instance
    if self._cases is None:
        log.info(u"Searching for cases created by {0}".format(self.user))
        found = nitrate.TestCase.search(
            author__email=self.user.email,
            create_date__gt=str(self.options.since),
            create_date__lt=str(self.options.until))
        self._cases = [case for case in found
                       if case.status != nitrate.CaseStatus("DISABLED")]
    return self._cases
|
def _save(self, fn):
    """Persist the notebook to the given file.

    :param fn: the file name
    """
    # serialize description, pending jobs and results as one JSON document
    payload = json.dumps({'description': self.description(),
                          'pending': self._pending,
                          'results': self._results},
                         indent=4, cls=MetadataEncoder)
    with open(fn, 'w') as f:
        f.write(payload)
|
def delete_table_rate_rule_by_id(cls, table_rate_rule_id, **kwargs):
    """Delete TableRateRule

    Delete an instance of TableRateRule by its ID.
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async=True

    >>> thread = api.delete_table_rate_rule_by_id(table_rate_rule_id, async=True)
    >>> result = thread.get()

    :param async bool
    :param str table_rate_rule_id: ID of tableRateRule to delete. (required)
    :return: None
        If the method is called asynchronously,
        returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    call = cls._delete_table_rate_rule_by_id_with_http_info
    if kwargs.get('async'):
        # hand back the request thread immediately
        return call(table_rate_rule_id, **kwargs)
    (data) = call(table_rate_rule_id, **kwargs)
    return data
|
def _clearQuantities(self):
    """Computes the cleared quantities for each offer/bid according
    to the dispatched output from the OPF solution."""
    sellers = []
    buyers = []
    # split the case's generators into true generators and dispatchable loads
    for unit in self.case.generators:
        (buyers if unit.is_load else sellers).append(unit)
    for unit in sellers:
        self._clearQuantity(self.offers, unit)
    for unit in buyers:
        self._clearQuantity(self.bids, unit)
|
def main():
    """NAME
        odp_srm_magic.py

    DESCRIPTION
        converts ODP measurement format files to magic_measurements format files

    SYNTAX
        odp_srm_magic.py [command line options]

    OPTIONS
        -h: prints the help message and quits.
        -F FILE: specify output measurements file, default is magic_measurements.txt
        -Fsa FILE: specify output er_sample.txt file, default is er_sample.txt
        -A : don't average replicate measurements

    INPUT
        put data from a single core into a directory. depths will be below core top
    """
    version_num = pmag.get_version()
    meas_file = 'magic_measurements.txt'
    samp_file = 'er_samples.txt'
    ErSpecs, ErSamps, ErSites, ErLocs, ErCits = [], [], [], [], []
    MagRecs = []
    citation = "This study"
    dir_path, demag = '.', 'NRM'
    args = sys.argv
    # NOTE(review): the trailing comma makes noave the tuple (0,), not 0 --
    # looks like a typo; confirm how pmag.measurements_methods treats it
    # before changing.
    noave = 0,
    if '-WD' in args:
        ind = args.index("-WD")
        dir_path = args[ind + 1]
    if "-h" in args:
        print(main.__doc__)
        sys.exit()
    if "-A" in args:
        noave = 1
    if '-F' in args:
        ind = args.index("-F")
        meas_file = args[ind + 1]
    if '-Fsa' in args:
        ind = args.index("-Fsa")
        samp_file = args[ind + 1]
    if '-LP' in args:
        # lab-protocol codes, colon-separated (e.g. AF:ANI)
        ind = args.index("-LP")
        codelist = args[ind + 1]
        codes = codelist.split(':')
        if "AF" in codes:
            demag = 'AF'
            if '-dc' not in args:
                methcode = "LT-AF-Z"
            if '-dc' in args:
                methcode = "LT-AF-I"
        if "T" in codes:
            demag = "T"
            if '-dc' not in args:
                methcode = "LT-T-Z"
            if '-dc' in args:
                methcode = "LT-T-I"
        if "I" in codes:
            methcode = "LP-IRM"
        if "S" in codes:
            demag = "S"
            methcode = "LP-PI-TRM:LP-PI-ALT-AFARM"
            # NOTE(review): labfield is never defined in this function, so
            # this branch would raise NameError -- compare with upstream.
            trm_labfield = labfield
            ans = input("DC lab field for ARM step: [50uT] ")
            if ans == "":
                arm_labfield = 50e-6
            else:
                arm_labfield = float(ans) * 1e-6
            ans = input("temperature for total trm step: [600 C] ")
            if ans == "":
                trm_peakT = 600 + 273
                # convert to kelvin
            else:
                trm_peakT = float(ans) + 273
                # convert to kelvin
        if "G" in codes:
            methcode = "LT-AF-G"
        if "D" in codes:
            methcode = "LT-AF-D"
        if "TRM" in codes:
            demag = "T"
            trm = 1
        if demag == "T" and "ANI" in codes:
            methcode = "LP-AN-TRM"
        if demag == "AF" and "ANI" in codes:
            methcode = "LP-AN-ARM"
            # NOTE(review): labfield/peakfield are also undefined here --
            # presumably inherited from a sibling conversion script; verify.
            if labfield == 0:
                labfield = 50e-6
            if peakfield == 0:
                peakfield = .180
    samp_file = dir_path + '/' + samp_file
    meas_file = dir_path + '/' + meas_file
    filelist = os.listdir(dir_path)
    # read in list of files to import
    specimens, samples, sites = [], [], []
    MagRecs, SpecRecs, SampRecs = [], [], []
    for file in filelist:
        # parse each file
        if file[-3:].lower() == 'srm':
            print('processing: ', file)
            # section number is encoded in the hyphenated file-name prefix
            Nfo = file.split('_')[0].split('-')
            try:
                sect = int(Nfo[3][:-1])
            except:
                sect = 1
            # NOTE(review): shadows the input() builtin used above
            input = open(file, 'r').readlines()
            MagRec, SpecRec, SampRec = {}, {}, {}
            alt_spec, treatment_type, treatment_value, user = "", "", "", ""
            inst = "ODP-SRM"
            SampRec['sample_azimuth'] = '0'
            SampRec['sample_dip'] = '0'
            SampRec['magic_method_code'] = 'FS-C-DRILL-IODP:SP-SS-C'
            MagRec['er_analyst_mail_names'] = user
            MagRec['magic_method_codes'] = 'LT-NO'
            MagRec['magic_software_packages'] = version_num
            MagRec["treatment_temp"] = '%8.3e' % (273)
            # room temp in kelvin
            MagRec["measurement_temp"] = '%8.3e' % (273)
            # room temp in kelvin
            MagRec["treatment_ac_field"] = '0'
            MagRec["treatment_dc_field"] = '0'
            MagRec["treatment_dc_field_phi"] = '0'
            MagRec["treatment_dc_field_theta"] = '0'
            MagRec["measurement_flag"] = 'g'
            # assume all data are "good"
            MagRec["measurement_standard"] = 'u'
            MagRec["measurement_csd"] = ''
            # set csd to blank
            SpecRec['er_specimen_alternatives'] = alt_spec
            vol = 7e-6
            # assume 7 cc samples
            datestamp = input[1].split()
            # date time is second line of file
            mmddyy = datestamp[0].split('/')
            # break into month day year
            date = mmddyy[2] + ':' + mmddyy[0] + ":" + mmddyy[1] + ':' + datestamp[1]
            MagRec["measurement_date"] = date
            treatment_value, inst = "", "ODP-SRM"
            k = 0
            # --- header section: 'key=value' lines up to the <MULTI> marker
            while 1:
                fields = input[k].replace('\n', '').split("=")
                if 'treatment_type' in fields[0]:
                    if "Alternating Frequency Demagnetization" in fields[1]:
                        MagRec['magic_method_codes'] = 'LT-AF-Z'
                        inst = inst + ':ODP-DTECH'
                        # measured on shipboard AF DTECH D2000
                if "treatment_value" in fields[0]:
                    value = fields[1]
                    if value != " ":
                        treatment_value = float(value) * 1e-3
                        MagRec["treatment_ac_field"] = '%8.3e' % (treatment_value)
                        # AF demag in treat mT => T
                if 'user' in fields[0]:
                    user = fields[-1]
                    MagRec["er_analyst_mail_names"] = user
                    MagRec["measurement_standard"] = 'u'
                    # assume all data are "good"
                if 'sample_area' in fields[0]:
                    vol = float(fields[1]) * 1e-6
                    # takes volume (cc) and converts to m^3
                if 'run_number' in fields[0]:
                    MagRec['external_database_ids'] = fields[1]
                    # run number is the LIMS measurement number
                    MagRec['external_database_names'] = 'LIMS'
                k += 1
                if input[k][0:7] == '<MULTI>':
                    break
            # --- data section: comma-separated records up to the <RAW> marker
            while 1:
                k += 1
                line = input[k]
                if line[0:5] == '<RAW>':
                    break
                treatment_value = ""
                rec = line.replace('\n', '').split(',')
                # list of data
                if len(rec) > 2:
                    MeasRec, SampRec = {}, {'core_depth': '0', 'er_sample_name': '0', 'er_site_name': '0', 'er_location_name': 'location'}
                    # start each measurement record from the file-level defaults
                    for key in list(MagRec.keys()):
                        MeasRec[key] = MagRec[key]
                    for item in rec:
                        items = item.split('=')
                        if 'demag_level' in items[0]:
                            treat = float(items[1])
                            if treat != 0:
                                MeasRec['magic_method_codes'] = 'LT-AF-Z'
                                inst = inst + ':ODP-SRM-AF'
                                MeasRec["treatment_ac_field"] = '%8.3e' % (treat * 1e-3)
                                # AF demag in treat mT => T
                        if 'inclination_w_tray_w_bkgrd' in items[0]:
                            MeasRec['measurement_inc'] = items[1]
                        if 'declination_w_tray_w_bkgrd' in items[0]:
                            MeasRec['measurement_dec'] = items[1]
                        if 'intensity_w_tray_w_bkgrd' in items[0]:
                            MeasRec['measurement_magn_moment'] = '%8.3e' % (float(items[1]) * vol)
                            # convert intensity from A/m to Am^2 using vol
                            MeasRec['magic_instrument_codes'] = inst
                        if 'offset' in items[0]:
                            # depth below core top: 1.5 m per section + offset
                            depth = '%7.3f' % (float(sect - 1) * 1.5 + float(items[1]))
                            SampRec['core_depth'] = depth
                            MeasRec['er_specimen_name'] = depth
                            MeasRec['er_sample_name'] = depth
                            MeasRec['er_site_name'] = depth
                            MeasRec['er_location_name'] = 'location'
                            SampRec['er_sample_name'] = depth
                            SampRec['er_site_name'] = depth
                            SampRec['er_location_name'] = 'location'
                    MeasRec['measurement_number'] = '1'
                    SampRecs.append(SampRec)
                    MagRecs.append(MeasRec)
    pmag.magic_write(samp_file, SampRecs, 'er_samples')
    print('samples stored in ', samp_file)
    Fixed = pmag.measurements_methods(MagRecs, noave)
    pmag.magic_write(meas_file, Fixed, 'magic_measurements')
    print('data stored in ', meas_file)
|
def acceptCompletion(self):
    """Accepts the current completion and inserts the code into the edit.

    :return: <bool> accepted
    """
    tree = self._completerTree
    if not tree:
        return False
    tree.hide()
    item = tree.currentItem()
    if not item:
        return False
    # clear the previously typed code for the block
    cursor = self.textCursor()
    text = cursor.block().text()
    end = cursor.columnNumber()
    col = end
    # walk back to the start of the partial token (just after '.' or space)
    while col:
        col -= 1
        if text[col] in ('.', ' '):
            col += 1
            break
    # select the partial token and replace it with the chosen completion
    cursor.setPosition(cursor.position() - (end - col), cursor.KeepAnchor)
    cursor.removeSelectedText()
    self.insertPlainText(item.text(0))
    return True
|
def from_unix(cls, seconds, milliseconds=0):
    """Produce a full |datetime.datetime| object from a Unix timestamp."""
    # gmtime gives (year, month, day, hour, minute, second, ...) in UTC;
    # milliseconds are passed on as microseconds
    year, month, day, hour, minute, second = time.gmtime(seconds)[:6]
    return cls(year, month, day, hour, minute, second, milliseconds * 1000)
|
def prepare_value_for_storage(self, value, pk):
    """Prepare the value to be stored in the zset.

    We'll store the value and pk concatenated (separated by
    ``self.separator``).  For the parameters, see
    BaseRangeIndex.prepare_value_for_storage.
    """
    base = super(TextRangeIndex, self).prepare_value_for_storage(value, pk)
    return self.separator.join((base, str(pk)))
|
def _extract_lengths(ll):
    """Extract the sorted list of possible lengths from a specification string.

    :param ll: comma-separated length spec, e.g. ``"7,9-11"``; each item is
        either a plain number (matched by ``_NUM_RE``) or a ``min-max`` range
        (matched by ``_RANGE_RE``).  ``None`` means "no specification".
    :return: sorted list of all allowed lengths (empty when ``ll`` is None)
    :raises Exception: when an item is neither a number nor a range
    """
    if ll is None:
        return []
    results = set()
    for val in ll.split(','):
        if _NUM_RE.match(val):
            results.add(int(val))
        else:
            m = _RANGE_RE.match(val)
            if m is None:
                raise Exception("Unrecognized length specification %s" % ll)
            # renamed from min/max: the original shadowed the builtins
            lo = int(m.group('min'))
            hi = int(m.group('max'))
            results.update(range(lo, hi + 1))
    return sorted(results)
|
def import_locations(self, data):
    """Parse geonames.org country database exports.

    ``import_locations()`` returns a list of :class:`trigpoints.Trigpoint`
    objects generated from the data exported by geonames.org_, expected as
    tab-separated rows in the standard geonames dump column order.  Files in
    this format can be downloaded from the geonames.org_ site on their
    `database export page`_.

    Args:
        data (iter): geonames.org locations data to read
    Returns:
        list: geonames.org identifiers with :class:`Location` objects
    Raises:
        FileFormatError: Unknown file format

    .. _geonames.org: http://www.geonames.org/
    .. _database export page: http://download.geonames.org/export/dump/
    """
    self._data = data
    # geonames dump column order; see the export page's readme
    field_names = ('geonameid', 'name', 'asciiname', 'alt_names', 'latitude', 'longitude', 'feature_class', 'feature_code', 'country', 'alt_country', 'admin1', 'admin2', 'admin3', 'admin4', 'population', 'altitude', 'gtopo30', 'tzname', 'modified_date')
    comma_split = lambda s: s.split(',')
    date_parse = lambda s: datetime.date(*map(int, s.split('-')))
    # each *_or_none parser maps empty fields to None instead of failing
    or_none = lambda x, s: x(s) if s else None
    str_or_none = lambda s: or_none(str, s)
    float_or_none = lambda s: or_none(float, s)
    int_or_none = lambda s: or_none(int, s)
    tz_parse = lambda s: self.timezones[s][0] if self.timezones else None
    # one parser per column, positionally matched with field_names
    field_parsers = (int_or_none, str_or_none, str_or_none, comma_split, float_or_none, float_or_none, str_or_none, str_or_none, str_or_none, comma_split, str_or_none, str_or_none, str_or_none, str_or_none, int_or_none, int_or_none, int_or_none, tz_parse, date_parse)
    data = utils.prepare_csv_read(data, field_names, delimiter=r"	")
    for row in data:
        try:
            for name, parser in zip(field_names, field_parsers):
                row[name] = parser(row[name])
        except ValueError:
            # any unparsable column means this isn't a geonames dump
            raise utils.FileFormatError('geonames.org')
        self.append(Location(**row))
|
def parse_toc(html_content):
    """Parse the table of contents of the given HTML content, but only
    when the ``SHOW_TOC`` config flag is enabled.

    :param html_content: raw HTML content
    :return: tuple (processed HTML, toc list, toc HTML unordered list);
        the last two items are ``None`` when TOC parsing is disabled
    """
    from flask import current_app
    from veripress.model.toc import HtmlTocParser

    if not current_app.config['SHOW_TOC']:
        # TOC disabled: hand the HTML back untouched.
        return html_content, None, None

    parser = HtmlTocParser()
    parser.feed(html_content)
    depth = current_app.config['TOC_DEPTH']
    lowest = current_app.config['TOC_LOWEST_LEVEL']
    rendered_toc = parser.toc_html(depth=depth, lowest_level=lowest)
    toc_entries = parser.toc(depth=depth, lowest_level=lowest)
    return parser.html, toc_entries, rendered_toc
|
def _match_pyephem_snapshot_to_atlas_exposures(self, pyephemDB, exposures, mjd):
    """*match pyephem snapshot to atlas exposures*

    **Key Arguments:**
        - ``pyephemDB`` -- the pyephem solar-system snapshot database
        - ``exposures`` -- the atlas exposures to match against the snapshot
        - ``mjd`` -- the MJD of the pyephem snapshot

    **Return:**
        - ``matchedObjects`` -- these objects matched in the neighbourhood
          of the ATLAS exposures (list of dictionaries)
    """
    self.log.info(
        'starting the ``_match_pyephem_snapshot_to_atlas_exposures`` method')

    global DEG_TO_RAD_FACTOR
    global RAD_TO_DEG_FACTOR
    global moversDict

    e = len(exposures)
    # print() as a function call works under both Python 2 and Python 3;
    # the previous statement form tied this module to Python 2 only
    print("Matching %(e)s ATLAS exposures against the pyephem snapshot for MJD = %(mjd)s" % locals())

    # MAKE SURE HEALPIX SMALL ENOUGH TO MATCH FOOTPRINTS CORRECTLY
    global nside

    # GRAB PARAMETERS FROM SETTINGS FILE
    # NOTE(review): tileSide and magLimit are read (which validates that the
    # settings keys exist) but never used in this method -- confirm whether
    # the worker function reads these values through the settings instead
    tileSide = float(self.settings["pyephem"]["atlas exposure match side"])
    magLimit = float(self.settings["pyephem"]["magnitude limit"])

    # EXPLODE OUT THE PYEPHEM DATABASE
    ra = pyephemDB["ra_deg"]
    dec = pyephemDB["dec_deg"]
    healpix = pyephemDB["healpix"]
    objects = pyephemDB["object_name"]
    mpc_numbers = pyephemDB["mpc_number"]
    mag = pyephemDB["mag"]

    # INDEX PYEPHEM MOVERS IN DICTIONARY BY HEALPIX ID
    moversDict = defaultdict(list)
    for p, r, d, o, m, g in zip(healpix, ra, dec, objects, mpc_numbers, mag):
        moversDict[p].append({
            "object_name": o,
            "ra_deg": r,
            "dec_deg": d,
            "mpc_number": m,
            "mag": g
        })

    # MATCH THE PYEPHEM MOVERS AGAINST THE ATLAS EXPOSURES
    # (moversDict is consumed by the worker via the module-level global)
    matchedObjects = []
    results = fmultiprocess(
        log=self.log,
        function=_match_single_exposure_against_pyephem_db,
        timeout=120,
        inputArray=exposures)
    for r in results:
        matchedObjects += r

    self.log.info(
        'completed the ``_match_pyephem_snapshot_to_atlas_exposures`` method')
    return matchedObjects
|
def _get_acorn(self, method, *items):
    """Gets either a slice or an item from an array. Used for the __getitem__
    and __getslice__ special methods of the sub-classed array.

    Args:
        method (str): one of ['slice', 'item'].
        *items: index/slice arguments forwarded to the underlying
            ndarray accessor.
    """
    # IMPORTANT!! I lost two hours because the ndarray becomes unstable if you
    # don't call the original method first. Somehow passing the array instance to
    # other methods changed its internal representation and made it unusable by
    # the original numpy functions. Putting them first makes it work.

    # Because we had to subclass numpy.ndarray, the original methods get
    # stuck in an infinite loop (max. recursion depth exceeded errors). So,
    # we instead grab the reference to the original ndarray object.
    if method == "slice":
        r = np.ndarray.__acornext__.__getslice__(self, *items)
    else:
        r = np.ndarray.__acornext__.__getitem__(self, *items)

    # `decorating`/`streamlining` look like module-level state flags that
    # suppress logging while acorn itself is busy -- TODO confirm.
    if not (decorating or streamlining):
        from acorn.logging.decoration import (pre, post, _fqdn)
        if method == "slice":
            fqdn = "numpy.ndarray.__getslice__"
        else:
            fqdn = "numpy.ndarray.__getitem__"
        preres = pre(fqdn, np.ndarray, 5, self, *items)
        entry, bound, ekey = preres
        # This method can trick acorn into thinking that it is a bound
        # method. We want it to behave like it's not.
        post(fqdn, "numpy", r, entry, np.ndarray, ekey)
    return r
|
def generate(self, z_mu=None):
    """Generate data by sampling from the latent space.

    If ``z_mu`` is not None, data for this point in latent space is
    generated. Otherwise ``z_mu`` is drawn from the prior in latent space.
    """
    if z_mu is None:
        z_mu = np.random.normal(size=self.network_architecture["n_z"])
    # Note: this maps to the mean of the distribution; we could
    # alternatively sample from the Gaussian distribution.
    feed = {self.z: z_mu}
    return self.sess.run(self.x_reconstr_mean, feed_dict=feed)
|
def text_log_errors(self, request, project, pk=None):
    """Return the text log errors associated with the given job."""
    try:
        job = Job.objects.get(repository__name=project, id=pk)
    except Job.DoesNotExist:
        return Response("No job with id: {0}".format(pk),
                        status=HTTP_404_NOT_FOUND)

    queryset = TextLogError.objects.filter(step__job=job)
    # pull related rows eagerly to avoid N+1 queries during serialization
    queryset = queryset.select_related("_metadata", "_metadata__failure_line")
    queryset = queryset.prefetch_related("classified_failures", "matches")
    queryset = queryset.order_by('id')

    serializer = serializers.TextLogErrorSerializer(
        queryset, many=True, read_only=True)
    return Response(serializer.data)
|
def card_bundler(provider: Provider, deck: Deck, tx: dict) -> CardBundle:
    '''each blockchain transaction can contain multiple cards,
    wrapped in bundles. This method finds and returns those bundles.'''
    blockhash = tx["blockhash"]
    txid = tx["txid"]
    return CardBundle(
        deck=deck,
        blockhash=blockhash,
        txid=txid,
        timestamp=tx["time"],
        blockseq=tx_serialization_order(provider, blockhash, txid),
        blocknum=provider.getblock(blockhash)["height"],
        sender=find_tx_sender(provider, tx),
        vouts=tx["vout"],
        tx_confirmations=tx["confirmations"],
    )
|
def get_instance_property(instance, property_name):
    """Retrieves property of an instance, keeps retrying until getting a non-None"""
    name = get_name(instance)
    value = None
    while value is None:
        try:
            value = getattr(instance, property_name)
            if value is None:
                print(f"retrieving {property_name} on {name} produced None, retrying")
                time.sleep(RETRY_INTERVAL_SEC)
                # note: a failure here is handled by the except clause below
                instance.reload()
        except Exception as e:
            print(f"retrieving {property_name} on {name} failed with {e}, retrying")
            time.sleep(RETRY_INTERVAL_SEC)
            try:
                instance.reload()
            except Exception:
                pass
    return value
|
def apply_rectwv_coeff(reduced_image, rectwv_coeff, args_resampling=2,
                       args_ignore_dtu_configuration=True, debugplot=0):
    """Compute rectification and wavelength calibration coefficients.

    Parameters
    ----------
    reduced_image : HDUList object
        Image with preliminary basic reduction: bpm, bias, dark and
        flatfield.
    rectwv_coeff : RectWaveCoeff instance
        Rectification and wavelength calibration coefficients for the
        particular CSU configuration.
    args_resampling : int
        1: nearest neighbour, 2: flux preserving interpolation.
    args_ignore_dtu_configuration : bool
        If True, ignore differences in DTU configuration.
    debugplot : int
        Debugging level for messages and plots. For details see
        'numina.array.display.pause_debugplot.py'.

    Returns
    -------
    rectwv_image : HDUList object
        Rectified and wavelength calibrated image.

    Raises
    ------
    ValueError
        If filter/grism names in the header do not match the calibration
        tags, or if DTU configurations differ and are not ignored.
    """
    logger = logging.getLogger(__name__)

    # header and data array (use deepcopy to avoid modifying
    # reduced_image[0].header as a side effect)
    header = copy.deepcopy(reduced_image[0].header)
    image2d = reduced_image[0].data

    # apply global offsets
    image2d = apply_integer_offsets(
        image2d=image2d,
        offx=rectwv_coeff.global_integer_offset_x_pix,
        offy=rectwv_coeff.global_integer_offset_y_pix
    )

    # check grism and filter
    filter_name = header['filter']
    logger.info('Filter: ' + filter_name)
    if filter_name != rectwv_coeff.tags['filter']:
        raise ValueError('Filter name does not match!')
    grism_name = header['grism']
    logger.info('Grism: ' + grism_name)
    if grism_name != rectwv_coeff.tags['grism']:
        raise ValueError('Grism name does not match!')

    # read the DTU configuration from the image header
    dtu_conf = DtuConfiguration.define_from_header(header)
    # retrieve DTU configuration from RectWaveCoeff object
    dtu_conf_calib = DtuConfiguration.define_from_dictionary(
        rectwv_coeff.meta_info['dtu_configuration']
    )
    # check that the DTU configuration employed to obtain the calibration
    # corresponds to the DTU configuration in the input FITS file
    if dtu_conf != dtu_conf_calib:
        if args_ignore_dtu_configuration:
            logger.warning('DTU configuration differences found!')
        else:
            logger.warning('DTU configuration from image header:')
            logger.warning(dtu_conf)
            logger.warning('DTU configuration from master calibration:')
            logger.warning(dtu_conf_calib)
            raise ValueError("DTU configurations do not match!")
    else:
        logger.info('DTU configuration match!')

    # valid slitlet numbers
    list_valid_islitlets = list(range(1, EMIR_NBARS + 1))
    for idel in rectwv_coeff.missing_slitlets:
        list_valid_islitlets.remove(idel)
    logger.debug('Valid slitlet numbers:\n' + str(list_valid_islitlets))

    # relevant wavelength calibration parameters for rectified and wavelength
    # calibrated image
    wv_parameters = set_wv_parameters(filter_name, grism_name)
    crpix1_enlarged = wv_parameters['crpix1_enlarged']
    crval1_enlarged = wv_parameters['crval1_enlarged']
    cdelt1_enlarged = wv_parameters['cdelt1_enlarged']
    naxis1_enlarged = wv_parameters['naxis1_enlarged']

    # initialize rectified and wavelength calibrated image
    naxis2_enlarged = EMIR_NBARS * EMIR_NPIXPERSLIT_RECTIFIED
    image2d_rectwv = np.zeros((naxis2_enlarged, naxis1_enlarged),
                              dtype='float32')

    # main loop
    logger.info('Applying rectification and wavelength calibration')
    logger.info('RectWaveCoeff uuid={}'.format(rectwv_coeff.uuid))
    # progress string: '.' marks a processed slitlet, 'i' an ignored one
    cout = '0'
    for islitlet in range(1, EMIR_NBARS + 1):
        if islitlet in list_valid_islitlets:
            # define Slitlet2D object
            slt = Slitlet2D(islitlet=islitlet,
                            rectwv_coeff=rectwv_coeff,
                            debugplot=debugplot)

            # extract (distorted) slitlet from the initial image
            slitlet2d = slt.extract_slitlet2d(image2d)

            # rectify slitlet
            slitlet2d_rect = slt.rectify(slitlet2d,
                                         resampling=args_resampling)

            # wavelength calibration of the rectifed slitlet
            slitlet2d_rect_wv = resample_image2d_flux(
                image2d_orig=slitlet2d_rect,
                naxis1=naxis1_enlarged,
                cdelt1=cdelt1_enlarged,
                crval1=crval1_enlarged,
                crpix1=crpix1_enlarged,
                coeff=slt.wpoly
            )

            # minimum and maximum useful row in the full 2d rectified image
            # (starting from 0)
            i1 = slt.iminslt - 1
            i2 = slt.imaxslt

            # minimum and maximum scan in the rectified slitlet
            # (in pixels, from 1 to NAXIS2)
            ii1 = slt.min_row_rectified
            ii2 = slt.max_row_rectified + 1

            # save rectified slitlet in its corresponding location within
            # the full 2d rectified image
            image2d_rectwv[i1:i2, :] = slitlet2d_rect_wv[ii1:ii2, :]

            # include scan range in FITS header
            header['imnslt' + str(islitlet).zfill(2)] = \
                slt.iminslt, 'minimum Y pixel of useful slitlet region'
            header['imxslt' + str(islitlet).zfill(2)] = \
                slt.imaxslt, 'maximum Y pixel of useful slitlet region'

            # determine useful channel region in each spectrum and include
            # that information in FITS header
            jminslt = []
            jmaxslt = []
            for idum in range(ii1, ii2 + 1):
                jminmax = find_pix_borders(
                    slitlet2d_rect_wv[idum, :],
                    sought_value=0
                )
                if jminmax != (-1, naxis1_enlarged):
                    jminslt.append(jminmax[0])
                    jmaxslt.append(jminmax[1])
            if len(jminslt) > 0:
                slt.jminslt = min(jminslt) + 1
                slt.jmaxslt = max(jmaxslt) + 1
            header['jmnslt' + str(islitlet).zfill(2)] = \
                slt.jminslt, 'minimum X pixel of useful slitlet region'
            header['jmxslt' + str(islitlet).zfill(2)] = \
                slt.jmaxslt, 'maximum X pixel of useful slitlet region'
            cout += '.'
        else:
            # include scan and channel range in FITS header
            header['imnslt' + str(islitlet).zfill(2)] = \
                0, 'minimum Y pixel of useful slitlet region'
            header['imxslt' + str(islitlet).zfill(2)] = \
                0, 'maximum Y pixel of useful slitlet region'
            header['jmnslt' + str(islitlet).zfill(2)] = \
                0, 'minimum X pixel of useful slitlet region'
            header['jmxslt' + str(islitlet).zfill(2)] = \
                0, 'maximum X pixel of useful slitlet region'
            cout += 'i'
        if islitlet % 10 == 0:
            if cout != 'i':
                cout = str(islitlet // 10)
        logger.info(cout)

    # update wavelength calibration in FITS header
    logger.info('Updating image header')
    for keyword in ['crval1', 'crpix1', 'crval2', 'crpix2']:
        if keyword in header:
            header.remove(keyword)
    header['crpix1'] = (crpix1_enlarged, 'reference pixel')
    header['crval1'] = (crval1_enlarged, 'central wavelength at crpix1')
    header['cdelt1'] = (cdelt1_enlarged, 'linear dispersion (Angstrom/pixel)')
    header['cunit1'] = ('Angstrom', 'units along axis1')
    header['ctype1'] = 'WAVELENGTH'
    header['crpix2'] = (0.0, 'reference pixel')
    header['crval2'] = (0.0, 'central value at crpix2')
    header['cdelt2'] = (1.0, 'increment')
    header['ctype2'] = 'PIXEL'
    header['cunit2'] = ('Pixel', 'units along axis2')
    # remove any leftover rotation/scale matrix keywords
    for keyword in ['cd1_1', 'cd1_2', 'cd2_1', 'cd2_2',
                    'PCD1_1', 'PCD1_2', 'PCD2_1', 'PCD2_2',
                    'PCRPIX1', 'PCRPIX2']:
        if keyword in header:
            header.remove(keyword)

    # update history in FITS header
    header['history'] = 'Boundary parameters uuid:' + \
        rectwv_coeff.meta_info['origin']['bound_param'][4:]
    if 'master_rectwv' in rectwv_coeff.meta_info['origin']:
        header['history'] = \
            'MasterRectWave uuid:' + \
            rectwv_coeff.meta_info['origin']['master_rectwv'][4:]
    header['history'] = 'RectWaveCoeff uuid:' + rectwv_coeff.uuid
    header['history'] = 'Rectification and wavelength calibration time ' \
        + datetime.now().isoformat()

    logger.info('Generating rectified and wavelength calibrated image')
    rectwv_image = fits.PrimaryHDU(data=image2d_rectwv, header=header)
    return fits.HDUList([rectwv_image])
|
def set_object(self, object, logmsg=None):  # @ReservedAssignment
    """Special version which checks if the head-log needs an update as well

    :param object: the object (commit-ish) this reference should point to
    :param logmsg: if not None, message to record in the reflog
    :return: self"""
    oldbinsha = None
    if logmsg is not None:
        head = self.repo.head
        # remember the previous commit sha only when HEAD points at us,
        # so we can append a matching HEAD reflog entry afterwards
        if not head.is_detached and head.ref == self:
            oldbinsha = self.commit.binsha
        # END handle commit retrieval
    # END handle message is set

    super(Reference, self).set_object(object, logmsg)

    if oldbinsha is not None:
        # /* from refs.c in git-source
        # * Special hack: If a branch is updated directly and HEAD
        # * points to it (may happen on the remote side of a push
        # * for example) then logically the HEAD reflog should be
        # * updated too.
        # * A generic solution implies reverse symref information,
        # * but finding all symrefs pointing to the given branch
        # * would be rather costly for this rare event (the direct
        # * update of a branch) to be worth it. So let's cheat and
        # * check with HEAD only which should cover 99% of all usage
        # * scenarios (even 100% of the default ones).
        self.repo.head.log_append(oldbinsha, logmsg)
    # END check if the head

    return self
|
def Run(self):
    """Event loop.

    Polls for work via RunOnce(); when idle, backs off from the short
    polling interval to the regular one. Exits (joining the thread pool)
    on KeyboardInterrupt.
    """
    if data_store.RelationalDBEnabled():
        # register handlers on the relational data store so messages and
        # flow-processing requests are delivered to this worker
        data_store.REL_DB.RegisterMessageHandler(
            self._ProcessMessageHandlerRequests,
            self.well_known_flow_lease_time,
            limit=100)
        data_store.REL_DB.RegisterFlowProcessingHandler(self.ProcessFlow)

    try:
        while 1:
            processed = self.RunOnce()
            if processed == 0:
                # nothing to do: poll fast right after recent activity,
                # otherwise fall back to the slower polling interval
                if time.time() - self.last_active > self.SHORT_POLL_TIME:
                    interval = self.POLLING_INTERVAL
                else:
                    interval = self.SHORT_POLLING_INTERVAL
                time.sleep(interval)
            else:
                self.last_active = time.time()
    except KeyboardInterrupt:
        logging.info("Caught interrupt, exiting.")
        self.thread_pool.Join()
|
def get_key(self, key, request_only=False):
    """Get a data key value for each resolved package.

    Args:
        key (str): String key of property, eg 'tools'.
        request_only (bool): If True, only return the key from resolved
            packages that were also present in the request.

    Returns:
        Dict of {pkg-name: (variant, value)}.
    """
    requested = set(x.name for x in self._package_requests if not x.conflict)

    result = {}
    for variant in self.resolved_packages:
        if request_only and variant.name not in requested:
            continue
        value = getattr(variant, key)
        if value is not None:
            result[variant.name] = (variant, value)
    return result
|
def change_directory(path=None):
    """Context manager that changes directory and resets it when exiting

    >>> with change_directory('/tmp'):
    >>>     pass
    """
    if path is None:
        # nothing to change; just hand control back
        yield
        return
    try:
        oldpwd = getcwd()
        logger.debug('changing directory from %s to %s' % (oldpwd, path))
        chdir(path)
        yield
    finally:
        chdir(oldpwd)
|
def _get_request(self, auth=None):
    '''Return an http request object

    auth: Auth data to use

    Returns:
        A HSRequest object'''
    # fall back to the stored credentials when no (truthy) auth is given
    credentials = auth or self.auth
    request = HSRequest(credentials, self.env)
    request.response_callback = self.response_callback
    self.request = request
    return request
|
def _to_dict ( self ) :
"""Return a json dictionary representing this model ."""
|
_dict = { }
if hasattr ( self , 'age' ) and self . age is not None :
_dict [ 'age' ] = self . age . _to_dict ( )
if hasattr ( self , 'gender' ) and self . gender is not None :
_dict [ 'gender' ] = self . gender . _to_dict ( )
if hasattr ( self , 'face_location' ) and self . face_location is not None :
_dict [ 'face_location' ] = self . face_location . _to_dict ( )
return _dict
|
def get_command(self, command_input, docker_object=None, buffer=None, size=None):
    """return command instance which is the actual command to be executed

    :param command_input: str, command name and its args: "command arg arg2=val opt"
    :param docker_object:
    :param buffer:
    :param size: tuple, so we can call urwid.keypress(size, ...)
    :return: instance of Command
    """
    logger.debug("get command for command input %r", command_input)
    if not command_input:
        # noop, don't do anything
        return

    if command_input[0] in ["/"]:  # we could add here !, @, ...
        command_name = command_input[0]
        unparsed_command_args = shlex.split(command_input[1:])
    else:
        tokens = shlex.split(command_input)
        command_name = tokens[0]
        unparsed_command_args = tokens[1:]

    try:
        CommandClass = commands_mapping[command_name]
    except KeyError:
        logger.info("no such command: %r", command_name)
        raise NoSuchCommand("There is no such command: %s" % command_name)

    cmd = CommandClass(ui=self.ui, docker_backend=self.docker_backend,
                       docker_object=docker_object, buffer=buffer, size=size)
    cmd.process_args(unparsed_command_args)
    return cmd
|
def verify_constraints(constraints):
    """Verify values returned from :meth:`make_constraints`.

    Used internally during the :meth:`build` process.

    :param constraints: value returned from :meth:`make_constraints`
    :type constraints: :class:`list`
    :raises ValueError: if verification fails
    """
    # verify return is a list
    if not isinstance(constraints, list):
        raise ValueError(
            "invalid type returned by make_constraints: %r (must be a list)"
            % constraints)

    # verify each list element is a Constraint instance
    for constraint in constraints:
        if not isinstance(constraint, Constraint):
            # message previously misspelled the class name ("Constriant")
            raise ValueError(
                "invalid constraint type: %r (must be a Constraint)"
                % constraint)
|
def _complete_parsers_with_converters(self, parser, parser_supported_type,
                                      desired_type,
                                      matching_c_generic_to_type,
                                      matching_c_approx_to_type,
                                      matching_c_exact_to_type):
    """Internal method to create parsing chains made of a parser and converters
    from the provided lists. Once again a JOKER for a type means 'joker' here.

    :param parser: the base parser to chain converters onto
    :param parser_supported_type: the type produced by the base parser
    :param desired_type: the type requested by the caller (may be a Union
        or TypeVar; it is resolved into alternatives below)
    :param matching_c_generic_to_type: generic converters towards the type
    :param matching_c_approx_to_type: approximate converters towards the type
    :param matching_c_exact_to_type: exact converters towards the type
    :return: six lists of parsing chains, ordered so the preferred (strict,
        exact) matches come last
    """
    matching_p_generic, matching_p_generic_with_approx_chain, \
        matching_p_approx, matching_p_approx_with_approx_chain, \
        matching_p_exact, matching_p_exact_with_approx_chain = [], [], [], [], [], []

    # resolve Union and TypeVar
    desired_types = get_alternate_types_resolving_forwardref_union_and_typevar(
        desired_type)

    for desired_type in desired_types:
        # first transform any 'Any' type requirement into the official class for that
        desired_type = get_validated_type(desired_type, 'desired_type',
                                          enforce_not_joker=False)

        # ---- Generic converters - only if the parsed type is not already 'any'
        if not is_any_type(parser_supported_type):
            for cv in matching_c_generic_to_type:
                # if the converter can attach to this parser, we have a matching parser!
                # -- start from strict
                if cv.is_able_to_convert(strict=True,
                                         from_type=parser_supported_type,
                                         to_type=desired_type):
                    if ParsingChain.are_worth_chaining(
                            parser, parser_supported_type, cv):
                        chain = ParsingChain(
                            parser, cv, strict=True,
                            base_parser_chosen_dest_type=parser_supported_type)
                        # insert it at the beginning since it should have less priority
                        matching_p_generic.append(chain)
                # -- then non-strict
                elif (not self.strict) and cv.is_able_to_convert(
                        strict=False,
                        from_type=parser_supported_type,
                        to_type=desired_type):
                    if ParsingChain.are_worth_chaining(
                            parser, parser_supported_type, cv):
                        chain = ParsingChain(
                            parser, cv, strict=False,
                            base_parser_chosen_dest_type=parser_supported_type)
                        # insert it at the beginning since it should have less priority
                        matching_p_generic_with_approx_chain.append(chain)

        # ---- Approx to_type
        for cv in matching_c_approx_to_type:
            # if the converter can attach to this parser, we have a matching parser!
            # -- start from strict
            if cv.is_able_to_convert(strict=True,
                                     from_type=parser_supported_type,
                                     to_type=desired_type):
                if ParsingChain.are_worth_chaining(
                        parser, parser_supported_type, cv):
                    chain = ParsingChain(
                        parser, cv, strict=True,
                        base_parser_chosen_dest_type=parser_supported_type)
                    # insert it at the beginning since it should have less priority
                    matching_p_approx.append(chain)
            # then non-strict
            elif (not self.strict) and cv.is_able_to_convert(
                    strict=False,
                    from_type=parser_supported_type,
                    to_type=desired_type):
                if ParsingChain.are_worth_chaining(
                        parser, parser_supported_type, cv):
                    chain = ParsingChain(
                        parser, cv, strict=False,
                        base_parser_chosen_dest_type=parser_supported_type)
                    # insert it at the beginning since it should have less priority
                    matching_p_approx_with_approx_chain.append(chain)

        # ---- Exact to_type
        for cv in matching_c_exact_to_type:
            # if the converter can attach to this parser, we have a matching parser!
            if cv.is_able_to_convert(strict=True,
                                     from_type=parser_supported_type,
                                     to_type=desired_type):
                if ParsingChain.are_worth_chaining(
                        parser, parser_supported_type, cv):
                    chain = ParsingChain(
                        parser, cv, strict=True,
                        base_parser_chosen_dest_type=parser_supported_type)
                    # insert it at the beginning since it should have less priority
                    matching_p_exact.append(chain)
            elif (not self.strict) and cv.is_able_to_convert(
                    strict=False,
                    from_type=parser_supported_type,
                    to_type=desired_type):
                if ParsingChain.are_worth_chaining(
                        parser, parser_supported_type, cv):
                    chain = ParsingChain(
                        parser, cv, strict=False,
                        base_parser_chosen_dest_type=parser_supported_type)
                    # insert it at the beginning since it should have less priority
                    matching_p_exact_with_approx_chain.append(chain)

    # Preferred is LAST, so approx should be first
    return matching_p_generic_with_approx_chain, matching_p_generic, \
        matching_p_approx_with_approx_chain, matching_p_approx, \
        matching_p_exact_with_approx_chain, matching_p_exact
|
def register_action(*args, **kwarg):
    '''Decorator for an action, the arguments order is not relevant, but it's best
    to use the same order as in the docopt for clarity.'''
    # the frozenset key makes the registration order-insensitive
    key = frozenset(args)

    def decorator(fun):
        KeywordArgumentParser._action_dict[key] = fun
        return fun

    return decorator
|
async def _location_auth_protect(self, location):
    '''Checks to see if the new location is
    1. The same top level domain
    2. As or more secure than the current connection type

    Returns:
        True (bool): If the current top level domain is the same
            and the connection type is equally or more secure.
        False otherwise.
    '''
    netloc_sans_port = self.host.split(':')[0]
    # NOTE(review): assumes _WWX_MATCH always matches a (possibly empty)
    # "www"-style prefix; if it can return None this raises TypeError --
    # confirm the pattern.
    netloc_sans_port = netloc_sans_port.replace(
        (re.match(_WWX_MATCH, netloc_sans_port)[0]), '')
    base_domain = '.'.join(netloc_sans_port.split('.')[-2:])

    l_scheme, l_netloc, _, _, _, _ = urlparse(location)
    location_sans_port = l_netloc.split(':')[0]
    location_sans_port = location_sans_port.replace(
        (re.match(_WWX_MATCH, location_sans_port)[0]), '')
    location_domain = '.'.join(location_sans_port.split('.')[-2:])

    if base_domain != location_domain:
        # Different registrable domain: never forward credentials.
        # (Previously this path fell off the end and returned None; None is
        # falsy, so truthiness-testing callers behave identically.)
        return False
    # Scheme strings compare lexicographically ('http' < 'https'), so a
    # downgrade to a less secure scheme is rejected.
    return l_scheme >= self.scheme
|
def get_targets(self):
    """Set submit target, queue URL and log URL based on the XML root tag.

    Raises:
        Dump2PolarionException: when the root tag is not recognized.
    """
    tag = self.xml_root.tag
    if tag == "testcases":
        # "testcase_taget" (sic) is kept as a fallback so configs that
        # still use the historical misspelled key keep working
        self.submit_target = (self.config.get("testcase_target") or
                              self.config.get("testcase_taget"))
        self.queue_url = self.config.get("testcase_queue")
        self.log_url = self.config.get("testcase_log")
    elif tag == "testsuites":
        self.submit_target = self.config.get("xunit_target")
        self.queue_url = self.config.get("xunit_queue")
        self.log_url = self.config.get("xunit_log")
    elif tag == "requirements":
        self.submit_target = self.config.get("requirement_target")
        self.queue_url = self.config.get("requirement_queue")
        self.log_url = self.config.get("requirement_log")
    else:
        raise Dump2PolarionException(
            "Failed to submit to Polarion - submit target not found")
|
def parse_session_cookie(cookie_to_cook):
    """Extract the integer remi session id from an HTTP Cookie header value.

    cookie_to_cook = http_header['cookie']

    :param cookie_to_cook: raw Cookie header string, e.g. "a=1; remi_session=42"
    :return: the session id as int, or None when absent or not an integer
    """
    session_value = None
    for tok in cookie_to_cook.split(";"):
        if 'remi_session=' in tok:
            try:
                # last matching token wins, as in the original scan
                session_value = int(tok.replace('remi_session=', ''))
            except ValueError:
                # non-numeric session value: ignore and keep scanning
                # (previously a bare except, which also hid real bugs)
                pass
    return session_value
|
def delete_node_1ton(node_list, begin, node, end):  # type: ([], LinkedNode, LinkedNode, LinkedNode) -> []
    """delete the node which has 1-input and n-output

    Splices ``node`` out of the linked graph: ``begin``'s successors adopt
    ``node``'s successors, and each ``end`` node's precedence is rewired
    back to ``begin``. Returns ``node_list`` with ``node`` removed.
    """
    if end is None:
        # NOTE(review): this assert always fails when end is None, so the
        # fallback assignment below is unreachable. Confirm whether callers
        # may legitimately pass end=None or whether this is a guard.
        assert end is not None
        end = node.successor
    elif not isinstance(end, list):
        end = [end]

    if any(e_.in_or_out for e_ in end):
        # if the end is output node, the output name will be kept
        # to avoid the model output name updating.
        begin.out_redirect(node.single_input, node.single_output)
    else:
        for ne_ in end:
            target_var_name = node.single_input
            # since the output info is never updated, except the final.
            assert target_var_name in begin.output.values()
            ne_.in_redirect(node.single_output, target_var_name)

    # splice node out of begin's successor list and adopt its successors
    begin.successor = [v_ for v_ in begin.successor if v_ != node] + node.successor
    # rewire each end node's precedence entries from node back to begin
    for ne_ in end:
        ne_.precedence = [begin if v_ == node else v_ for v_ in ne_.precedence]
    node_list.remove(node)
    return node_list
|
def changes(self):
    """Collects all changes that have been performed on the monitored path,
    returning them as a (changed, deleted) tuple of relative paths.

    Folder entries carry a trailing ``os.sep`` so they can be told apart
    from files with the same name.
    """
    deleted = []
    # first pass: drop tracked entries that vanished or flipped file<->dir
    for path in list(self.files):
        isdir = path.endswith(os.sep)
        abspath = os.path.join(self.path, path)
        try:
            is_deleted = (not os.path.exists(abspath) or   # actually deleted
                          os.path.isdir(abspath) != isdir  # changed from/to folder
                          )
        except EnvironmentError:
            # file is basically inaccessible at this point so we're gonna
            # assume that it was deleted
            is_deleted = True
        if is_deleted:
            deleted.append(path)
            del self.files[path]

    changed = []
    # second pass: walk the tree for new folders and new/modified files
    for folder, subfolders, subfiles in os.walk(self.path):
        for path in subfolders:
            path = os.path.join(folder, path)
            path = os.path.normcase(os.path.relpath(path, self.path))
            path += os.sep
            if path not in self.files:
                # don't really care about folder mtime
                self.files[path] = 0
                changed.append(path)
        for path in subfiles:
            actual_path = path = os.path.join(folder, path)
            path = os.path.normcase(os.path.relpath(path, self.path))
            try:
                mtime = os.path.getmtime(actual_path)
                if path not in self.files:
                    # new file; set its mtime to 0 because it will be
                    # compared in the next few lines
                    self.files[path] = 0
                if mtime > self.files[path]:
                    # file has been changed since last check
                    self.files[path] = mtime
                    changed.append(path)
            except EnvironmentError:
                # in 99% of the cases the file has been deleted while
                # iterating the parent folder; if the file was previously
                # being handled, then stop handling it; otherwise ignore
                if path in self.files:
                    deleted.append(path)
                    del self.files[path]
    self.updated = False
    return changed, deleted
|
def close(self):
    """Release all resources associated with this factory."""
    if self.mdr is None:
        # already closed: repeated close() calls are a no-op
        return
    exc = (None, None, None)
    # The bare `except:` clauses below are deliberate: every cleanup step
    # must be attempted, with any failure remembered and re-raised only
    # after all resources have been released.
    try:
        self.cursor.close()
    except:
        exc = sys.exc_info()
    try:
        # hand pending exception info to the context manager; a truthy
        # return from __exit__ means the exception was handled
        if self.mdr.__exit__(*exc):
            exc = (None, None, None)
    except:
        exc = sys.exc_info()
    self.mdr = None
    self.cursor = None
    if exc != (None, None, None):
        six.reraise(*exc)
|
def _get_component ( self , string , initial_pos ) :
'''given a string and a position , return both an updated position and
either a Component Object or a String back to the caller'''
|
add_code = string [ initial_pos : initial_pos + self . ADDR_CODE_LENGTH ]
if add_code == 'REM' :
raise ish_reportException ( "This is a remarks record" )
if add_code == 'EQD' :
raise ish_reportException ( "This is EQD record" )
initial_pos += self . ADDR_CODE_LENGTH
try :
useable_map = self . MAP [ add_code ]
except :
raise BaseException ( "Cannot find code %s in string %s (%d)." % ( add_code , string , initial_pos ) )
# if there is no defined length , then read next three chars to get it
# this only applies to REM types , which have 3 chars for the type , then variable
if useable_map [ 1 ] is False :
chars_to_read = string [ initial_pos + self . ADDR_CODE_LENGTH : initial_pos + ( self . ADDR_CODE_LENGTH * 2 ) ]
chars_to_read = int ( chars_to_read )
initial_pos += ( self . ADDR_CODE_LENGTH * 2 )
else :
chars_to_read = useable_map [ 1 ]
new_position = initial_pos + chars_to_read
string_value = string [ initial_pos : new_position ]
try :
object_value = useable_map [ 2 ] ( )
object_value . loads ( string_value )
except IndexError as err :
object_value = string_value
return ( new_position , [ add_code , object_value ] )
|
def build_url(self):
    """Only logs that this URL is unknown."""
    super(UnknownUrl, self).build_url()
    if not self.is_ignored():
        self.set_result(_("URL is unrecognized or has invalid syntax"),
                        valid=False)
        return
    self.add_info(_("%(scheme)s URL ignored.") %
                  {"scheme": self.scheme.capitalize()})
    self.set_result(_("ignored"))
|
def raise_for_old_graph(graph):
    """Raise an ImportVersionWarning if the BEL graph was produced by a
    legacy version of PyBEL.

    :raises ImportVersionWarning: If the BEL graph was produced by a legacy
        version of PyBEL
    """
    version = tokenize_version(graph.pybel_version)
    if not version >= PYBEL_MINIMUM_IMPORT_VERSION:
        raise ImportVersionWarning(version, PYBEL_MINIMUM_IMPORT_VERSION)
|
def grow(self):
    """Spawn one additional worker via the pool's factory and start it."""
    worker = self.worker_factory(self)
    worker.start()
    # Track the new pool size.
    self._size = self._size + 1
|
def reset(self):
    """Clear the recurrent state: one empty tensor per layer (plus one extra)."""
    # Use the first parameter as a template so the new tensors share its
    # device and dtype; .new(0) allocates an empty (size-0) tensor.
    template = next(self.parameters()).data
    self.hidden = [template.new(0) for _ in range(self.n_layers + 1)]
|
def p_inheritance(self, p):
    """inheritance : EXTENDS type_ref
                   | empty"""
    # NOTE: the docstring above is the ply grammar rule — do not edit it casually.
    # p[1] is the EXTENDS token for the first production, or None for `empty`.
    if p[1]:
        if p[2].nullable:
            # A parent (extended) type reference must not be nullable.
            msg = 'Reference cannot be nullable.'
            self.errors.append((msg, p.lineno(1), self.path))
        else:
            # Valid inheritance: propagate the parent type_ref as this rule's value.
            p[0] = p[2]
|
def png_segno(data='QR Code Symbol'):
    """Render *data* as a QR code (error level M) to a PNG under out/ using segno."""
    qr = segno.make_qr(data, error='m')
    qr.save('out/segno_%s.png' % data, scale=10, addad=False)
|
def _lpad(self, length, pad=' '):
    """Returns string of given length by truncating (on right)
    or padding (on left) the original string.

    Parameters
    ----------
    length : int
    pad : string, default is ' '

    Examples
    --------
    >>> import ibis
    >>> table = ibis.table([('strings', 'string')])
    >>> expr = table.strings.lpad(5, '-')
    >>> expr = ibis.literal('a').lpad(5, '-')  # 'a' becomes '----a'
    >>> expr = ibis.literal('abcdefg').lpad(5, '-')  # 'abcdefg' becomes 'abcde'  # noqa: E501

    Returns
    -------
    padded : string
    """
    node = ops.LPad(self, length, pad)
    return node.to_expr()
|
def block_anyfilter(parser, token):
    """Turn any template filter into a blocktag.

    Usage::

        {% load libs_tags %}
        {% block_anyfilter django.template.defaultfilters.truncatewords_html 15 %}
        // Something complex that generates html output
        {% endblockanyfilter %}
    """
    bits = token.contents.split()
    # Consume everything up to the closing tag, then drop the closing tag itself.
    nodelist = parser.parse(('endblockanyfilter',))
    parser.delete_first_token()
    filter_path, filter_args = bits[1], bits[2:]
    return BlockAnyFilterNode(nodelist, filter_path, *filter_args)
|
def seconds_until_renew(self):
    """Return the number of seconds from now until the scheduled renew time.

    The result is negative when the leader election is running late.
    """
    # Compare in the renew time's own timezone (naive vs naive, aware vs aware).
    now = datetime.now(self.renew_time.tzinfo)
    return (self.renew_time - now).total_seconds()
|
def wrap_arguments(args=None):
    """Wrap a list of (name, value) tuples in XML ready for a SOAP request.

    Args:
        args (list): a list of (name, value) tuples specifying the
            name of each argument and its value, e.g.
            ``[('InstanceID', 0), ('Speed', 1)]``. The value can be a
            string or anything with a string representation. The
            arguments are XML-escaped and wrapped in <name>...</name> tags.

    Returns:
        str: the concatenated ``<name>value</name>`` fragments, e.g.
        ``<InstanceID>0</InstanceID><Speed>1</Speed>``.
    """
    if args is None:
        args = []
    tags = []
    for name, value in args:
        # "%s" % value coerces any value to text. The extra entity map makes
        # double quotes safe too (escape() only handles &, < and > by default).
        # Fixed: the entity literal was corrupted to an invalid token; it must
        # be the string "&quot;".
        escaped = escape("%s" % value, {'"': "&quot;"})
        tags.append("<{name}>{value}</{name}>".format(name=name, value=escaped))
    return "".join(tags)
|
def _compute_f1 ( self , C , mag , rrup ) :
"""Compute f1 term ( eq . 4 , page 105)"""
|
r = np . sqrt ( rrup ** 2 + C [ 'c4' ] ** 2 )
f1 = ( C [ 'a1' ] + C [ 'a12' ] * ( 8.5 - mag ) ** C [ 'n' ] + ( C [ 'a3' ] + C [ 'a13' ] * ( mag - C [ 'c1' ] ) ) * np . log ( r ) )
if mag <= C [ 'c1' ] :
f1 += C [ 'a2' ] * ( mag - C [ 'c1' ] )
else :
f1 += C [ 'a4' ] * ( mag - C [ 'c1' ] )
return f1
|
def keep(self, diff):
    """Mark this diff (or volume) to be kept in path."""
    path = self.extraKeys[diff]
    if not path.startswith("/"):
        # NOTE(review): non-absolute paths appear to already live in the
        # user area — they are just dropped from extraKeys. Confirm.
        logger.debug("Keeping %s", path)
        del self.extraKeys[diff]
        return
    # Copy into self.userPath, if not there already
    keyName = self._keyName(diff.toUUID, diff.fromUUID, path)
    newPath = os.path.join(self.userPath, os.path.basename(path))
    newName = self._keyName(diff.toUUID, diff.fromUUID, newPath)
    # _skipDryRun(logger) returns a callable that logs the action and is
    # truthy (skips the copy) when running in dry-run mode.
    if not self._skipDryRun(logger)("Copy %s to %s", keyName, newName):
        self.bucket.copy_key(newName, self.bucket.name, keyName)
|
def elapsed(t0=0.0):
    """Measure wall-clock time elapsed since *t0*.

    Returns:
        now: the absolute time now (seconds since the epoch)
        dt_str: the elapsed time as a string, truncated to four decimal
            places, with a ' second'/' seconds' suffix
    """
    now = time()
    # Truncate (not round) to 4 decimal places for a stable display string.
    truncated = Decimal(str(now - t0)).quantize(Decimal('.0001'), rounding=ROUND_DOWN)
    unit = ' second' if truncated <= 1 else ' seconds'
    return now, str(truncated) + unit
|
def wiki_download(url):
    '''scrape friendly: sleep 20 seconds between each request, cache each result.

    Returns a ``(content, was_cached)`` tuple.

    NOTE(review): no sleeping happens inside this function — presumably the
    caller is responsible for the 20-second pacing mentioned above; confirm.
    '''
    DOWNLOAD_TMPL = '../data/tv_and_movie_freqlist%s.html'
    # The final URL path segment identifies the frequency range and names the cache file.
    freq_range = url[url.rindex('/') + 1:]
    tmp_path = DOWNLOAD_TMPL % freq_range
    if os.path.exists(tmp_path):
        # Cache hit: serve the previously downloaded page.
        print('cached.......', url)
        with codecs.open(tmp_path, 'r', 'utf8') as f:
            return f.read(), True
    with codecs.open(tmp_path, 'w', 'utf8') as f:
        print('downloading...', url)
        # Custom User-Agent so the scraper is identifiable.
        req = urllib.request.Request(url, headers={'User-Agent': 'zxcvbn'})
        response = urllib.request.urlopen(req)
        result = response.read().decode('utf8')
        f.write(result)
    return result, False
|
def maps_json():
    """Generate a JSON object bridging the web interface and the map source collection.

    All attributes relevant for OpenLayers are converted into JSON and
    served through this route.

    Returns:
        Response: All map sources as a JSON object keyed by source id.
    """
    sources = {}
    for source_id, map_source in app.config["mapsources"].items():
        layers = []
        for layer in map_source.layers:
            layers.append({
                "min_zoom": layer.min_zoom,
                "max_zoom": layer.max_zoom,
                # Strip "$" from the tile URL template for the client.
                "tile_url": layer.tile_url.replace("$", ""),
            })
        sources[source_id] = {
            "id": map_source.id,
            "name": map_source.name,
            "folder": map_source.folder,
            "min_zoom": map_source.min_zoom,
            "max_zoom": map_source.max_zoom,
            "layers": layers,
        }
    return jsonify(sources)
|
def rsa_decrypt(encrypted_data, pem, password=None):
    """Decrypt *encrypted_data* with an RSA private key using OAEP(SHA-1) padding.

    :param encrypted_data: ciphertext, coerced to bytes via ``to_binary``
    :param pem: RSA private key content (PEM), coerced to bytes
    :param password: optional pass phrase for the private key
    :return: the decrypted bytes
    """
    # Imports are kept local so `cryptography` stays an optional dependency.
    from cryptography.hazmat.backends import default_backend
    from cryptography.hazmat.primitives import serialization
    from cryptography.hazmat.primitives import hashes
    from cryptography.hazmat.primitives.asymmetric import padding
    ciphertext = to_binary(encrypted_data)
    key_bytes = to_binary(pem)
    private_key = serialization.load_pem_private_key(key_bytes, password, backend=default_backend())
    oaep_padding = padding.OAEP(mgf=padding.MGF1(hashes.SHA1()), algorithm=hashes.SHA1(), label=None)
    return private_key.decrypt(ciphertext, oaep_padding)
|
def __op(name, val, fmt=None, const=False, consume=0, produce=0):
    """Provide sensible defaults for an opcode and register it in __OPTABLE
    under both its (lowercased) name and its numeric value."""
    name = name.lower()
    # fmt may be a struct format string or a callable unpacker; normalize a
    # string into a callable that unpacks its pre-compiled struct.
    if isinstance(fmt, str):
        fmt = partial(_unpack, compile_struct(fmt))
    entry = (name, val, fmt, consume, produce, const)
    # Neither key may already be registered.
    assert (name not in __OPTABLE)
    assert (val not in __OPTABLE)
    __OPTABLE[name] = entry
    __OPTABLE[val] = entry
    return val
|
def to_dataframe(self):
    """Build a pandas DataFrame from the effect collection, one row per effect."""
    # Variant attributes copied verbatim when the effect's variant is present.
    variant_properties = (
        "contig", "start", "ref", "alt",
        "is_snv", "is_transversion", "is_transition",
    )

    def effect_to_row(effect):
        row = OrderedDict()
        row['variant'] = str(effect.variant.short_description)
        for prop in variant_properties:
            # getattr's default covers the case where effect.variant is None.
            row[prop] = getattr(effect.variant, prop, None)
        row['gene_id'] = effect.gene_id
        row['gene_name'] = effect.gene_name
        row['transcript_id'] = effect.transcript_id
        row['transcript_name'] = effect.transcript_name
        row['effect_type'] = type(effect).__name__
        row['effect'] = effect.short_description
        return row

    return pd.DataFrame.from_records([effect_to_row(effect) for effect in self])
|
def _input_as_multiline_string ( self , data ) :
"""Write multiline string to temp file , return filename
data : a multiline string to be written to a file ."""
|
self . _input_filename = self . getTmpFilename ( self . WorkingDir , suffix = '.fasta' )
with open ( self . _input_filename , 'w' ) as f :
f . write ( data )
return self . _input_filename
|
def codegen(lang,  # type: str
            i,  # type: List[Dict[Text, Any]]
            schema_metadata,  # type: Dict[Text, Any]
            loader  # type: Loader
            ):  # type: (...) -> None
    """Generate classes with loaders for the given Schema Salad description.

    Emits either Python (to stdout) or Java source; raises for any other
    *lang* value.
    """
    # Resolve `extends`/`specialize` so each record carries its full field set.
    j = schema.extend_and_specialize(i, loader)
    gen = None  # type: Optional[CodeGenBase]
    if lang == "python":
        gen = PythonCodeGen(sys.stdout)
    elif lang == "java":
        gen = JavaCodeGen(schema_metadata.get("$base", schema_metadata.get("id")))
    else:
        raise Exception("Unsupported code generation language '%s'" % lang)
    assert gen is not None
    gen.prologue()
    document_roots = []
    # First pass: register a loader and vocabulary entry for every named type,
    # so forward references in the second pass resolve.
    for rec in j:
        if rec["type"] in ("enum", "record"):
            gen.type_loader(rec)
            gen.add_vocab(shortname(rec["name"]), rec["name"])
    # Second pass: emit enum symbols and full record classes.
    for rec in j:
        if rec["type"] == "enum":
            for symbol in rec["symbols"]:
                gen.add_vocab(shortname(symbol), symbol)
        if rec["type"] == "record":
            if rec.get("documentRoot"):
                document_roots.append(rec["name"])
            field_names = []
            for field in rec.get("fields", []):
                field_names.append(shortname(field["name"]))
            idfield = ""
            for field in rec.get("fields", []):
                if field.get("jsonldPredicate") == "@id":
                    idfield = field.get("name")
            gen.begin_class(rec["name"], aslist(rec.get("extends", [])), rec.get("doc", ""), rec.get("abstract", False), field_names, idfield)
            gen.add_vocab(shortname(rec["name"]), rec["name"])
            # The @id field (if any) gets a dedicated identifier declaration.
            for field in rec.get("fields", []):
                if field.get("jsonldPredicate") == "@id":
                    fieldpred = field["name"]
                    optional = bool("https://w3id.org/cwl/salad#null" in field["type"])
                    uri_loader = gen.uri_loader(gen.type_loader(field["type"]), True, False, None)
                    gen.declare_id_field(fieldpred, uri_loader, field.get("doc"), optional)
                    break
            for field in rec.get("fields", []):
                optional = bool("https://w3id.org/cwl/salad#null" in field["type"])
                type_loader = gen.type_loader(field["type"])
                jld = field.get("jsonldPredicate")
                fieldpred = field["name"]
                if isinstance(jld, MutableMapping):
                    # A mapping jsonldPredicate customizes how the field loads.
                    ref_scope = jld.get("refScope")
                    if jld.get("typeDSL"):
                        type_loader = gen.typedsl_loader(type_loader, ref_scope)
                    elif jld.get("_type") == "@id":
                        type_loader = gen.uri_loader(type_loader, jld.get("identity", False), False, ref_scope)
                    elif jld.get("_type") == "@vocab":
                        type_loader = gen.uri_loader(type_loader, False, True, ref_scope)
                    map_subject = jld.get("mapSubject")
                    if map_subject:
                        type_loader = gen.idmap_loader(field["name"], type_loader, map_subject, jld.get("mapPredicate"))
                    if "_id" in jld and jld["_id"][0] != "@":
                        fieldpred = jld["_id"]
                if jld == "@id":
                    # Already declared via declare_id_field above.
                    continue
                gen.declare_field(fieldpred, type_loader, field.get("doc"), optional)
            gen.end_class(rec["name"], field_names)
    # The document root type accepts any root record, or an array of them.
    root_type = list(document_roots)
    root_type.append({"type": "array", "items": document_roots})
    gen.epilogue(gen.type_loader(root_type))
|
def _read_header(filename):
    """Read the fixed-size text header of a file into a dict.

    Parameters
    ----------
    filename : Path
        path to a single file containing the header

    Returns
    -------
    dict
        header key/value pairs
    """
    with filename.open('rb') as f:
        raw = f.read(HDR_LENGTH).decode()
    header = {}
    for line in raw.split('\n'):
        if '=' not in line:
            continue
        key, value = line.split(' = ')
        # NOTE(review): assumes keys carry a 7-character prefix and values a
        # single trailing character — format-specific, confirm against data.
        header[key.strip()[7:]] = value.strip()[:-1]
    return header
|
def margin_to_exchange(self, symbol, currency, amount):
    """Transfer funds out of the margin account into the spot (exchange) account.

    :param symbol: trading pair symbol
    :param currency: currency to transfer
    :param amount: amount to transfer
    :return: a decorator; the decorated function, when called, receives the
        response of the signed POST to ``/v1/dw/transfer-out/margin``
    """
    params = {'symbol': symbol, 'currency': currency, 'amount': amount}
    path = '/v1/dw/transfer-out/margin'

    # NOTE(review): unusual shape — this returns a decorator rather than
    # performing the request directly; the request fires when the wrapped
    # callable is invoked.
    def _wrapper(_func):
        @wraps(_func)
        def handle():
            # _func receives the raw API response of the signed POST.
            _func(api_key_post(params, path))
        return handle
    return _wrapper
|
def get_cheapest_quotes(self, **params):
    """{API_HOST}/apiservices/browsequotes/v1.0/{market}/{currency}/{locale}/
    {originPlace}/{destinationPlace}/
    {outboundPartialDate}/{inboundPartialDate}
    ?apiKey={apiKey}"""
    # Build the path portion from required + optional parameters, then the URL.
    params_path = self._construct_params(params, self._REQ_PARAMS, self._OPT_PARAMS)
    service_url = "{url}/{params_path}".format(
        url=self.BROWSE_QUOTES_SERVICE_URL, params_path=params_path)
    return self.make_request(service_url, headers=self._headers(), **params)
|
async def disable(self, service, *, reason=None):
    """Change maintenance mode for *service* by delegating to ``maintenance``.

    Parameters:
        service (ObjectID): Service ID
        reason (str): Text string explaining the reason for the
            maintenance-mode change.

    Returns:
        bool: ``True`` on success

    NOTE(review): the original docstring claimed this *enters* maintenance
    mode, but the call passes ``False`` as the enable flag — which reads as
    *leaving* maintenance mode. Confirm against ``maintenance()``'s
    signature before relying on either reading.
    """
    return await self.maintenance(service, False, reason=reason)
|
def delete_router(self, router):
    '''Delete the specified router (resolving its name/ID first).'''
    router_id = self._find_router_id(router)
    result = self.network_conn.delete_router(router=router_id)
    # A falsy client response is reported as plain success (True).
    return result or True
|
def get_json_content(file_path):
    """Load and parse a JSON file.

    Parameters
    ----------
    file_path :
        path to the file

    Returns
    -------
    The decoded JSON content, or ``None`` when the file cannot be opened
    or parsed (the error is printed rather than raised).
    """
    try:
        with open(file_path, 'r') as handle:
            return json.load(handle)
    except (TypeError, OSError, ValueError) as err:
        # TypeError: invalid path object; OSError: missing/unreadable file;
        # ValueError covers json.JSONDecodeError for malformed content.
        # Previously only TypeError was handled, so a missing file or bad
        # JSON crashed despite the function's evident return-None contract.
        print('Error: ', err)
        return None
|
def _load_nucmer_hits(self, infile):
    '''Return a dict mapping reference name -> list of nucmer hits read from infile.'''
    hits = {}
    for alignment in pymummer.coords_file.reader(infile):
        # Group alignments by their reference sequence name.
        hits.setdefault(alignment.ref_name, []).append(alignment)
    return hits
|
def register_user(self, user, allow_login=None, send_email=None, _force_login_without_confirmation=False):
    """Service method to register a user.

    Sends signal `user_registered`.

    Returns True if the user has been logged in, False otherwise.
    """
    # Log the user in immediately only when confirmation isn't required
    # (or is explicitly bypassed)...
    should_login_user = (not self.security.confirmable or self.security.login_without_confirmation or _force_login_without_confirmation)
    # ...and only if the caller didn't forbid it via allow_login.
    should_login_user = (should_login_user if allow_login is None else allow_login and should_login_user)
    if should_login_user:
        user.active = True
    # confirmation token depends on having user.id set, which requires
    # the user be committed to the database
    self.user_manager.save(user, commit=True)
    confirmation_link, token = None, None
    if self.security.confirmable and not _force_login_without_confirmation:
        token = self.security_utils_service.generate_confirmation_token(user)
        confirmation_link = url_for('security_controller.confirm_email', token=token, _external=True)
    # Notify listeners that a registration happened (token may be None).
    user_registered.send(app._get_current_object(), user=user, confirm_token=token)
    # send_email=None falls back to the app-level configuration flag.
    if (send_email or (send_email is None and app.config.SECURITY_SEND_REGISTER_EMAIL)):
        self.send_mail(_('flask_unchained.bundles.security:email_subject.register'), to=user.email, template='security/email/welcome.html', user=user, confirmation_link=confirmation_link)
    if should_login_user:
        return self.login_user(user, force=_force_login_without_confirmation)
    return False
|
def to_csv(self, dest: str) -> None:
    """Save `self.to_df()` to a CSV file at `self.path`/`dest` (no index column)."""
    frame = self.to_df()
    frame.to_csv(self.path / dest, index=False)
|
def show_system_monitor_output_switch_status_component_status_component_state(self, **kwargs):
    """Auto Generated Code"""
    # Build the RPC element tree. (The original also created a throwaway
    # <config> element and immediately discarded it; the document root
    # passed to the callback is <show_system_monitor>.)
    root = ET.Element("show_system_monitor")
    output = ET.SubElement(root, "output")
    switch_status = ET.SubElement(output, "switch-status")
    component_status = ET.SubElement(switch_status, "component-status")
    component_state = ET.SubElement(component_status, "component-state")
    component_state.text = kwargs.pop('component_state')
    callback = kwargs.pop('callback', self._callback)
    return callback(root)
|
def _process_long_opt(self, rargs, values):
    """SCons-specific processing of long options.

    This is copied directly from the normal
    optparse._process_long_opt() method, except that, if configured
    to do so, we catch the exception thrown when an unknown option
    is encountered and just stick it back on the "leftover" arguments
    for later (re-)processing.
    """
    arg = rargs.pop(0)
    # Value explicitly attached to arg?  Pretend it's the next
    # argument.
    if "=" in arg:
        (opt, next_arg) = arg.split("=", 1)
        rargs.insert(0, next_arg)
        had_explicit_value = True
    else:
        opt = arg
        had_explicit_value = False
    try:
        opt = self._match_long_opt(opt)
    except optparse.BadOptionError:
        if self.preserve_unknown_options:
            # SCons-specific: if requested, add unknown options to
            # the "leftover arguments" list for later processing.
            self.largs.append(arg)
            if had_explicit_value:
                # The unknown option will be re-processed later,
                # so undo the insertion of the explicit value.
                rargs.pop(0)
            return
        raise
    option = self._long_opt[opt]
    if option.takes_value():
        nargs = option.nargs
        if nargs == '?':
            # Optional value: only consume one if it was attached via '='.
            if had_explicit_value:
                value = rargs.pop(0)
            else:
                value = option.const
        elif len(rargs) < nargs:
            # Not enough command-line arguments remain for this option.
            if nargs == 1:
                if not option.choices:
                    self.error(_("%s option requires an argument") % opt)
                else:
                    msg = _("%s option requires an argument " % opt)
                    msg += _("(choose from %s)" % ', '.join(option.choices))
                    self.error(msg)
            else:
                self.error(_("%s option requires %d arguments") % (opt, nargs))
        elif nargs == 1:
            value = rargs.pop(0)
        else:
            # Multi-argument option: take the next nargs values as a tuple.
            value = tuple(rargs[0:nargs])
            del rargs[0:nargs]
    elif had_explicit_value:
        self.error(_("%s option does not take a value") % opt)
    else:
        value = None
    option.process(opt, value, values, self)
|
def _Reg2Py(data, size, data_type):
    """Convert a raw Windows Registry value into the matching Python type."""
    if data_type == winreg.REG_DWORD:
        if size == 0:
            return 0
        # DWORD is an unsigned 32-bit integer, see:
        # https://docs.microsoft.com/en-us/openspecs/windows_protocols/ms-dtyp/262627d8-3418-4627-9218-4ffe110850b2
        return ctypes.cast(data, ctypes.POINTER(ctypes.c_uint32)).contents.value
    if data_type == winreg.REG_SZ or data_type == winreg.REG_EXPAND_SZ:
        # UTF-16 data: two bytes per character; strip the trailing NULs.
        return ctypes.wstring_at(data, size // 2).rstrip(u"\x00")
    if data_type == winreg.REG_MULTI_SZ:
        # NUL-separated list of UTF-16 strings.
        return ctypes.wstring_at(data, size // 2).rstrip(u"\x00").split(u"\x00")
    # Any other type is returned as raw bytes (None when empty).
    if size == 0:
        return None
    return ctypes.string_at(data, size)
|
def qos_map_dscp_cos_mark_to(self, **kwargs):
    """Auto Generated Code"""
    # Build the <config> tree for the brocade-qos dscp-cos mark RPC.
    config = ET.Element("config")
    qos = ET.SubElement(config, "qos", xmlns="urn:brocade.com:mgmt:brocade-qos")
    qos_map = ET.SubElement(qos, "map")  # renamed local: avoid shadowing builtin `map`
    dscp_cos = ET.SubElement(qos_map, "dscp-cos")
    ET.SubElement(dscp_cos, "dscp-cos-map-name").text = kwargs.pop('dscp_cos_map_name')
    mark = ET.SubElement(dscp_cos, "mark")
    ET.SubElement(mark, "dscp-in-values").text = kwargs.pop('dscp_in_values')
    ET.SubElement(mark, "to").text = kwargs.pop('to')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
|
def average(self, default=None):
    """Calculate the average value over the time series.

    :param default: Value to return as a default should the calculation
        not be possible (i.e. when the series is empty).
    :return: Float representing the average value, or *default*.
    """
    if not self.values:
        return default
    # numpy.asscalar was deprecated in NumPy 1.16 and removed in 1.23;
    # .item() is the supported way to get a plain Python float.
    return numpy.average(self.values).item()
|
def temporal_from_rdf(period_of_time):
    '''Failsafe parsing of a temporal coverage; returns None on any failure.'''
    try:
        if isinstance(period_of_time, Literal):
            return temporal_from_literal(str(period_of_time))
        if isinstance(period_of_time, RdfResource):
            return temporal_from_resource(period_of_time)
    except Exception:
        # Parsing can fail in many ways, but it must never break the whole
        # dataset import — log with traceback for later investigation.
        log.warning('Unable to parse temporal coverage', exc_info=True)
|
def _to_dict ( self ) :
"""Return a json dictionary representing this model ."""
|
_dict = { }
if hasattr ( self , 'gateway_id' ) and self . gateway_id is not None :
_dict [ 'gateway_id' ] = self . gateway_id
if hasattr ( self , 'name' ) and self . name is not None :
_dict [ 'name' ] = self . name
if hasattr ( self , 'status' ) and self . status is not None :
_dict [ 'status' ] = self . status
if hasattr ( self , 'token' ) and self . token is not None :
_dict [ 'token' ] = self . token
if hasattr ( self , 'token_id' ) and self . token_id is not None :
_dict [ 'token_id' ] = self . token_id
return _dict
|
def parse_grid_xml(self):
    """Parse the shakemap grid.xml and populate event metadata and MMI data.

    Reads event attributes (id, magnitude, lon/lat, description, depth,
    timestamp), the grid specification (bounding box, rows, columns), and
    the whitespace-separated <grid_data> block, keeping the LON, LAT and
    MMI columns. Optionally smooths the MMI matrix with a gaussian filter
    (numpy or scipy, per ``self.smoothing_method``). Results are stored on
    ``self`` (``mmi_data`` as a list of (lon, lat, mmi) tuples).

    grid.xml is preferred over grid.xyz/event.xml because it carries clear,
    unequivocal metadata for every field in a single file.

    :raises GridXmlParseError: if the grid file cannot be parsed.
    """
    LOGGER.debug('ParseGridXml requested.')
    grid_path = self.grid_file_path()
    try:
        document = minidom.parse(grid_path)
        shakemap_grid_element = document.getElementsByTagName('shakemap_grid')
        shakemap_grid_element = shakemap_grid_element[0]
        self.event_id = shakemap_grid_element.attributes['event_id'].nodeValue
        event_element = document.getElementsByTagName('event')
        event_element = event_element[0]
        self.magnitude = float(event_element.attributes['magnitude'].nodeValue)
        self.longitude = float(event_element.attributes['lon'].nodeValue)
        self.latitude = float(event_element.attributes['lat'].nodeValue)
        self.location = event_element.attributes['event_description'].nodeValue.strip()
        self.depth = float(event_element.attributes['depth'].nodeValue)
        # Get the date - it's going to look something like this:
        # 2012-08-07T01:55:12WIB
        time_stamp = event_element.attributes['event_timestamp'].nodeValue
        # Note the timezone here is inconsistent with YZ from grid.xml
        # use the latter
        self.time_zone = time_stamp[19:]
        self.extract_date_time(time_stamp)
        specification_element = document.getElementsByTagName('grid_specification')
        specification_element = specification_element[0]
        self.x_minimum = float(specification_element.attributes['lon_min'].nodeValue)
        self.x_maximum = float(specification_element.attributes['lon_max'].nodeValue)
        self.y_minimum = float(specification_element.attributes['lat_min'].nodeValue)
        self.y_maximum = float(specification_element.attributes['lat_max'].nodeValue)
        # QgsRectangle takes (xmin, ymin, xmax, ymax) — here it is fed
        # (x_min, y_max, x_max, y_min) and normalizes internally.
        self.grid_bounding_box = QgsRectangle(self.x_minimum, self.y_maximum, self.x_maximum, self.y_minimum)
        # float() first: the attribute may be written as e.g. "161.0".
        self.rows = int(float(specification_element.attributes['nlat'].nodeValue))
        self.columns = int(float(specification_element.attributes['nlon'].nodeValue))
        data_element = document.getElementsByTagName('grid_data')
        data_element = data_element[0]
        data = data_element.firstChild.nodeValue
        # Extract the 1,2 and 5th (MMI) columns and populate mmi_data
        longitude_column = 0
        latitude_column = 1
        mmi_column = 4
        lon_list = []
        lat_list = []
        mmi_list = []
        for line in data.split('\n'):
            if not line:
                continue
            tokens = line.split(' ')
            longitude = tokens[longitude_column]
            latitude = tokens[latitude_column]
            mmi = tokens[mmi_column]
            lon_list.append(float(longitude))
            lat_list.append(float(latitude))
            mmi_list.append(float(mmi))
        if self.smoothing_method == NUMPY_SMOOTHING:
            LOGGER.debug('We are using NUMPY smoothing')
            # Infer the grid shape from how often the first lon/lat repeat.
            ncols = len(np.where(np.array(lon_list) == lon_list[0])[0])
            nrows = len(np.where(np.array(lat_list) == lat_list[0])[0])
            # reshape mmi_list to 2D array to apply gaussian filter
            Z = np.reshape(mmi_list, (nrows, ncols))
            # smooth MMI matrix
            mmi_list = convolve(Z, gaussian_kernel(self.smoothing_sigma))
            # reshape array back to 1D long list of mmi
            mmi_list = np.reshape(mmi_list, ncols * nrows)
        elif self.smoothing_method == SCIPY_SMOOTHING:
            LOGGER.debug('We are using SCIPY smoothing')
            # Imported lazily so scipy stays an optional dependency.
            from scipy.ndimage.filters import gaussian_filter
            ncols = len(np.where(np.array(lon_list) == lon_list[0])[0])
            nrows = len(np.where(np.array(lat_list) == lat_list[0])[0])
            # reshape mmi_list to 2D array to apply gaussian filter
            Z = np.reshape(mmi_list, (nrows, ncols))
            # smooth MMI matrix
            # Help from Hadi Ghasemi
            mmi_list = gaussian_filter(Z, self.smoothing_sigma)
            # reshape array back to 1D long list of mmi
            mmi_list = np.reshape(mmi_list, ncols * nrows)
        # zip lists as list of tuples
        self.mmi_data = list(zip(lon_list, lat_list, mmi_list))
    except Exception as e:
        LOGGER.exception('Event parse failed')
        raise GridXmlParseError('Failed to parse grid file.\n%s\n%s' % (e.__class__, str(e)))
|
def metrics(*metrics):
    """Given a list of metrics, provides a builder that it turns computes metrics from a column.

    See the documentation of [[Summarizer]] for an example.

    The following metrics are accepted (case sensitive):
     - mean: a vector that contains the coefficient-wise mean.
     - variance: a vector tha contains the coefficient-wise variance.
     - count: the count of all vectors seen.
     - numNonzeros: a vector with the number of non-zeros for each coefficients
     - max: the maximum for each coefficient.
     - min: the minimum for each coefficient.
     - normL2: the Euclidean norm for each coefficient.
     - normL1: the L1 norm of each coefficient (sum of the absolute values).

    :param metrics:
        metrics that can be provided.
    :return:
        an object of :py:class:`pyspark.ml.stat.SummaryBuilder`

    Note: Currently, the performance of this interface is about 2x~3x slower then using the RDD
    interface.
    """
    active_context = SparkContext._active_spark_context
    # Build the JVM-side metrics builder and wrap it for Python callers.
    java_builder = JavaWrapper._new_java_obj(
        "org.apache.spark.ml.stat.Summarizer.metrics",
        _to_seq(active_context, metrics))
    return SummaryBuilder(java_builder)
|
def serve_forever(self):
    """Run the DAAP server. Start by advertising the server via Bonjour. Then
    serve requests until CTRL+C is received.

    :raises ValueError: if the provider has no server or no databases to publish.
    """
    # Verify that the provider has a server.
    if self.provider.server is None:
        raise ValueError("Cannot start server because the provider has no server to "
                         "publish.")
    # Verify that the provider has a database to advertise.
    if not self.provider.server.databases:
        raise ValueError("Cannot start server because the provider has no databases to "
                         "publish.")
    # Create WSGI server and run it.
    self.server = WSGIServer((self.ip, self.port), application=self.app)
    # Register Bonjour.
    if self.bonjour:
        self.bonjour.publish(self)
    # Start server until finished
    try:
        self.server.serve_forever()
    except KeyboardInterrupt:
        # Ctrl+C is the normal way to stop; fall through to cleanup.
        pass
    finally:
        # Unregister Bonjour
        if self.bonjour:
            self.bonjour.unpublish(self)
|
def geohash(self, key, member, *members, **kwargs):
    """Returns members of a geospatial index as standard geohash strings.

    :rtype: list[str or bytes or None]
    """
    # Forward the GEOHASH command with all requested members.
    command_args = (key, member) + members
    return self.execute(b'GEOHASH', *command_args, **kwargs)
|
def get_config_value(self, section_name, option, default_option="default"):
    """Read a value from the configuration, with a default.

    Args:
        section_name (str): name of the section in the configuration from which
            the option should be found.
        option (str): name of the configuration option.
        default_option (str): name of the default configuration option whose
            value should be returned if the requested option is not found.

    Returns:
        str: the value from the ini file.
    """
    # Lazily parse the ini file once and cache the parser on the instance.
    if self.config is None:
        self.config = configparser.ConfigParser()
        self.config.read(self.ini_file_name)
    if option:
        try:
            return self.config.get(section_name, option)
        except configparser.NoOptionError:
            log.debug(
                "Didn't find a configuration option for '%s' section and '%s' option",
                section_name,
                option,
            )
    # Fall back to the section's default option.
    return self.config.get(section_name, default_option)
|
def sentiment(self):
    """Average sentiment of the document. Sentiment must be enabled in the
    XML output for a value to be present.

    :getter: returns average sentiment of the document
    :type: float
    """
    # Compute lazily and memoize on first access.
    if self._sentiment is None:
        nodes = self._xml.xpath('/root/document/sentences')
        if nodes:
            self._sentiment = float(nodes[0].get("averageSentiment", 0))
        else:
            # No <sentences> element: leave the cached value as None.
            self._sentiment = None
    return self._sentiment
|
def doWaitWebRequest(url, method="GET", data=None, headers=None):
    """Same as doWebRequest, but with built-in wait-looping: retry the
    request (blocking on the URL between attempts) until it succeeds.

    Args:
        url: target URL.
        method: HTTP method, defaults to "GET".
        data: optional request body.
        headers: optional dict of extra HTTP headers.

    Returns:
        tuple: ``(response, content)`` from the first successful
        ``doWebRequest`` call.
    """
    # Fix: the original used a mutable default argument (``headers={}``),
    # which is shared across calls and can leak header mutations between
    # callers. Use a None sentinel and build a fresh dict per call.
    if headers is None:
        headers = {}
    while True:
        try:
            response, content = doWebRequest(url, method, data, headers)
        except urllib2.URLError:
            # Endpoint unreachable: block until the URL answers, then retry.
            waitForURL(url)
            continue
        return response, content
|
def dataframe(self):
    """Returns a pandas DataFrame containing all other class properties and
    values. The index for the DataFrame is the string abbreviation of the
    team, such as 'HOU'.
    """
    # Every column name doubles as the attribute name on this instance, so
    # the row can be built generically. Tuple order matches the original
    # hand-written mapping, which fixes the DataFrame's column order.
    columns = (
        'abbreviation', 'at_bats', 'average_batter_age',
        'average_pitcher_age', 'away_losses', 'away_record', 'away_wins',
        'balks', 'bases_on_balls', 'bases_on_walks_given',
        'bases_on_walks_given_per_nine_innings', 'batters_faced',
        'batting_average', 'complete_game_shutouts', 'complete_games',
        'doubles', 'earned_runs_against', 'earned_runs_against_plus',
        'extra_inning_losses', 'extra_inning_record', 'extra_inning_wins',
        'fielding_independent_pitching', 'games', 'games_finished',
        'grounded_into_double_plays', 'hit_pitcher', 'hits', 'hits_allowed',
        'hits_per_nine_innings', 'home_losses', 'home_record', 'home_runs',
        'home_runs_against', 'home_runs_per_nine_innings', 'home_wins',
        'innings_pitched', 'intentional_bases_on_balls',
        'interleague_record', 'last_ten_games_record',
        'last_thirty_games_record', 'last_twenty_games_record', 'league',
        'losses', 'losses_last_ten_games', 'losses_last_thirty_games',
        'losses_last_twenty_games', 'losses_vs_left_handed_pitchers',
        'losses_vs_right_handed_pitchers', 'losses_vs_teams_over_500',
        'losses_vs_teams_under_500', 'luck', 'name', 'number_of_pitchers',
        'number_players_used', 'on_base_percentage',
        'on_base_plus_slugging_percentage',
        'on_base_plus_slugging_percentage_plus',
        'opposing_runners_left_on_base', 'plate_appearances',
        'pythagorean_win_loss', 'rank', 'record_vs_left_handed_pitchers',
        'record_vs_right_handed_pitchers', 'record_vs_teams_over_500',
        'record_vs_teams_under_500', 'run_difference',
        'runners_left_on_base', 'runs', 'runs_against',
        'runs_allowed_per_game', 'runs_batted_in', 'sacrifice_flies',
        'sacrifice_hits', 'saves', 'shutouts', 'simple_rating_system',
        'single_run_losses', 'single_run_record', 'single_run_wins',
        'slugging_percentage', 'stolen_bases', 'streak',
        'strength_of_schedule', 'strikeouts',
        'strikeouts_per_base_on_balls', 'strikeouts_per_nine_innings',
        'times_caught_stealing', 'times_hit_by_pitch', 'times_struck_out',
        'total_bases', 'total_runs', 'triples', 'whip', 'wild_pitches',
        'win_percentage', 'wins', 'wins_last_ten_games',
        'wins_last_thirty_games', 'wins_last_twenty_games',
        'wins_vs_left_handed_pitchers', 'wins_vs_right_handed_pitchers',
        'wins_vs_teams_over_500', 'wins_vs_teams_under_500',
    )
    row = {name: getattr(self, name) for name in columns}
    return pd.DataFrame([row], index=[self._abbreviation])
|
def update(self, client=None, unique_writer_identity=False):
    """API call: update sink configuration via a PUT request.

    See
    https://cloud.google.com/logging/docs/reference/v2/rest/v2/projects.sinks/update

    :type client: :class:`~google.cloud.logging.client.Client` or
                  ``NoneType``
    :param client: the client to use.  If not passed, falls back to the
                   ``client`` stored on the current sink.

    :type unique_writer_identity: bool
    :param unique_writer_identity: (Optional) determines the kind of
                                   IAM identity returned as
                                   writer_identity in the new sink.
    """
    # Resolve the effective client, push the current local configuration
    # to the API, and refresh this object from the server's response.
    api = self._require_client(client).sinks_api
    self._update_from_api_repr(
        api.sink_update(
            self.project,
            self.name,
            self.filter_,
            self.destination,
            unique_writer_identity=unique_writer_identity,
        )
    )
|
def get_form(self, request, obj=None, **kwargs):
    """Extend the form for the given plugin with the form
    ``SharableCascadeForm``.
    """
    # Mix SharableCascadeForm in front of whatever form was requested
    # (explicitly via kwargs, or this plugin's default form).
    base_form = kwargs.pop('form', self.form)
    form_class = type(str('ExtSharableForm'), (SharableCascadeForm, base_form), {})
    # Restrict the shared-glossary choices to entries for this plugin type.
    form_class.base_fields['shared_glossary'].limit_choices_to = {
        'plugin_type': self.__class__.__name__,
    }
    kwargs['form'] = form_class
    return super(SharableGlossaryMixin, self).get_form(request, obj, **kwargs)
|
def send_events(self, events):
    """Sends multiple events to Riemann in a single message.

    :param events: A list or iterable of ``Event`` objects
    :returns: The response message from Riemann
    """
    # Pack every event into one protobuf Msg and ship it in a single
    # round trip over the transport.
    msg = riemann_client.riemann_pb2.Msg()
    for evt in events:
        msg.events.add().MergeFrom(evt)
    return self.transport.send(msg)
|
def delete_entry(self, fit=None, index=None):
    """Delete the single entry from this object's logger that corresponds to
    either the passed-in fit or index.

    Note: this function mutates this object's logger and ``fit_list``; when
    deleting more than one entry, pass items to delete from the highest
    index to the lowest, otherwise the remaining indices shift and the
    wrong entries may be removed.

    @param: fit -> Fit object to delete from this objects logger
    @param: index -> integer index of the entry to delete from this objects logger
    """
    # Only an index supplied: resolve the fit (and its specimen) from it.
    if type(index) == int and not fit:
        fit, specimen = self.fit_list[index]
    # Fit supplied alongside an integer index: re-resolve index/specimen
    # from the fit itself, treating the passed index as a hint only.
    # NOTE(review): when ``fit`` is given but ``index`` is not an int,
    # neither branch runs and ``specimen`` stays unbound below, which would
    # raise NameError -- confirm callers always pass a usable combination.
    if fit and type(index) == int:
        for i, (f, s) in enumerate(self.fit_list):
            if fit == f:
                index, specimen = i, s
                break
    # Deleting the currently selected fit clears the selection.
    if index == self.current_fit_index:
        self.current_fit_index = None
    # Stale-state guard: if the fit is no longer recorded for this
    # specimen, only remove the logger row and bail out.
    if fit not in self.parent.pmag_results_data['specimens'][specimen]:
        print(("cannot remove item (entry #: " + str(index) + ") as it doesn't exist, this is a dumb bug contact devs"))
        self.logger.DeleteItem(index)
        return
    # Remove the fit from the specimen's results, the local list, and the
    # on-screen logger, keeping all three in sync.
    self.parent.pmag_results_data['specimens'][specimen].remove(fit)
    del self.fit_list[index]
    self.logger.DeleteItem(index)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.