text stringlengths 6-947k | repo_name stringlengths 5-100 | path stringlengths 4-231 | language stringclasses 1 value | license stringclasses 15 values | size int64 6-947k | score float64 0-0.34 |
---|---|---|---|---|---|---|
# -*- coding: utf8 -*-
import MapModel as Mm
from MapObjects.MovingObject import MovingObject
import Statistic
class Enemy(MovingObject):
dx = [0, 1, 0, -1]
dy = [1, 0, -1, 0]
def __init__(self, health, width, height):
super().__init__()
self.gold = 2
self.able_to_go = {Mm.Player, Mm.Ground, Mm.Arrow,
Mm.HeartStone, Mm.Trap, Mm.Wall, Mm.SpiralTower}
self.unpretty = 10000
self.damage = 1
self.health = health
self.field = [[-1 for _ in range(height)] for _ in range(width)]
self.lazy_collision_init = self.collision_init
def on_dead(self):
Statistic.total_killed_enemies += 1
Statistic.player_gold += self.gold
return self.get_from_below()
def get_info(self):
info = "Панда\n"
info += "Здоровье: {0}\n".format(self.health)
info += "Урон: {0}\n".format(self.damage)
return info
def collision_init(self):
# noinspection PyUnusedLocal
@self.collide_registrar(Mm.Ground)
def ground_collide(obj, ground):
return None, obj
@self.collide_registrar(Mm.HeartStone)
def heartstone_collide(obj, heartstone):
heartstone.attack(obj.damage * (obj.health / heartstone.defence))
return None, heartstone
@self.collide_registrar(Mm.Arrow)
def arrow_collide(obj, arrow):
obj.health -= arrow.damage
if self.health > 0:
return None, obj
else:
self.on_dead()
return None, obj.get_from_below()
@self.collide_registrar(Mm.Player)
def player_collide(obj, player):
player.health -= obj.damage * (obj.health / player.damage)
obj.health -= player.damage * (obj.health / player.damage)
if player.health > 0:
obj.on_dead()
return None, player
else:
return None, obj
@self.collide_registrar(Mm.Trap)
def trap_collide(obj, structure):
structure.act_on_movable(obj)
if obj.health > 0:
obj.from_below = structure
return None, obj
else:
obj.on_dead()
return None, structure.check()
@self.collide_registrar(Mm.SpiralTower)
@self.collide_registrar(Mm.Wall)
def wall_collide(obj, wall):
damage = obj.damage * obj.health
if damage > wall.health:
obj.health -= wall.health / obj.damage
return None, obj
else:
wall.health -= damage
return None, wall.check()
| v-samodelkin/TowerDefence | MapObjects/Enemy.py | Python | mit | 2,746 | 0 |
import os, requests, time
import mydropbox
edmunds = mydropbox.get_keys('edmunds')
api_key = edmunds['api_key']
api_secret = edmunds['api_secret']
vin = mydropbox.read_dropbox_file(os.path.join('Records', 'Financials', 'Car', 'VIN')).strip()
r = requests.get("https://api.edmunds.com/api/vehicle/v2/vins/%s?&fmt=json&api_key=%s" % (vin, api_key))
car = r.json()
time.sleep(1)
# Pulled from above query
styleid = str(car['years'][0]['styles'][0]['id'])
optionids = []
for optcat in car['options']:
for opt in optcat['options']:
optionids.append(str(opt['id']))
colorids = []
for colorcat in car['colors']:
for opt in colorcat['options']:
colorids.append(str(opt['id']))
# User-supplied
condition = "Clean"
mileage = "6000"
zipcode = "60613"
r = requests.get(
"https://api.edmunds.com/v1/api/tmv/tmvservice/calculateusedtmv" +
"?styleid=%s" % styleid +
''.join(map(lambda optionid: "&optionid=%s" % optionid, optionids)) +
''.join(map(lambda colorid: "&colorid=%s" % colorid, colorids)) +
"&condition=%s" % condition +
"&mileage=%s" % mileage +
"&zip=%s" % zipcode +
"&fmt=json&api_key=%s" % api_key
)
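# Hedged aside (illustrative, not part of the original script): the same request could be
# built with requests' own query-string encoding instead of manual string concatenation:
# params = [('styleid', styleid), ('condition', condition), ('mileage', mileage),
#           ('zip', zipcode), ('fmt', 'json'), ('api_key', api_key)]
# params += [('optionid', oid) for oid in optionids]
# params += [('colorid', cid) for cid in colorids]
# r = requests.get('https://api.edmunds.com/v1/api/tmv/tmvservice/calculateusedtmv', params=params)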
data = r.json()
totalWithOptions = data['tmv']['totalWithOptions']
disp = [
('Used Trade-in', 'usedTradeIn'),
('Used Private Party', 'usedPrivateParty'),
('Used TMV Retail', 'usedTmvRetail')
]
total = 0.0
for label, key in disp:
total += totalWithOptions[key]
print("%s: %f" % (label, totalWithOptions[key]))
total /= 3
print("Average: %f" % total)
| smiley325/accounter | ref/edmunds.py | Python | epl-1.0 | 1,537 | 0.001952 |
from django.template.loader import get_template
from . import BaseNotification
class EmailNotification(BaseNotification):
def get_message(self):
template = get_template('user_profile/notification/email/issue.txt')
return template.render({
'issue': self.issue,
'notification_setting': self.notification
})
def send_issue_notification(self):
self.notification.user.email_user(
'New Issue #%s created: %s' % (self.issue.pk, self.issue.title),
self.get_message()
)
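# Illustrative usage sketch (hedged: BaseNotification's constructor is not shown here, so
# the argument names below are assumptions):
# notifier = EmailNotification(notification=notification, issue=issue)
# notifier.send_issue_notification()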
| mcallistersean/b2-issue-tracker | toucan/user_profile/notifications/email.py | Python | mit | 561 | 0.005348 |
#!/usr/bin/env python3
import sys
import dbus
import argparse
parser = argparse.ArgumentParser()
parser.add_argument(
'-t',
'--trunclen',
type=int,
metavar='trunclen'
)
parser.add_argument(
'-f',
'--format',
type=str,
metavar='custom format',
dest='custom_format'
)
parser.add_argument(
'-p',
'--playpause',
type=str,
metavar='play-pause indicator',
dest='play_pause'
)
parser.add_argument(
'-d',
'--default',
type=str,
metavar='string to return when spotify is off',
dest='default')
parser.add_argument(
'--font',
type=str,
metavar='the index of the font to use for the main label',
dest='font'
)
parser.add_argument(
'--playpause-font',
type=str,
metavar='the index of the font to use to display the playpause indicator',
dest='play_pause_font'
)
args = parser.parse_args()
def fix_string(string):
# corrects encoding for the python version used
if sys.version_info.major == 3:
return string
else:
return string.encode('utf-8')
# Default parameters
default = ""
output = fix_string(u'{play_pause} {artist}: {song}')
trunclen = 25
play_pause = fix_string(u'\u25B6,\u23F8') # first character is play, second is paused
label_with_font = '%{{T{font}}}{label}%{{T-}}'
font = args.font
play_pause_font = args.play_pause_font
# parameters can be overwritten by args
if args.trunclen is not None:
trunclen = args.trunclen
if args.custom_format is not None:
output = args.custom_format
if args.play_pause is not None:
play_pause = args.play_pause
if args.default is not None:
default = args.default
try:
session_bus = dbus.SessionBus()
spotify_bus = session_bus.get_object(
'org.mpris.MediaPlayer2.spotify',
'/org/mpris/MediaPlayer2'
)
spotify_properties = dbus.Interface(
spotify_bus,
'org.freedesktop.DBus.Properties'
)
metadata = spotify_properties.Get('org.mpris.MediaPlayer2.Player', 'Metadata')
status = spotify_properties.Get('org.mpris.MediaPlayer2.Player', 'PlaybackStatus')
# Handle play/pause label
play_pause = play_pause.split(',')
if status == 'Playing':
play_pause = play_pause[0]
elif status == 'Paused':
play_pause = play_pause[1]
else:
play_pause = str()
if play_pause_font:
play_pause = label_with_font.format(font=play_pause_font, label=play_pause)
# Handle main label
artist = fix_string(metadata['xesam:artist'][0]) if metadata['xesam:artist'] else ''
song = fix_string(metadata['xesam:title']) if metadata['xesam:title'] else ''
album = fix_string(metadata['xesam:album']) if metadata['xesam:album'] else ''
if not artist and not song and not album:
print('')
else:
if len(song) > trunclen:
song = song[0:trunclen]
song += '...'
if ('(' in song) and (')' not in song):
song += ')'
if font:
artist = label_with_font.format(font=font, label=artist)
song = label_with_font.format(font=font, label=song)
album = label_with_font.format(font=font, label=album)
print(output.format(artist=artist, song=song, play_pause=play_pause, album=album))
except Exception as e:
if isinstance(e, dbus.exceptions.DBusException):
print(default)
else:
print(e)
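# Typical invocation from a polybar-style status script (illustrative; the flags match the
# argparse options defined above):
# python spotify_status.py --trunclen 30 --format '{play_pause} {artist}: {song}' --font 1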
| naegi/dotfiles | home/spotify_status.py | Python | unlicense | 3,414 | 0.003515 |
# -*- coding: utf-8 -*-
# Copyright (c) 2010-2017 Tuukka Turto
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
Tests for ItemAdder
"""
from random import Random
from hamcrest import (assert_that,
greater_than, greater_than_or_equal_to, has_length,
less_than)
from pyherc.data import Model, get_items, add_location_tag
from pyherc.data.effects import EffectHandle
from pyherc.generators.item import (ItemConfiguration, ItemConfigurations,
ItemGenerator, WeaponConfiguration)
#from pyherc.generators.level import ItemAdder, item_by_name, item_by_type
from pyherc.test.matchers import does_have_item, located_in_room
from pyherc.test.builders import LevelBuilder
#TODO: enable later
class ItemAdder():
"""
Tests for ItemAdder
"""
def __init__(self):
"""
Default constructor
"""
self.rng = None
self.level = None
self.item_generator = None
self.configuration = None
self.item_adder = None
self.floor_rock = None
self.wall_empty = None
def setup(self):
"""
Setup the test case
"""
self.floor_rock = 1
self.wall_empty = None
self.rng = Random()
self.level = (LevelBuilder()
.with_size((60, 40))
.with_floor_tile(self.floor_rock)
.with_wall_tile(self.wall_empty)
.build())
add_location_tag(self.level, (10, 10), 'room')
for x_loc in range(11, 30):
add_location_tag(self.level, (x_loc, 10), 'corridor')
item_config = ItemConfigurations(Random())
item_config.add_item(
ItemConfiguration(name = 'dagger',
cost = 2,
weight = 1,
icons = [500],
types = ['weapon',
'light weapon',
'melee',
'simple weapon'],
rarity = 'common',
weapon_configration = WeaponConfiguration(
damage = [(2, 'piercing'),
(2, 'slashing')],
critical_range = 11,
critical_damage = 2,
weapon_class = 'simple')))
item_config.add_item(
ItemConfiguration(name = 'red potion',
cost = 150,
weight = 1,
icons = [100],
types = ['potion'],
rarity = 'rare',
effect_handles = [EffectHandle(
trigger = 'on drink',
effect = 'cure medium wounds',
parameters = None,
charges = 1)]))
self.item_generator = ItemGenerator(item_config)
self.configuration = [item_by_name(3, 4, 'dagger'),
item_by_type(1, 1, 'potion')]
self.item_adder = ItemAdder(self.item_generator,
self.configuration,
self.rng)
self.item_adder.add_items(self.level)
def test_adding_items(self):
"""
Test basic case of adding items on the level
"""
assert_that(list(get_items(self.level)), has_length(greater_than(3)))
assert_that(list(get_items(self.level)), has_length(less_than(6)))
assert_that(self.level, does_have_item('dagger',
greater_than_or_equal_to(3)))
assert_that(self.level, does_have_item('red potion', 1))
def test_adding_to_location(self):
"""
Test that ItemAdder will use location types passed to it
"""
potion = [x for x in get_items(self.level)
if x.name == 'red potion'][0]
location = potion.location
assert_that(located_in_room(potion))
| tuturto/pyherc | src/pyherc/test/unit/test_itemadder.py | Python | mit | 5,532 | 0.009219 |
from threading import Thread
import sys
import imaplib
import time
class Can(Thread):
def __init__(self, id, config, opener, key):
Thread.__init__(self)
self.key = key
self.config = config
self.id = id
self.opener = opener
self.running = True
pass
def run(self):
try:
user = self.config.get('email', 'username')
passwd = self.config.get('email', 'password')
except:
# failed lookup: abort running
return
while self.running:
m = imaplib.IMAP4_SSL('imap.gmail.com')
m.login(user, passwd)
count = m.select('Inbox')[1][0]
r, messages = m.search(None, '(UNSEEN)')
#print messages
for uid in messages[0].split(" "):
r, data = m.fetch(uid, '(ENVELOPE)')
data = data[0]
subject = data.split('"')[3]
if str(self.key) in subject:
r, body = m.fetch(uid, '(BODY[TEXT])')
body = body[0][1].strip()
#print subject
#print body
self.opener(self.id, body)
m.logout()
time.sleep(15)
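# Descriptive note (not in the original): every 15 seconds this loop logs in, scans unseen
# messages, and for any message whose subject contains `self.key` passes the message body
# to the `opener` callback together with this can's id.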
def stop(self):
self.running = False
def setKey(self, key):
self.key = key
if __name__ == "__main__":
cans = {}
def opener(id, ans):
if id not in cans:
return
if ans == "quit":
cans[id].stop()
c = Can(1, None, opener, "non-real")  # Can.__init__ expects (id, config, opener, key); no config in this test stub
cans[c.id] = c
c.start()
| scommab/can-opener | cans/gmail.py | Python | apache-2.0 | 1,364 | 0.026393 |
"""
Students grade peer submissions.
"""
from __future__ import absolute_import
from bok_choy.page_object import PageObject
from bok_choy.promise import Promise
class PeerGradePage(PageObject):
"""
Students grade peer submissions.
"""
url = None
def is_browser_on_page(self):
def _is_correct_page():
is_present = (
self.q(css='div.peer-grading-tools').present or
self.q(css='div.grading-panel.current-state').present
)
return is_present, is_present
return Promise(_is_correct_page, 'On the peer grading page.').fulfill()
@property
def problem_list(self):
"""
Return the list of available problems to peer grade.
"""
return self.q(css='a.problem-button').text
def select_problem(self, problem_name):
"""
Choose the problem with `problem_name` to start grading or calibrating.
"""
index = self.problem_list.index(problem_name) + 1  # CSS :nth-of-type() is 1-based
self.q(css='a.problem-button:nth-of-type({})'.format(index)).first.click()
| ESOedX/edx-platform | common/test/acceptance/pages/lms/peer_grade.py | Python | agpl-3.0 | 1,101 | 0.000908 |
import os, re, sys
import read_dicts
from collections import Counter
import pandas as pd
import numpy as np
import operator
import random
sys.path.append('.')
ENABLE_WRITE = 1
INDEX_NAMES_FILES = '../../data/aaindex/list_of_indices.txt'
def getscores(d, aalist, seq):
score_list = list()
char_freq = dict()
for c in seq:
if c in char_freq:
char_freq[c] += 1
else:
char_freq[c] = 1
for aa in aalist:
score = 0
for k in d[aa].iterkeys():
try:
freq = char_freq[k]
except KeyError:
freq = 0
score += d[aa][k] * freq
score_list.append(str(score))
return '|'.join(score_list)
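# Worked example of the scoring above (hypothetical index values, not real AAindex data):
# with d = {'IDX1': {'A': 1.0, 'C': 2.0}} and seq = 'AAC', char_freq is {'A': 2, 'C': 1},
# so the score for 'IDX1' is 1.0*2 + 2.0*1 = 4.0 and getscores(d, ['IDX1'], 'AAC') == '4.0'.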
def get_specific_label(line):
location_search = re.search(r"(.+(\[)(?P<location1>.+?)(\])( |)$)", line)
location = location_search.group('location1').rstrip().split(',')[0]
if location == 'Plasma membrane':
location = 'Membrane'
return location
def get_general_label(line):
location_search = re.search(r"(.+(\[)(?P<location1>.+?)(\])( |)$)", line)
try:
location = location_search.group('location1')
# funny looking because animal and plants formatting differs
general_location = location.split('(')[0].rstrip().split(',')[0]
except AttributeError:
print('line: ' + line)
# print('location: ' + location)
assert False
if general_location == 'Plasma membrane':
general_location = 'Membrane'
return general_location
def get_general_label_test(file_in):
d = dict()
with open(file_in, 'r') as ifile:
for line in ifile:
if line[0] == '>':
# print(line)
location = get_general_label(line)
if location in d.keys():
d[location] += 1
else:
d[location] = 1
for k, v in d.items():
print('k: %-30s v: %d' % (k, v))
def get_species(line):
location_search = re.search(r"\(sp: (?P<location1>.+?)\)", line)
location = location_search.group('location1').rstrip()
return location
def get_dict_loc_to_score(file_in, group_similar_labels=True):
score_d, corr_d = read_dicts.construct_dicts("../../data/aaindex/aaindex1.txt")
aalist = read_dicts.get_aaindex_list("../../data/aaindex/aaindex_used.txt")
d = dict()
count = 0
entry_count = 0
uniques = set()
with open(file_in, 'r') as ifile:
for i, l in enumerate(ifile):
count = i + 1
print('raw data lines: %d' % count)
with open(file_in, 'r') as ifile:
for i in range(count):
# print "%d of %d lines" % (i+1, count)
l = ifile.readline()
# if i == 1000:
# break
if l[0] == '>':
if group_similar_labels:
location = get_general_label(l)
else:
location = get_specific_label(l)
sp = get_species(l)
else:
seq = ''
seq += l.rstrip()
while True:
x = ifile.tell()
l = ifile.readline()
if l == '': # EOF
# do something
# print seq
if (location != 'NULL') and (location != '\N') and (seq not in uniques):
# try:
# d_sp[sp] += 1
# except KeyError:
# d_sp[sp] = 1
uniques.add(seq)
scores = getscores(score_d, aalist, seq)
try:
d[location].append(scores)
except KeyError:
d[location] = [scores]
entry_count += 1
print('number of entries: %d' % entry_count)
del seq
return d
elif l[0] == '>':
ifile.seek(x)
break
else:
seq += l.rstrip()
# if seq in uniques:
# duplicate_count += 1
# print 'found dup:' + location + ' ' + seq
# print duplicate_count
if (location != 'NULL') and ('\N' not in location) and (seq not in uniques):
uniques.add(seq)
scores = getscores(score_d, aalist, seq)
try:
d[location].append(scores)
except KeyError:
d[location] = [scores]
entry_count += 1
print('number of entries: %d' % entry_count)
del seq
def write_label_score_file(file_in, file_out, write_file=0, outsize='all', group_similar_labels=True,
species='all'):
print('building and writing %s' % file_out)
count = 0
entry_count = 0
duplicate_count = 0
uniques = set()
d_sp = dict()
score_d, corr_d = read_dicts.construct_dicts("../../data/aaindex/aaindex1.txt")
aalist = read_dicts.get_aaindex_list("../../data/aaindex/aaindex_used.txt")
with open(file_in, 'r') as ifile:
for i, l in enumerate(ifile):
count = i + 1
print('raw data lines: %d' % count)
with open(file_in, 'r') as ifile:
with open(file_out, 'a') as ofile:
for i in range(count):
# print "%d of %d lines" % (i+1, count)
l = ifile.readline()
# if i == 1000:
# break
if l[0] == '>':
if group_similar_labels:
location = get_general_label(l)
else:
location = get_specific_label(l)
sp = get_species(l)
else:
seq = ''
seq += l.rstrip()
while True:
x = ifile.tell()
l = ifile.readline()
if l == '': # EOF
# do something
# print seq
if (location != 'NULL') and (location != '\N') and (seq not in uniques) and (
write_file != 0):
if species == 'all' or species == sp:
try:
d_sp[sp] += 1
except KeyError:
d_sp[sp] = 1
uniques.add(seq)
scores = getscores(score_d, aalist, seq)
ofile.write('%s|%s\n' % (location, scores))
entry_count += 1
print('number of entries: %d' % entry_count)
del seq
return
elif l[0] == '>':
ifile.seek(x)
break
else:
seq += l.rstrip()
# if seq in uniques:
# duplicate_count += 1
# print 'found dup:' + location + ' ' + seq
# print duplicate_count
if (location != 'NULL') and ('\N' not in location) and (seq not in uniques) and (write_file != 0):
if species == 'all' or species == sp:
try:
d_sp[sp] += 1
except KeyError:
d_sp[sp] = 1
uniques.add(seq)
scores = getscores(score_d, aalist, seq)
ofile.write('%s|%s\n' % (location, scores))
entry_count += 1
print('number of entries: %d' % entry_count)
if outsize != 'all':
if entry_count == outsize:
print('dic:')
sorted_x = sorted(d_sp.items(), key=operator.itemgetter(1))
print(sorted_x)
break
del seq
def write_sequence_file(file_in, file_out, write_file=0, outsize='all', group_similar_labels=True, species='all'):
print('building and writing %s' % file_out)
location_set, max_len, seq_count, long_count = set(), 0, 0, 0
count = 0
entry_count = 0
uniques = set()
duplicate_count = 0
d_sp = dict()
with open(file_in, 'r') as ifile:
for i, l in enumerate(ifile):
count = i + 1
print('raw data lines: %d' % count)
with open(file_in, 'r') as ifile:
with open(file_out, 'a') as ofile:
for i in range(count):
l = ifile.readline()
if l[0] == '>':
if group_similar_labels:
location = get_general_label(l)
else:
location = get_specific_label(l)
sp = get_species(l)
else:
seq = ''
seq += l.rstrip()
while True:
x = ifile.tell()
l = ifile.readline()
if l == '': # EOF
if (location != 'NULL') and (location != '\N') and (seq not in uniques) and (
write_file != 0):
if species == 'all' or species == sp:
try:
d_sp[sp] += 1
except KeyError:
d_sp[sp] = 1
uniques.add(seq)
ofile.write('%s|%s\n' % (location, seq))
if len(seq) <= 500:
long_count += 1
location_set.add(location)
entry_count += 1
print('number of entries: %d' % entry_count)
del seq
return
elif l[0] == '>':
ifile.seek(x)
break
else:
seq += l.rstrip()
# if seq in uniques:
# duplicate_count += 1
# print 'found dup:' + location + ' ' + seq
# print duplicate_count
if (location != 'NULL') and ('\N' not in location) and (seq not in uniques) and (write_file != 0):
if species == 'all' or species == sp:
try:
d_sp[sp] += 1
except KeyError:
d_sp[sp] = 1
uniques.add(seq)
ofile.write('%s|%s\n' % (location, seq))
if len(seq) <= 500:
long_count += 1
location_set.add(location)
entry_count += 1
print('number of entries: %d' % entry_count)
if outsize != 'all':
if entry_count == outsize:
print('dic:')
sorted_x = sorted(d_sp.items(), key=operator.itemgetter(1))
print(sorted_x)
break
del seq
print("locations: " + str(location_set))
print("maximum sequence length: " + str(max_len))
print("Total sequences: " + str(entry_count))
print("Long sequences: " + str(long_count))
# def write_label_seq_file(file_in, file_out, write_file=0):
# count = 0
# with open(file_in, 'r') as ifile:
# for i, l in enumerate(ifile):
# count = i + 1
# print('num lines: %d' % count)
# with open(file_in, 'r') as ifile:
# with open(file_out, 'a') as ofile:
# for i in range(count):
# l = ifile.readline()
#
# # if i == 1000:
# # break
# if l[0] == '>':
#
# location_search = re.search(r".+(\[)(?P<location>.+?)(\])$", l)
# location = location_search.group('location').rstrip()
# print(location)
#
# else:
# seq = ''
# seq += l.rstrip()
# while True:
# x = ifile.tell()
# l = ifile.readline()
#
# if l == '': # EOF
# # do something
# # print seq
# if location != 'NULL' and write_file != 0:
# ofile.write('%s|%s\n' % (location, seq))
# del seq
#
# return
# elif l[0] == '>':
# ifile.seek(x)
# break
# else:
# seq += l.rstrip()
# # do something
# # print seq + '\n'
# if location != 'NULL' and write_file != 0:
# ofile.write('%s|%s\n' % (location, seq))
# del seq
def find_unique_labels(filename):
with open(filename, 'r') as ifile:
d = dict()
for l in ifile:
label = l.strip().split('|')[0]
if label in d:
d[label] += 1
else:
d[label] = 1
for k, v in d.iteritems():
print('k: %-30s v: %d' % (k, v))
def check_label_seq_file_validity(filename):
print("\nchecking validity of output file...")
non_alpha_count = 0
invalid_label_count = 0
invalid_chars = ['[', ']', '-', ',', '.', '|', '\\']
with open(filename, 'r') as ifile:
for l in ifile:
label, seq = l.strip().split('|')
if not seq.isalpha():
print("non alpha detected in seq!")
non_alpha_count += 1
for i in seq:
if not i.isalpha():
print(i)
if any(c in label for c in invalid_chars):
invalid_label_count += 1
print(label)
if non_alpha_count != 0 or invalid_label_count != 0:
raise Exception("output file not valid")
else:
print("\noutput file seems fine\n")
def read_preprocessed_data(input_file, features_file, exclude_labels_less_than=0, format='default'):
"""
reads in label_scores.txt file and returns the labels and features as lists or dataframes
:param input_file: path to the label_scores.txt file
:param features_file: file listing the feature (index) names used, one per line
:param exclude_labels_less_than: skip labels with fewer occurrences than this value
:param format: 'default' or 'df'; what format to return the labels and features in
:return: (labels, features)
"""
with open(input_file, 'r') as ifile:
lines = [line.rstrip().split('|') for line in ifile.readlines()]
with open(features_file, 'r') as f:
features_used = [line.strip() for line in f.readlines()]
all_labels = [line[0] for line in lines]
occurrences = Counter(all_labels)
labeled_data = [(lines[i][0], map(float, lines[i][1:])) for i in range(len(lines)) if
occurrences[all_labels[i]] >= exclude_labels_less_than]
labels, feature_matrix = zip(*labeled_data)
if format == 'default':
return list(labels), list(feature_matrix) # tuples can make some things harder
elif format == 'df':
data = pd.DataFrame(data=list(feature_matrix), columns=features_used)
labels = pd.DataFrame(data=np.array(labels))
# labels = np.array(labels)
return labels, data
else:
raise Exception('Unknown format %s' % format)
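# Illustrative usage sketch (the paths below are the ones used elsewhere in this script and
# may differ in practice):
# labels, features = read_preprocessed_data('../../data/plants/label_scores.txt',
#                                           '../../data/aaindex/aaindex_used.txt',
#                                           exclude_labels_less_than=100, format='df')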
def get_index_names(index_code_list, index_info_file=INDEX_NAMES_FILES):
"""
Returns the names and descriptions for a list of index codes
:param index_code_list: List of index codes, e.g. ['RADA880101','WILM950102']
:param index_info_file: File containing the names and descriptions of the indices.
:return: names and descriptions of the index codes in `index_code_list`
>>> ind = get_index_names(['RADA880101', 'BIOV880101', 'SNEP660102','WILM950102']); get_index_names(ind)
"""
with open(index_info_file, 'r') as f:
lines = [line.strip().split(' ', 1) for line in f.readlines()]
lines = [line for line in lines if len(line) > 0]
code_dict = {code: desc for code, desc in lines}
return {name: code_dict[name] for name in index_code_list}
if __name__ == '__main__':
if len(sys.argv) > 2:  # both a dataset and a mode argument are required
dataset = sys.argv[1]
mode = sys.argv[2]
else:
sys.exit(1)
if dataset == 'plants':
data_folder = '../../data/plants'
input_file = '%s/all_plants.fas_updated04152015' % data_folder
elif dataset == 'animals':
data_folder = '../../data/animals'
input_file = '%s/metazoa_proteins.fas' % data_folder
else:
raise Exception('Please enter a valid dataset to use. Accepted: \'plants\' and \'animals\'')
if mode == 'scores':
pass
elif mode == 'sequences':
pass
elif mode == 'kscores':
pass
else:
raise Exception('Please enter either "scores" or "sequences" as the second argument')
output_file_0 = '%s/label_seq.txt' % data_folder
output_file_1 = '%s/label_scores.txt' % data_folder
output_file_2 = '%s/label_sequences.txt' % data_folder
# number of entries to output in the label & scores file.... max is 1257123
# testing
# get_general_label_test(input_file)
# UNCOMMENT THIS BLOCK TO OUTPUT LABEL & SEQUENCE file
# if os.path.exists(output_file_0) and ENABLE_WRITE != 0:
# os.remove(output_file_0)
# write_label_seq_file(input_file, output_file_0, write_file=0)
# # find_unique_labels(output_file_0)
# check_label_seq_file_validity(output_file_0)
# UNCOMMENT THIS BLOCK TO OUTPUT LABEL & SCORES file
if mode == 'scores':
if os.path.exists(output_file_1) and ENABLE_WRITE != 0:
os.remove(output_file_1)
size = 100000
# species = 'all' for everything
# 'Rattus norvegicus', 7071), ('Mus musculus', 15461), ('Homo sapiens', 23931) are popular species
write_label_score_file(input_file, output_file_1, write_file=ENABLE_WRITE, outsize=size,
group_similar_labels=True, species='all')
print('\n%s contains these labels:' % output_file_1)
# d = get_dict_loc_to_score(input_file)
# for k, v in d.items():
# print k
# print len(v)
find_unique_labels(output_file_1)
elif mode == 'sequences':
if os.path.exists(output_file_2) and ENABLE_WRITE != 0:
os.remove(output_file_2)
size = 100000
write_sequence_file(input_file, output_file_2, write_file=ENABLE_WRITE, outsize=size, group_similar_labels=True,
species='all')
elif mode == 'kscores':
if os.path.exists(output_file_1) and ENABLE_WRITE != 0:
os.remove(output_file_1)
d = get_dict_loc_to_score(input_file)
for k in d.keys():
random.shuffle(d[k])
size = 150000
with open(output_file_1[:-4] + '_150k.txt', 'a') as ofile:
for k in d.keys():
if len(d[k]) < size:
continue
for i in range(0, size):
ofile.write('%s|%s\n' % (k, d[k][i]))
size = 100000
with open(output_file_1[:-4] + '_100k.txt', 'a') as ofile:
for k in d.keys():
if len(d[k]) < size:
continue
for i in range(0, size):
ofile.write('%s|%s\n' % (k, d[k][i]))
size = 50000
with open(output_file_1[:-4] + '_50k.txt', 'a') as ofile:
for k in d.keys():
if len(d[k]) < size:
continue
for i in range(0, size):
ofile.write('%s|%s\n' % (k, d[k][i]))
size = 14000
with open(output_file_1[:-4] + '_14k.txt', 'a') as ofile:
for k in d.keys():
if len(d[k]) < size:
continue
for i in range(0, size):
ofile.write('%s|%s\n' % (k, d[k][i]))
size = 10000
with open(output_file_1[:-4] + '_10k.txt', 'a') as ofile:
for k in d.keys():
if len(d[k]) < size:
continue
for i in range(0, size):
ofile.write('%s|%s\n' % (k, d[k][i]))
# Vacuole 38
# Golgi apparatus 5320
# Secreted 596676
# Cytoplasm 1038408
# Mitochondria 513750
# Peroxisome 17827
# Nucleus 785219
# GPI anchored 14189
# Membrane 152219
# Cytoskeleton 44307
# Lysosome 2557
# Plasma membrane 465332
# ER 41470
| seokjunbing/cs75 | src/data_processing/read_data.py | Python | gpl-3.0 | 22,577 | 0.00186 |
# Copyright 2019, Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Libraries of Keras metrics."""
import tensorflow as tf
def _apply_mask(y_true, sample_weight, masked_tokens, dtype):
if sample_weight is None:
sample_weight = tf.ones_like(y_true, dtype)
else:
sample_weight = tf.cast(sample_weight, dtype)
for token in masked_tokens:
mask = tf.cast(tf.not_equal(y_true, token), dtype)
sample_weight = sample_weight * mask
return sample_weight
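# Worked example (illustrative): with y_true = [[1, 0, 2]], sample_weight = None and
# masked_tokens = [0], the returned sample_weight is [[1., 0., 1.]]; the padding token 0
# then contributes nothing to the metrics below.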
class NumTokensCounter(tf.keras.metrics.Sum):
"""A `tf.keras.metrics.Metric` that counts tokens seen after masking."""
def __init__(self, masked_tokens=None, name='num_tokens', dtype=tf.int64):
self._masked_tokens = masked_tokens or []
super().__init__(name, dtype)
def update_state(self, y_true, y_pred, sample_weight=None):
sample_weight = _apply_mask(y_true, sample_weight, self._masked_tokens,
self._dtype)
sample_weight = tf.reshape(sample_weight, [-1])
super().update_state(sample_weight)
def get_config(self):
config = super().get_config()
config['masked_tokens'] = tuple(self._masked_tokens)
return config
class MaskedCategoricalAccuracy(tf.keras.metrics.SparseCategoricalAccuracy):
"""An accuracy metric that masks some tokens."""
def __init__(self, masked_tokens=None, name='accuracy', dtype=None):
self._masked_tokens = masked_tokens or []
super().__init__(name, dtype=dtype)
def update_state(self, y_true, y_pred, sample_weight=None):
sample_weight = _apply_mask(y_true, sample_weight, self._masked_tokens,
self._dtype)
num_classes = tf.shape(y_pred)[-1]
y_true = tf.reshape(y_true, [-1])
y_pred = tf.reshape(y_pred, [-1, num_classes])
sample_weight = tf.reshape(sample_weight, [-1])
super().update_state(y_true, y_pred, sample_weight)
def get_config(self):
config = super().get_config()
config['masked_tokens'] = tuple(self._masked_tokens)
return config
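# Minimal usage sketch (illustrative, not part of this module): report accuracy while
# ignoring a padding token with id 0.
# metric = MaskedCategoricalAccuracy(masked_tokens=[0])
# y_true = tf.constant([[1, 0, 2]])      # shape [batch, time]
# y_pred = tf.random.uniform((1, 3, 5))  # shape [batch, time, vocab]
# metric.update_state(y_true, y_pred)
# print(metric.result().numpy())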
| google-research/federated | utils/keras_metrics.py | Python | apache-2.0 | 2,516 | 0.004769 |
from math import sqrt
def euclidean_distance(p1, p2):
"""
Compute the Euclidean distance between two points.
:param p1: first point as an (x, y) pair
:param p2: second point as an (x, y) pair
:return: the Euclidean distance between p1 and p2
"""
dx, dy = p2[0] - p1[0], p2[1] - p1[1]
# Magnitude of the displacement vector (dx, dy), as used e.g. in Coulomb-law style calculations.
return sqrt(dx ** 2 + dy ** 2)
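# Worked example (illustrative): euclidean_distance((0, 0), (3, 4)) == sqrt(9 + 16) == 5.0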
| dsaldana/roomba_sensor_network | roomba_sensor/src/roomba_sensor/util/geo.py | Python | gpl-3.0 | 268 | 0.003731 |
from __future__ import print_function, division, absolute_import
import warnings
import sys
# unittest.TestCase.subTest() was only added in Python 3.4
if sys.version_info[0] < 3 or sys.version_info[1] < 4:
import unittest2 as unittest
else:
import unittest
# unittest.mock is not available in 2.7 (though unittest2 might contain it?)
try:
import unittest.mock as mock
except ImportError:
import mock
import numpy as np
import imgaug as ia
import imgaug.random as iarandom
from imgaug.augmentables.bbs import _LabelOnImageDrawer
from imgaug.testutils import wrap_shift_deprecation, assertWarns
class TestBoundingBox_project_(unittest.TestCase):
@property
def _is_inplace(self):
return True
def _func(self, cba, *args, **kwargs):
return cba.project_(*args, **kwargs)
def test_project_same_shape(self):
bb = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40)
bb2 = self._func(bb, (10, 10), (10, 10))
assert np.isclose(bb2.y1, 10)
assert np.isclose(bb2.x1, 20)
assert np.isclose(bb2.y2, 30)
assert np.isclose(bb2.x2, 40)
def test_project_upscale_by_2(self):
bb = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40)
bb2 = self._func(bb, (10, 10), (20, 20))
assert np.isclose(bb2.y1, 10*2)
assert np.isclose(bb2.x1, 20*2)
assert np.isclose(bb2.y2, 30*2)
assert np.isclose(bb2.x2, 40*2)
def test_project_downscale_by_2(self):
bb = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40)
bb2 = self._func(bb, (10, 10), (5, 5))
assert np.isclose(bb2.y1, 10*0.5)
assert np.isclose(bb2.x1, 20*0.5)
assert np.isclose(bb2.y2, 30*0.5)
assert np.isclose(bb2.x2, 40*0.5)
def test_project_onto_wider_image(self):
bb = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40)
bb2 = self._func(bb, (10, 10), (10, 20))
assert np.isclose(bb2.y1, 10*1)
assert np.isclose(bb2.x1, 20*2)
assert np.isclose(bb2.y2, 30*1)
assert np.isclose(bb2.x2, 40*2)
def test_project_onto_higher_image(self):
bb = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40)
bb2 = self._func(bb, (10, 10), (20, 10))
assert np.isclose(bb2.y1, 10*2)
assert np.isclose(bb2.x1, 20*1)
assert np.isclose(bb2.y2, 30*2)
assert np.isclose(bb2.x2, 40*1)
def test_inplaceness(self):
bb = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40)
bb2 = self._func(bb, (10, 10), (10, 10))
if self._is_inplace:
assert bb2 is bb
else:
assert bb2 is not bb
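# Summary note (not part of the original tests): project()/project_() scale each coordinate
# by new_size/old_size per axis, so projecting from shape (10, 10) onto (20, 10) doubles the
# y-coordinates while leaving x unchanged, as the cases above check.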
class TestBoundingBox_project(TestBoundingBox_project_):
@property
def _is_inplace(self):
return False
def _func(self, cba, *args, **kwargs):
return cba.project(*args, **kwargs)
class TestBoundingBox_extend_(unittest.TestCase):
@property
def _is_inplace(self):
return True
def _func(self, cba, *args, **kwargs):
return cba.extend_(*args, **kwargs)
def test_extend_all_sides_by_1(self):
bb = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40)
bb2 = self._func(bb, all_sides=1)
assert bb2.y1 == 10-1
assert bb2.y2 == 30+1
assert bb2.x1 == 20-1
assert bb2.x2 == 40+1
def test_extend_all_sides_by_minus_1(self):
bb = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40)
bb2 = self._func(bb, all_sides=-1)
assert bb2.y1 == 10-(-1)
assert bb2.y2 == 30+(-1)
assert bb2.x1 == 20-(-1)
assert bb2.x2 == 40+(-1)
def test_extend_top_by_1(self):
bb = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40)
bb2 = self._func(bb, top=1)
assert bb2.y1 == 10-1
assert bb2.y2 == 30+0
assert bb2.x1 == 20-0
assert bb2.x2 == 40+0
def test_extend_right_by_1(self):
bb = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40)
bb2 = self._func(bb, right=1)
assert bb2.y1 == 10-0
assert bb2.y2 == 30+0
assert bb2.x1 == 20-0
assert bb2.x2 == 40+1
def test_extend_bottom_by_1(self):
bb = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40)
bb2 = self._func(bb, bottom=1)
assert bb2.y1 == 10-0
assert bb2.y2 == 30+1
assert bb2.x1 == 20-0
assert bb2.x2 == 40+0
def test_extend_left_by_1(self):
bb = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40)
bb2 = self._func(bb, left=1)
assert bb2.y1 == 10-0
assert bb2.y2 == 30+0
assert bb2.x1 == 20-1
assert bb2.x2 == 40+0
def test_inplaceness(self):
bb = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40)
bb2 = self._func(bb, all_sides=1)
if self._is_inplace:
assert bb2 is bb
else:
assert bb2 is not bb
class TestBoundingBox_extend(TestBoundingBox_extend_):
@property
def _is_inplace(self):
return False
def _func(self, cba, *args, **kwargs):
return cba.extend(*args, **kwargs)
class TestBoundingBox_clip_out_of_image_(unittest.TestCase):
@property
def _is_inplace(self):
return True
def _func(self, cba, *args, **kwargs):
return cba.clip_out_of_image_(*args, **kwargs)
def test_clip_out_of_image_with_bb_fully_inside_image(self):
bb = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40)
bb_cut = self._func(bb, (100, 100, 3))
assert bb_cut.y1 == 10
assert bb_cut.x1 == 20
assert bb_cut.y2 == 30
assert bb_cut.x2 == 40
def test_clip_out_of_image_with_array_as_shape(self):
bb = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40)
image = np.zeros((100, 100, 3), dtype=np.uint8)
bb_cut = bb.clip_out_of_image(image)
assert bb_cut.y1 == 10
assert bb_cut.x1 == 20
assert bb_cut.y2 == 30
assert bb_cut.x2 == 40
def test_clip_out_of_image_with_bb_too_high(self):
bb = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40)
bb_cut = self._func(bb, (20, 100, 3))
assert bb_cut.y1 == 10
assert bb_cut.x1 == 20
assert np.isclose(bb_cut.y2, 20)
assert bb_cut.x2 == 40
def test_clip_out_of_image_with_bb_too_wide(self):
bb = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40)
bb_cut = self._func(bb, (100, 30, 3))
assert bb_cut.y1 == 10
assert bb_cut.x1 == 20
assert bb_cut.y2 == 30
assert np.isclose(bb_cut.x2, 30)
def test_inplaceness(self):
bb = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40)
bb2 = self._func(bb, (100, 100, 3))
if self._is_inplace:
assert bb2 is bb
else:
assert bb2 is not bb
class TestBoundingBox_clip_out_of_image(TestBoundingBox_clip_out_of_image_):
@property
def _is_inplace(self):
return False
def _func(self, cba, *args, **kwargs):
return cba.clip_out_of_image(*args, **kwargs)
class TestBoundingBox_shift_(unittest.TestCase):
@property
def _is_inplace(self):
return True
def _func(self, cba, *args, **kwargs):
def _func_impl():
return cba.shift_(*args, **kwargs)
return wrap_shift_deprecation(_func_impl, *args, **kwargs)
def test_shift_by_x(self):
bb = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40)
bb_top = self._func(bb, x=1)
assert bb_top.y1 == 10
assert bb_top.x1 == 20 + 1
assert bb_top.y2 == 30
assert bb_top.x2 == 40 + 1
def test_shift_by_y(self):
bb = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40)
bb_top = self._func(bb, y=1)
assert bb_top.y1 == 10 + 1
assert bb_top.x1 == 20
assert bb_top.y2 == 30 + 1
assert bb_top.x2 == 40
def test_inplaceness(self):
bb = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40)
bb2 = self._func(bb, y=0)
if self._is_inplace:
assert bb2 is bb
else:
assert bb2 is not bb
class TestBoundingBox_shift(TestBoundingBox_shift_):
@property
def _is_inplace(self):
return False
def _func(self, cba, *args, **kwargs):
def _func_impl():
return cba.shift(*args, **kwargs)
return wrap_shift_deprecation(_func_impl, *args, **kwargs)
def test_shift_top_by_zero(self):
bb = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40)
bb_top = self._func(bb, top=0)
assert bb_top.y1 == 10
assert bb_top.x1 == 20
assert bb_top.y2 == 30
assert bb_top.x2 == 40
def test_shift_right_by_zero(self):
bb = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40)
bb_right = self._func(bb, right=0)
assert bb_right.y1 == 10
assert bb_right.x1 == 20
assert bb_right.y2 == 30
assert bb_right.x2 == 40
def test_shift_bottom_by_zero(self):
bb = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40)
bb_bottom = self._func(bb, bottom=0)
assert bb_bottom.y1 == 10
assert bb_bottom.x1 == 20
assert bb_bottom.y2 == 30
assert bb_bottom.x2 == 40
def test_shift_left_by_zero(self):
bb = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40)
bb_left = self._func(bb, left=0)
assert bb_left.y1 == 10
assert bb_left.x1 == 20
assert bb_left.y2 == 30
assert bb_left.x2 == 40
def test_shift_top_by_one(self):
bb = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40)
bb_top = self._func(bb, top=1)
assert bb_top.y1 == 10+1
assert bb_top.x1 == 20
assert bb_top.y2 == 30+1
assert bb_top.x2 == 40
def test_shift_right_by_one(self):
bb = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40)
bb_right = self._func(bb, right=1)
assert bb_right.y1 == 10
assert bb_right.x1 == 20-1
assert bb_right.y2 == 30
assert bb_right.x2 == 40-1
def test_shift_bottom_by_one(self):
bb = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40)
bb_bottom = self._func(bb, bottom=1)
assert bb_bottom.y1 == 10-1
assert bb_bottom.x1 == 20
assert bb_bottom.y2 == 30-1
assert bb_bottom.x2 == 40
def test_shift_left_by_one(self):
bb = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40)
bb_left = self._func(bb, left=1)
assert bb_left.y1 == 10
assert bb_left.x1 == 20+1
assert bb_left.y2 == 30
assert bb_left.x2 == 40+1
def test_shift_top_by_minus_one(self):
bb = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40)
bb_top = self._func(bb, top=-1)
assert bb_top.y1 == 10-1
assert bb_top.x1 == 20
assert bb_top.y2 == 30-1
assert bb_top.x2 == 40
def test_shift_right_by_minus_one(self):
bb = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40)
bb_right = self._func(bb, right=-1)
assert bb_right.y1 == 10
assert bb_right.x1 == 20+1
assert bb_right.y2 == 30
assert bb_right.x2 == 40+1
def test_shift_bottom_by_minus_one(self):
bb = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40)
bb_bottom = self._func(bb, bottom=-1)
assert bb_bottom.y1 == 10+1
assert bb_bottom.x1 == 20
assert bb_bottom.y2 == 30+1
assert bb_bottom.x2 == 40
def test_shift_left_by_minus_one(self):
bb = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40)
bb_left = self._func(bb, left=-1)
assert bb_left.y1 == 10
assert bb_left.x1 == 20-1
assert bb_left.y2 == 30
assert bb_left.x2 == 40-1
def test_shift_all_sides_by_individual_amounts(self):
bb = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40)
bb_mix = self._func(bb, top=1, bottom=2, left=3, right=4)
assert bb_mix.y1 == 10+1-2
assert bb_mix.x1 == 20+3-4
assert bb_mix.y2 == 30+3-4
assert bb_mix.x2 == 40+1-2
class TestBoundingBox(unittest.TestCase):
def test___init__(self):
bb = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40)
assert bb.y1 == 10
assert bb.x1 == 20
assert bb.y2 == 30
assert bb.x2 == 40
assert bb.label is None
def test___init___floats(self):
bb = ia.BoundingBox(y1=10.1, x1=20.2, y2=30.3, x2=40.4)
assert np.isclose(bb.y1, 10.1)
assert np.isclose(bb.x1, 20.2)
assert np.isclose(bb.y2, 30.3)
assert np.isclose(bb.x2, 40.4)
assert bb.label is None
def test___init___label(self):
bb = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40, label="foo")
assert bb.y1 == 10
assert bb.x1 == 20
assert bb.y2 == 30
assert bb.x2 == 40
assert bb.label == "foo"
def test___init___wrong_x1_x2_order(self):
bb = ia.BoundingBox(y1=10, x1=40, y2=30, x2=20)
assert bb.y1 == 10
assert bb.x1 == 20
assert bb.y2 == 30
assert bb.x2 == 40
def test___init___wrong_y1_y2_order(self):
bb = ia.BoundingBox(y1=30, x1=20, y2=10, x2=40)
assert bb.y1 == 10
assert bb.x1 == 20
assert bb.y2 == 30
assert bb.x2 == 40
def test_coords_property_ints(self):
bb = ia.BoundingBox(x1=10, y1=20, x2=30, y2=40)
coords = bb.coords
assert np.allclose(coords, [[10, 20], [30, 40]],
atol=1e-4, rtol=0)
def test_coords_property_floats(self):
bb = ia.BoundingBox(x1=10.1, y1=20.2, x2=30.3, y2=40.4)
coords = bb.coords
assert np.allclose(coords, [[10.1, 20.2], [30.3, 40.4]],
atol=1e-4, rtol=0)
def test_xy_int_properties(self):
bb = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40)
assert bb.y1_int == 10
assert bb.x1_int == 20
assert bb.y2_int == 30
assert bb.x2_int == 40
def test_xy_int_properties_floats(self):
bb = ia.BoundingBox(y1=10.1, x1=20.2, y2=30.6, x2=40.7)
assert bb.y1_int == 10
assert bb.x1_int == 20
assert bb.y2_int == 31
assert bb.x2_int == 41
def test_width(self):
bb = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40)
assert bb.width == 40 - 20
def test_width_floats(self):
bb = ia.BoundingBox(y1=10.1, x1=20.2, y2=30.3, x2=40.4)
assert np.isclose(bb.width, 40.4 - 20.2)
def test_height(self):
bb = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40)
assert bb.height == 30 - 10
def test_height_floats(self):
bb = ia.BoundingBox(y1=10.1, x1=20.2, y2=30.3, x2=40.4)
assert np.isclose(bb.height, 30.3 - 10.1)
def test_center_x(self):
bb = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40)
expected = 20 + (40 - 20)/2
assert np.isclose(bb.center_x, expected)
def test_center_x_floats(self):
bb = ia.BoundingBox(y1=10.1, x1=20.2, y2=30.3, x2=40.4)
expected = 20.2 + (40.4 - 20.2)/2
assert np.isclose(bb.center_x, expected)
def test_center_y(self):
bb = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40)
expected = 10 + (30 - 10)/2
assert np.isclose(bb.center_y, expected)
def test_center_y_floats(self):
bb = ia.BoundingBox(y1=10.1, x1=20.2, y2=30.3, x2=40.4)
expected = 10.1 + (30.3 - 10.1)/2
assert np.isclose(bb.center_y, expected)
def test_area(self):
bb = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40)
assert bb.area == (30-10) * (40-20)
def test_area_floats(self):
bb = ia.BoundingBox(y1=10.1, x1=20.2, y2=30.3, x2=40.4)
assert np.isclose(bb.area, (30.3-10.1) * (40.4-20.2))
def test_contains(self):
bb = ia.BoundingBox(y1=1, x1=2, y2=1+4, x2=2+5, label=None)
assert bb.contains(ia.Keypoint(x=2.5, y=1.5)) is True
assert bb.contains(ia.Keypoint(x=2, y=1)) is True
assert bb.contains(ia.Keypoint(x=0, y=0)) is False
def test_intersection(self):
bb1 = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40)
bb2 = ia.BoundingBox(y1=10, x1=39, y2=30, x2=59)
bb_inter = bb1.intersection(bb2)
assert bb_inter.x1 == 39
assert bb_inter.x2 == 40
assert bb_inter.y1 == 10
assert bb_inter.y2 == 30
def test_intersection_of_non_overlapping_bbs(self):
bb1 = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40)
bb2 = ia.BoundingBox(y1=10, x1=41, y2=30, x2=61)
bb_inter = bb1.intersection(bb2, default=False)
assert bb_inter is False
def test_union(self):
bb1 = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40)
bb2 = ia.BoundingBox(y1=10, x1=39, y2=30, x2=59)
bb_union = bb1.union(bb2)
assert bb_union.x1 == 20
assert bb_union.x2 == 59
assert bb_union.y1 == 10
assert bb_union.y2 == 30
def test_iou_of_identical_bbs(self):
bb1 = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40)
bb2 = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40)
iou = bb1.iou(bb2)
assert np.isclose(iou, 1.0)
def test_iou_of_non_overlapping_bbs(self):
bb1 = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40)
bb2 = ia.BoundingBox(y1=10, x1=41, y2=30, x2=61)
iou = bb1.iou(bb2)
assert np.isclose(iou, 0.0)
def test_iou_of_partially_overlapping_bbs(self):
bb1 = ia.BoundingBox(y1=10, x1=10, y2=20, x2=20)
bb2 = ia.BoundingBox(y1=15, x1=15, y2=25, x2=25)
iou = bb1.iou(bb2)
area_union = 10 * 10 + 10 * 10 - 5 * 5
area_intersection = 5 * 5
iou_expected = area_intersection / area_union
assert np.isclose(iou, iou_expected)
def test_compute_out_of_image_area__fully_inside(self):
bb = ia.BoundingBox(y1=10.1, x1=20.2, y2=30.3, x2=40.4)
image_shape = (100, 200, 3)
area_ooi = bb.compute_out_of_image_area(image_shape)
assert np.isclose(area_ooi, 0.0)
def test_compute_out_of_image_area__partially_ooi(self):
bb = ia.BoundingBox(y1=10, x1=-20, y2=30, x2=40)
image_shape = (100, 200, 3)
area_ooi = bb.compute_out_of_image_area(image_shape)
assert np.isclose(area_ooi, (0-(-20))*(30-10))
def test_compute_out_of_image_area__fully_ooi(self):
bb = ia.BoundingBox(y1=10, x1=-20, y2=30, x2=-10)
image_shape = (100, 200, 3)
area_ooi = bb.compute_out_of_image_area(image_shape)
assert np.isclose(area_ooi, 20*10)
def test_compute_out_of_image_area__zero_sized_image(self):
bb = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40)
image_shape = (0, 0, 3)
area_ooi = bb.compute_out_of_image_area(image_shape)
assert np.isclose(area_ooi, bb.area)
def test_compute_out_of_image_area__bb_has_zero_sized_area(self):
bb = ia.BoundingBox(y1=10, x1=20, y2=10, x2=20)
image_shape = (100, 200, 3)
area_ooi = bb.compute_out_of_image_area(image_shape)
assert np.isclose(area_ooi, 0.0)
def test_compute_out_of_image_fraction__inside_image(self):
bb = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40)
image_shape = (100, 200, 3)
factor = bb.compute_out_of_image_fraction(image_shape)
assert np.isclose(factor, 0.0)
def test_compute_out_of_image_fraction__partially_ooi(self):
bb = ia.BoundingBox(y1=10, x1=-20, y2=30, x2=40)
image_shape = (100, 200, 3)
factor = bb.compute_out_of_image_fraction(image_shape)
expected = (20 * 20) / (20 * 60)
assert np.isclose(factor, expected)
def test_compute_out_of_image_fraction__fully_ooi(self):
bb = ia.BoundingBox(y1=10, x1=-20, y2=30, x2=0)
image_shape = (100, 200, 3)
factor = bb.compute_out_of_image_fraction(image_shape)
assert np.isclose(factor, 1.0)
def test_compute_out_of_image_fraction__zero_area_inside_image(self):
bb = ia.BoundingBox(y1=10, x1=20, y2=10, x2=20)
image_shape = (100, 200, 3)
factor = bb.compute_out_of_image_fraction(image_shape)
assert np.isclose(factor, 0.0)
def test_compute_out_of_image_fraction__zero_area_ooi(self):
bb = ia.BoundingBox(y1=-10, x1=20, y2=-10, x2=20)
image_shape = (100, 200, 3)
factor = bb.compute_out_of_image_fraction(image_shape)
assert np.isclose(factor, 1.0)
def test_is_fully_within_image(self):
bb = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40, label=None)
assert bb.is_fully_within_image((100, 100, 3)) is True
assert bb.is_fully_within_image((20, 100, 3)) is False
assert bb.is_fully_within_image((100, 30, 3)) is False
assert bb.is_fully_within_image((1, 1, 3)) is False
def test_is_partly_within_image(self):
bb = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40, label=None)
assert bb.is_partly_within_image((100, 100, 3)) is True
assert bb.is_partly_within_image((20, 100, 3)) is True
assert bb.is_partly_within_image((100, 30, 3)) is True
assert bb.is_partly_within_image((1, 1, 3)) is False
def test_is_out_of_image(self):
bb = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40, label=None)
subtests = [
((100, 100, 3), True, True, False),
((100, 100, 3), False, True, False),
((100, 100, 3), True, False, False),
((20, 100, 3), True, True, True),
((20, 100, 3), False, True, False),
((20, 100, 3), True, False, True),
((100, 30, 3), True, True, True),
((100, 30, 3), False, True, False),
((100, 30, 3), True, False, True),
((1, 1, 3), True, True, True),
((1, 1, 3), False, True, True),
((1, 1, 3), True, False, False)
]
for shape, partly, fully, expected in subtests:
with self.subTest(shape=shape, partly=partly, fully=fully):
observed = bb.is_out_of_image(shape,
partly=partly, fully=fully)
assert observed is expected
@mock.patch("imgaug.augmentables.bbs._LabelOnImageDrawer")
def test_draw_label_on_image_mocked(self, mock_drawer):
mock_drawer.return_value = mock_drawer
image = np.zeros((10, 10, 3), dtype=np.uint8)
bb = ia.BoundingBox(y1=1, x1=1, y2=3, x2=3)
result = bb.draw_label_on_image(image)
kwargs = mock_drawer.call_args_list[0][1]
assert kwargs["color"] == (0, 255, 0)
assert kwargs["color_text"] is None
assert kwargs["color_bg"] is None
assert np.isclose(kwargs["alpha"], 1.0)
assert kwargs["size"] == 1
assert kwargs["size_text"] == 20
assert kwargs["height"] == 30
assert kwargs["raise_if_out_of_image"] is False
assert mock_drawer.draw_on_image.call_count == 1
@mock.patch("imgaug.augmentables.bbs._LabelOnImageDrawer")
def test_draw_label_on_image_mocked_inplace(self, mock_drawer):
mock_drawer.return_value = mock_drawer
image = np.zeros((10, 10, 3), dtype=np.uint8)
bb = ia.BoundingBox(y1=1, x1=1, y2=3, x2=3)
result = bb.draw_label_on_image(image, copy=False)
kwargs = mock_drawer.call_args_list[0][1]
assert kwargs["color"] == (0, 255, 0)
assert kwargs["color_text"] is None
assert kwargs["color_bg"] is None
assert np.isclose(kwargs["alpha"], 1.0)
assert kwargs["size"] == 1
assert kwargs["size_text"] == 20
assert kwargs["height"] == 30
assert kwargs["raise_if_out_of_image"] is False
assert mock_drawer.draw_on_image_.call_count == 1
def test_draw_label_on_image(self):
image = np.zeros((100, 70, 3), dtype=np.uint8)
bb = ia.BoundingBox(y1=40, x1=10, y2=50, x2=40)
result = bb.draw_label_on_image(image,
color_bg=(123, 123, 123),
color_text=(222, 222, 222))
color_bg = np.uint8([123, 123, 123]).reshape((1, 1, -1))
color_text = np.uint8([222, 222, 222]).reshape((1, 1, -1))
matches_bg = np.min(result == color_bg, axis=-1)
matches_text = np.min(result == color_text, axis=-1)
assert np.any(matches_bg > 0)
assert np.any(matches_text > 0)
@classmethod
def _get_standard_draw_box_on_image_vars(cls):
image = np.zeros((10, 10, 3), dtype=np.uint8)
bb = ia.BoundingBox(y1=1, x1=1, y2=3, x2=3)
bb_mask = np.zeros(image.shape[0:2], dtype=np.bool)
bb_mask[1:3+1, 1] = True
bb_mask[1:3+1, 3] = True
bb_mask[1, 1:3+1] = True
bb_mask[3, 1:3+1] = True
return image, bb, bb_mask
def test_draw_box_on_image(self):
image, bb, bb_mask = self._get_standard_draw_box_on_image_vars()
image_bb = bb.draw_box_on_image(
image, color=[255, 255, 255], alpha=1.0, size=1, copy=True,
raise_if_out_of_image=False)
assert np.all(image_bb[bb_mask] == [255, 255, 255])
assert np.all(image_bb[~bb_mask] == [0, 0, 0])
assert np.all(image == 0)
def test_draw_box_on_image_red_color(self):
image, bb, bb_mask = self._get_standard_draw_box_on_image_vars()
image_bb = bb.draw_box_on_image(
image, color=[255, 0, 0], alpha=1.0, size=1, copy=True,
raise_if_out_of_image=False)
assert np.all(image_bb[bb_mask] == [255, 0, 0])
assert np.all(image_bb[~bb_mask] == [0, 0, 0])
def test_draw_box_on_image_single_int_as_color(self):
image, bb, bb_mask = self._get_standard_draw_box_on_image_vars()
image_bb = bb.draw_box_on_image(
image, color=128, alpha=1.0, size=1, copy=True,
raise_if_out_of_image=False)
assert np.all(image_bb[bb_mask] == [128, 128, 128])
assert np.all(image_bb[~bb_mask] == [0, 0, 0])
def test_draw_box_on_image_alpha_at_50_percent(self):
image, bb, bb_mask = self._get_standard_draw_box_on_image_vars()
image_bb = bb.draw_box_on_image(
image + 100, color=[200, 200, 200], alpha=0.5, size=1, copy=True,
raise_if_out_of_image=False)
assert np.all(image_bb[bb_mask] == [150, 150, 150])
assert np.all(image_bb[~bb_mask] == [100, 100, 100])
def test_draw_box_on_image_alpha_at_50_percent_and_float32_image(self):
image, bb, bb_mask = self._get_standard_draw_box_on_image_vars()
image_bb = bb.draw_box_on_image(
(image+100).astype(np.float32),
color=[200, 200, 200], alpha=0.5, size=1,
copy=True, raise_if_out_of_image=False)
assert np.sum(np.abs((image_bb - [150, 150, 150])[bb_mask])) < 0.1
assert np.sum(np.abs((image_bb - [100, 100, 100])[~bb_mask])) < 0.1
def test_draw_box_on_image_no_copy(self):
image, bb, bb_mask = self._get_standard_draw_box_on_image_vars()
image_bb = bb.draw_box_on_image(
image, color=[255, 255, 255], alpha=1.0, size=1, copy=False,
raise_if_out_of_image=False)
assert np.all(image_bb[bb_mask] == [255, 255, 255])
assert np.all(image_bb[~bb_mask] == [0, 0, 0])
assert np.all(image[bb_mask] == [255, 255, 255])
assert np.all(image[~bb_mask] == [0, 0, 0])
def test_draw_box_on_image_bb_outside_of_image(self):
image = np.zeros((10, 10, 3), dtype=np.uint8)
bb = ia.BoundingBox(y1=-1, x1=-1, y2=2, x2=2)
bb_mask = np.zeros(image.shape[0:2], dtype=np.bool)
bb_mask[2, 0:3] = True
bb_mask[0:3, 2] = True
image_bb = bb.draw_box_on_image(
image, color=[255, 255, 255], alpha=1.0, size=1, copy=True,
raise_if_out_of_image=False)
assert np.all(image_bb[bb_mask] == [255, 255, 255])
assert np.all(image_bb[~bb_mask] == [0, 0, 0])
def test_draw_box_on_image_bb_outside_of_image_and_very_small(self):
image, bb, bb_mask = self._get_standard_draw_box_on_image_vars()
bb = ia.BoundingBox(y1=-1, x1=-1, y2=1, x2=1)
bb_mask = np.zeros(image.shape[0:2], dtype=np.bool)
bb_mask[0:1+1, 1] = True
bb_mask[1, 0:1+1] = True
image_bb = bb.draw_box_on_image(
image, color=[255, 255, 255], alpha=1.0, size=1, copy=True,
raise_if_out_of_image=False)
assert np.all(image_bb[bb_mask] == [255, 255, 255])
assert np.all(image_bb[~bb_mask] == [0, 0, 0])
def test_draw_box_on_image_size_2(self):
image, bb, _ = self._get_standard_draw_box_on_image_vars()
bb_mask = np.zeros(image.shape[0:2], dtype=np.bool)
bb_mask[0:5, 0:5] = True
bb_mask[2, 2] = False
image_bb = bb.draw_box_on_image(
image, color=[255, 255, 255], alpha=1.0, size=2, copy=True,
raise_if_out_of_image=False)
assert np.all(image_bb[bb_mask] == [255, 255, 255])
assert np.all(image_bb[~bb_mask] == [0, 0, 0])
def test_draw_box_on_image_raise_true_but_bb_partially_inside_image(self):
image, bb, bb_mask = self._get_standard_draw_box_on_image_vars()
bb = ia.BoundingBox(y1=-1, x1=-1, y2=1, x2=1)
_ = bb.draw_box_on_image(
image, color=[255, 255, 255], alpha=1.0, size=1, copy=True,
raise_if_out_of_image=True)
def test_draw_box_on_image_raise_true_and_bb_fully_outside_image(self):
image, bb, bb_mask = self._get_standard_draw_box_on_image_vars()
bb = ia.BoundingBox(y1=-5, x1=-5, y2=-1, x2=-1)
with self.assertRaises(Exception) as context:
_ = bb.draw_box_on_image(
image, color=[255, 255, 255], alpha=1.0, size=1, copy=True,
raise_if_out_of_image=True)
assert "Cannot draw bounding box" in str(context.exception)
def test_draw_on_image_label_is_none(self):
# if label is None, only the bounding-box rectangle should be drawn,
# without a label box above it
image = np.zeros((100, 70, 3), dtype=np.uint8)
bb = ia.BoundingBox(y1=40, x1=10, y2=50, x2=40, label=None)
image_drawn = bb.draw_on_image(image)
expected = bb.draw_box_on_image(image)
assert np.array_equal(image_drawn, expected)
def test_draw_on_image_label_is_str(self):
# if label is a string, both the rectangle and the label box above it
# should be drawn
image = np.zeros((100, 70, 3), dtype=np.uint8)
bb = ia.BoundingBox(y1=40, x1=10, y2=50, x2=40, label="Foo")
image_drawn = bb.draw_on_image(image)
expected = bb.draw_box_on_image(image)
expected = bb.draw_label_on_image(expected)
assert np.array_equal(image_drawn, expected)
def test_extract_from_image(self):
image = iarandom.RNG(1234).integers(0, 255, size=(10, 10, 3))
bb = ia.BoundingBox(y1=1, y2=3, x1=1, x2=3)
image_sub = bb.extract_from_image(image)
assert np.array_equal(image_sub, image[1:3, 1:3, :])
def test_extract_from_image_no_channels(self):
image = iarandom.RNG(1234).integers(0, 255, size=(10, 10))
bb = ia.BoundingBox(y1=1, y2=3, x1=1, x2=3)
image_sub = bb.extract_from_image(image)
assert np.array_equal(image_sub, image[1:3, 1:3])
def test_extract_from_image_bb_partially_out_of_image(self):
image = iarandom.RNG(1234).integers(0, 255, size=(10, 10, 3))
bb = ia.BoundingBox(y1=8, y2=11, x1=8, x2=11)
image_sub = bb.extract_from_image(image)
image_pad = np.pad(
image,
((0, 1), (0, 1), (0, 0)),
mode="constant",
constant_values=0) # pad at bottom and right each 1px (black)
assert np.array_equal(image_sub, image_pad[8:11, 8:11, :])
def test_extract_from_image_bb_partially_out_of_image_no_channels(self):
image = iarandom.RNG(1234).integers(0, 255, size=(10, 10))
bb = ia.BoundingBox(y1=8, y2=11, x1=8, x2=11)
image_sub = bb.extract_from_image(image)
image_pad = np.pad(
image,
((0, 1), (0, 1)),
mode="constant",
constant_values=0) # pad at bottom and right each 1px (black)
assert np.array_equal(image_sub, image_pad[8:11, 8:11])
def test_extract_from_image_bb_partially_out_of_image_top_left(self):
image = iarandom.RNG(1234).integers(0, 255, size=(10, 10, 3))
bb = ia.BoundingBox(y1=-1, y2=3, x1=-1, x2=4)
image_sub = bb.extract_from_image(image)
image_pad = np.pad(
image,
((1, 0), (1, 0), (0, 0)),
mode="constant",
constant_values=0) # pad at top and left each 1px (black)
assert np.array_equal(image_sub, image_pad[0:4, 0:5, :])
def test_extract_from_image_float_coords(self):
image = iarandom.RNG(1234).integers(0, 255, size=(10, 10, 3))
bb = ia.BoundingBox(y1=1, y2=1.99999, x1=1, x2=1.99999)
image_sub = bb.extract_from_image(image)
assert np.array_equal(image_sub, image[1:1+1, 1:1+1, :])
def test_extract_from_image_bb_height_is_zero(self):
image = iarandom.RNG(1234).integers(0, 255, size=(10, 10, 3))
bb = ia.BoundingBox(y1=1, y2=1, x1=2, x2=4)
image_sub = bb.extract_from_image(image)
assert np.array_equal(image_sub, image[1:1+1, 2:4, :])
def test_extract_from_image_bb_width_is_zero(self):
image = iarandom.RNG(1234).integers(0, 255, size=(10, 10, 3))
bb = ia.BoundingBox(y1=1, y2=1, x1=2, x2=2)
image_sub = bb.extract_from_image(image)
assert np.array_equal(image_sub, image[1:1+1, 2:2+1, :])
def test_to_keypoints(self):
bb = ia.BoundingBox(y1=1, y2=3, x1=1, x2=3)
kps = bb.to_keypoints()
assert len(kps) == 4
assert kps[0].y == 1
assert kps[0].x == 1
assert kps[1].y == 1
assert kps[1].x == 3
assert kps[2].y == 3
assert kps[2].x == 3
assert kps[3].y == 3
assert kps[3].x == 1
def test_to_polygon(self):
bb = ia.BoundingBox(y1=1, y2=3, x1=1, x2=3)
poly = bb.to_polygon()
assert poly.coords_almost_equals([
(1, 1),
(3, 1),
(3, 3,),
(1, 3)
])
def test_coords_almost_equals(self):
bb = ia.BoundingBox(x1=1, y1=3, x2=1, y2=3)
other = ia.BoundingBox(x1=1, y1=3, x2=1, y2=3)
equal = bb.coords_almost_equals(other)
assert equal
def test_coords_almost_equals__unequal(self):
bb = ia.BoundingBox(x1=1, y1=3, x2=1, y2=3)
other = ia.BoundingBox(x1=1+1, y1=3+1, x2=1+1, y2=3+1)
equal = bb.coords_almost_equals(other)
assert not equal
def test_coords_almost_equals__dist_below_max_distance(self):
bb = ia.BoundingBox(x1=1, y1=3, x2=1, y2=3)
other = ia.BoundingBox(x1=1, y1=3, x2=1, y2=3+1e-5)
equal = bb.coords_almost_equals(other, max_distance=1e-4)
assert equal
def test_coords_almost_equals__dist_above_max_distance(self):
bb = ia.BoundingBox(x1=1, y1=3, x2=1, y2=3)
other = ia.BoundingBox(x1=1, y1=3, x2=1, y2=3+1e-3)
equal = bb.coords_almost_equals(other, max_distance=1e-4)
assert not equal
def test_coords_almost_equals__input_is_array(self):
bb = ia.BoundingBox(x1=1, y1=3, x2=1, y2=3)
other = np.float32([[1, 3], [1, 3]])
equal = bb.coords_almost_equals(other)
assert equal
def test_coords_almost_equals__input_is_array_not_equal(self):
bb = ia.BoundingBox(x1=1, y1=3, x2=1, y2=3)
other = np.float32([[1, 3], [1, 3+0.5]])
equal = bb.coords_almost_equals(other)
assert not equal
def test_coords_almost_equals__input_is_list(self):
bb = ia.BoundingBox(x1=1, y1=3, x2=1, y2=3)
other = [[1, 3], [1, 3]]
equal = bb.coords_almost_equals(other)
assert equal
def test_coords_almost_equals__input_is_list_not_equal(self):
bb = ia.BoundingBox(x1=1, y1=3, x2=1, y2=3)
other = [[1, 3], [1, 3+0.5]]
equal = bb.coords_almost_equals(other)
assert not equal
def test_coords_almost_equals__bad_datatype(self):
bb = ia.BoundingBox(x1=1, y1=3, x2=1, y2=3)
with self.assertRaises(ValueError) as cm:
_ = bb.coords_almost_equals(False)
assert "Expected 'other'" in str(cm.exception)
@mock.patch("imgaug.augmentables.bbs.BoundingBox.coords_almost_equals")
def test_almost_equals(self, mock_cae):
bb = ia.BoundingBox(x1=1, y1=3, x2=1, y2=3)
other = ia.BoundingBox(x1=1, y1=3, x2=1, y2=3)
equal = bb.almost_equals(other, max_distance=1)
assert equal
mock_cae.assert_called_once_with(other, max_distance=1)
def test_almost_equals__labels_none_vs_string(self):
bb = ia.BoundingBox(x1=1, y1=3, x2=1, y2=3, label="foo")
other = ia.BoundingBox(x1=1, y1=3, x2=1, y2=3)
equal = bb.almost_equals(other)
assert not equal
def test_almost_equals__labels_different_strings(self):
bb = ia.BoundingBox(x1=1, y1=3, x2=1, y2=3, label="foo")
other = ia.BoundingBox(x1=1, y1=3, x2=1, y2=3, label="bar")
equal = bb.almost_equals(other)
assert not equal
def test_almost_equals__same_string(self):
bb = ia.BoundingBox(x1=1, y1=3, x2=1, y2=3, label="foo")
other = ia.BoundingBox(x1=1, y1=3, x2=1, y2=3, label="foo")
equal = bb.almost_equals(other)
assert equal
def test_almost_equals__distance_above_threshold(self):
bb = ia.BoundingBox(x1=1, y1=3, x2=1, y2=3, label="foo")
other = ia.BoundingBox(x1=1, y1=3, x2=1, y2=3+1e-1, label="foo")
equal = bb.almost_equals(other, max_distance=1e-2)
assert not equal
def test_from_point_soup__empty_list(self):
with self.assertRaises(AssertionError) as ctx:
_ = ia.BoundingBox.from_point_soup([])
assert "Expected to get at least one point" in str(ctx.exception)
def test_from_point_soup__empty_array(self):
with self.assertRaises(AssertionError) as ctx:
_ = ia.BoundingBox.from_point_soup(np.zeros((0, 2)))
assert "Expected to get at least one point" in str(ctx.exception)
def test_from_point_soup__list_with_single_point(self):
points = [(1, 2)]
bb = ia.BoundingBox.from_point_soup(points)
assert bb.x1 == 1
assert bb.y1 == 2
assert bb.x2 == 1
assert bb.y2 == 2
def test_from_point_soup__list_with_single_point__single_level(self):
points = [1, 2]
bb = ia.BoundingBox.from_point_soup(points)
assert bb.x1 == 1
assert bb.y1 == 2
assert bb.x2 == 1
assert bb.y2 == 2
def test_from_point_soup__list_with_two_points(self):
points = [(1, 2), (3, 4)]
bb = ia.BoundingBox.from_point_soup(points)
assert bb.x1 == 1
assert bb.y1 == 2
assert bb.x2 == 3
assert bb.y2 == 4
def test_from_point_soup__list_with_three_points(self):
points = [(1, 4), (3, 2), (15, 16)]
bb = ia.BoundingBox.from_point_soup(points)
assert bb.x1 == 1
assert bb.y1 == 2
assert bb.x2 == 15
assert bb.y2 == 16
def test_from_point_soup__array_with_single_point(self):
points = np.float32([(1, 2)])
bb = ia.BoundingBox.from_point_soup(points)
assert bb.x1 == 1
assert bb.y1 == 2
assert bb.x2 == 1
assert bb.y2 == 2
def test_from_point_soup__array_with_single_point__single_level(self):
points = np.float32([1, 2])
bb = ia.BoundingBox.from_point_soup(points)
assert bb.x1 == 1
assert bb.y1 == 2
assert bb.x2 == 1
assert bb.y2 == 2
def test_from_point_soup__array_with_two_points__single_level(self):
points = np.float32([1, 2, 3, 4])
bb = ia.BoundingBox.from_point_soup(points)
assert bb.x1 == 1
assert bb.y1 == 2
assert bb.x2 == 3
assert bb.y2 == 4
def test_from_point_soup__array_with_two_points(self):
points = np.float32([(1, 2), (3, 4)])
bb = ia.BoundingBox.from_point_soup(points)
assert bb.x1 == 1
assert bb.y1 == 2
assert bb.x2 == 3
assert bb.y2 == 4
def test_from_point_soup__array_with_three_points(self):
points = np.float32([(1, 4), (3, 2), (15, 16)])
bb = ia.BoundingBox.from_point_soup(points)
assert bb.x1 == 1
assert bb.y1 == 2
assert bb.x2 == 15
assert bb.y2 == 16
def test_copy(self):
bb = ia.BoundingBox(y1=1, y2=3, x1=1, x2=3, label="test")
bb2 = bb.copy()
assert bb2.y1 == 1
assert bb2.y2 == 3
assert bb2.x1 == 1
assert bb2.x2 == 3
assert bb2.label == "test"
def test_copy_and_replace_attributes(self):
bb = ia.BoundingBox(y1=1, y2=3, x1=1, x2=3, label="test")
bb2 = bb.copy(y1=10, x1=20, y2=30, x2=40, label="test2")
assert bb2.y1 == 10
assert bb2.x1 == 20
assert bb2.y2 == 30
assert bb2.x2 == 40
assert bb2.label == "test2"
def test_deepcopy(self):
bb = ia.BoundingBox(y1=1, y2=3, x1=1, x2=3, label=["test"])
bb2 = bb.deepcopy()
bb2.label[0] = "foo"
assert bb2.y1 == 1
assert bb2.y2 == 3
assert bb2.x1 == 1
assert bb2.x2 == 3
assert bb2.label[0] == "foo"
assert bb.label[0] == "test"
def test_deepcopy_and_replace_attributes(self):
bb = ia.BoundingBox(y1=1, y2=3, x1=1, x2=3, label="test")
bb2 = bb.deepcopy(y1=10, y2=30, x1=15, x2=35, label="asd")
assert bb2.y1 == 10
assert bb2.y2 == 30
assert bb2.x1 == 15
assert bb2.x2 == 35
assert bb2.label == "asd"
assert bb.label == "test"
def test___getitem__(self):
cba = ia.BoundingBox(x1=1, y1=2, x2=3, y2=4)
assert np.allclose(cba[0], (1, 2))
assert np.allclose(cba[1], (3, 4))
def test___iter__(self):
cba = ia.BoundingBox(x1=1, y1=2, x2=3, y2=4)
for i, xy in enumerate(cba):
assert i in [0, 1]
if i == 0:
assert np.allclose(xy, (1, 2))
elif i == 1:
assert np.allclose(xy, (3, 4))
assert i == 1
def test_string_conversion(self):
bb = ia.BoundingBox(y1=1, y2=3, x1=1, x2=3)
assert (
bb.__str__()
== bb.__repr__()
== "BoundingBox("
"x1=1.0000, y1=1.0000, x2=3.0000, y2=3.0000, "
"label=None)"
)
def test_string_conversion_with_label(self):
bb = ia.BoundingBox(y1=1, y2=3, x1=1, x2=3, label="foo")
assert (
bb.__str__()
== bb.__repr__()
== "BoundingBox("
"x1=1.0000, y1=1.0000, x2=3.0000, y2=3.0000, "
"label=foo)"
)
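# Illustrative sketch (not a test case): the single-box methods exercised by
# the class above are typically combined as in the commented lines below,
# assuming ``ia`` is imgaug and ``np`` is numpy as imported in this module;
# the coordinates and label are made up for illustration.
#
#   >>> image = np.zeros((100, 100, 3), dtype=np.uint8)
#   >>> bb = ia.BoundingBox(x1=10, y1=20, x2=40, y2=60, label="example")
#   >>> image_drawn = bb.draw_on_image(image)
#   >>> crop = bb.extract_from_image(image)
#   >>> poly = bb.to_polygon()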
class TestBoundingBoxesOnImage_items_setter(unittest.TestCase):
def test_with_list_of_bounding_boxes(self):
bbs = [ia.BoundingBox(x1=1, y1=2, x2=3, y2=4),
ia.BoundingBox(x1=3, y1=4, x2=5, y2=6)]
bbsoi = ia.BoundingBoxesOnImage([], shape=(10, 20, 3))
bbsoi.items = bbs
assert np.all([
(bb_i.x1 == bb_j.x1
and bb_i.y1 == bb_j.y1
and bb_i.x2 == bb_j.x2
and bb_i.y2 == bb_j.y2)
for bb_i, bb_j
in zip(bbsoi.bounding_boxes, bbs)
])
class TestBoundingBoxesOnImage_on_(unittest.TestCase):
@property
def _is_inplace(self):
return True
def _func(self, cbaoi, *args, **kwargs):
return cbaoi.on_(*args, **kwargs)
def test_on_same_height_width(self):
bb1 = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40)
bb2 = ia.BoundingBox(y1=15, x1=25, y2=35, x2=45)
bbsoi = ia.BoundingBoxesOnImage([bb1, bb2], shape=(40, 50, 3))
bbsoi_projected = self._func(bbsoi, (40, 50))
assert bbsoi_projected.bounding_boxes[0].y1 == 10
assert bbsoi_projected.bounding_boxes[0].x1 == 20
assert bbsoi_projected.bounding_boxes[0].y2 == 30
assert bbsoi_projected.bounding_boxes[0].x2 == 40
assert bbsoi_projected.bounding_boxes[1].y1 == 15
assert bbsoi_projected.bounding_boxes[1].x1 == 25
assert bbsoi_projected.bounding_boxes[1].y2 == 35
assert bbsoi_projected.bounding_boxes[1].x2 == 45
assert bbsoi_projected.shape == (40, 50)
def test_on_upscaled_by_2(self):
bb1 = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40)
bb2 = ia.BoundingBox(y1=15, x1=25, y2=35, x2=45)
bbsoi = ia.BoundingBoxesOnImage([bb1, bb2], shape=(40, 50, 3))
bbsoi_projected = self._func(bbsoi, (40*2, 50*2, 3))
assert bbsoi_projected.bounding_boxes[0].y1 == 10*2
assert bbsoi_projected.bounding_boxes[0].x1 == 20*2
assert bbsoi_projected.bounding_boxes[0].y2 == 30*2
assert bbsoi_projected.bounding_boxes[0].x2 == 40*2
assert bbsoi_projected.bounding_boxes[1].y1 == 15*2
assert bbsoi_projected.bounding_boxes[1].x1 == 25*2
assert bbsoi_projected.bounding_boxes[1].y2 == 35*2
assert bbsoi_projected.bounding_boxes[1].x2 == 45*2
assert bbsoi_projected.shape == (40*2, 50*2, 3)
def test_on_upscaled_by_2_with_shape_given_as_array(self):
bb1 = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40)
bb2 = ia.BoundingBox(y1=15, x1=25, y2=35, x2=45)
bbsoi = ia.BoundingBoxesOnImage([bb1, bb2], shape=(40, 50, 3))
bbsoi_projected = self._func(bbsoi, np.zeros((40*2, 50*2, 3), dtype=np.uint8))
assert bbsoi_projected.bounding_boxes[0].y1 == 10*2
assert bbsoi_projected.bounding_boxes[0].x1 == 20*2
assert bbsoi_projected.bounding_boxes[0].y2 == 30*2
assert bbsoi_projected.bounding_boxes[0].x2 == 40*2
assert bbsoi_projected.bounding_boxes[1].y1 == 15*2
assert bbsoi_projected.bounding_boxes[1].x1 == 25*2
assert bbsoi_projected.bounding_boxes[1].y2 == 35*2
assert bbsoi_projected.bounding_boxes[1].x2 == 45*2
assert bbsoi_projected.shape == (40*2, 50*2, 3)
def test_inplaceness(self):
bb1 = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40)
bb2 = ia.BoundingBox(y1=15, x1=25, y2=35, x2=45)
bbsoi = ia.BoundingBoxesOnImage([bb1, bb2], shape=(40, 50, 3))
bbsoi2 = self._func(bbsoi, (40, 50))
if self._is_inplace:
assert bbsoi2 is bbsoi
else:
assert bbsoi2 is not bbsoi
class TestBoundingBoxesOnImage_on(TestBoundingBoxesOnImage_on_):
@property
def _is_inplace(self):
return False
def _func(self, cbaoi, *args, **kwargs):
return cbaoi.on(*args, **kwargs)
class TestBoundingBoxesOnImage_clip_out_of_image_(unittest.TestCase):
@property
def _is_inplace(self):
return True
def _func(self, cbaoi, *args, **kwargs):
return cbaoi.clip_out_of_image_()
def test_clip_out_of_image(self):
bb1 = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40)
bb2 = ia.BoundingBox(y1=15, x1=25, y2=35, x2=51)
bbsoi = ia.BoundingBoxesOnImage([bb1, bb2], shape=(40, 50, 3))
bbsoi_clip = self._func(bbsoi)
assert len(bbsoi_clip.bounding_boxes) == 2
assert bbsoi_clip.bounding_boxes[0].y1 == 10
assert bbsoi_clip.bounding_boxes[0].x1 == 20
assert bbsoi_clip.bounding_boxes[0].y2 == 30
assert bbsoi_clip.bounding_boxes[0].x2 == 40
assert bbsoi_clip.bounding_boxes[1].y1 == 15
assert bbsoi_clip.bounding_boxes[1].x1 == 25
assert bbsoi_clip.bounding_boxes[1].y2 == 35
assert np.isclose(bbsoi_clip.bounding_boxes[1].x2, 50)
def test_inplaceness(self):
bb1 = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40)
bb2 = ia.BoundingBox(y1=15, x1=25, y2=35, x2=45)
bbsoi = ia.BoundingBoxesOnImage([bb1, bb2], shape=(40, 50, 3))
bbsoi2 = self._func(bbsoi, (40, 50))
if self._is_inplace:
assert bbsoi2 is bbsoi
else:
assert bbsoi2 is not bbsoi
class TestBoundingBoxesOnImage_clip_out_of_image(TestBoundingBoxesOnImage_clip_out_of_image_):
@property
def _is_inplace(self):
return False
def _func(self, cbaoi, *args, **kwargs):
return cbaoi.clip_out_of_image()
class TestBoundingBoxesOnImage(unittest.TestCase):
def test___init__(self):
bb1 = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40)
bb2 = ia.BoundingBox(y1=15, x1=25, y2=35, x2=45)
bbsoi = ia.BoundingBoxesOnImage([bb1, bb2], shape=(40, 50, 3))
assert bbsoi.bounding_boxes == [bb1, bb2]
assert bbsoi.shape == (40, 50, 3)
def test___init___array_as_shape(self):
image = np.zeros((40, 50, 3), dtype=np.uint8)
bb1 = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40)
bb2 = ia.BoundingBox(y1=15, x1=25, y2=35, x2=45)
with assertWarns(self, ia.DeprecationWarning):
bbsoi = ia.BoundingBoxesOnImage([bb1, bb2], shape=image)
assert bbsoi.bounding_boxes == [bb1, bb2]
assert bbsoi.shape == (40, 50, 3)
def test_items(self):
bb1 = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40)
bb2 = ia.BoundingBox(y1=15, x1=25, y2=35, x2=45)
bbsoi = ia.BoundingBoxesOnImage([bb1, bb2], shape=(40, 50, 3))
items = bbsoi.items
assert items == [bb1, bb2]
def test_items_empty(self):
bbsoi = ia.BoundingBoxesOnImage([], shape=(40, 50, 3))
items = bbsoi.items
assert items == []
def test_height(self):
bb1 = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40)
bb2 = ia.BoundingBox(y1=15, x1=25, y2=35, x2=45)
bbsoi = ia.BoundingBoxesOnImage([bb1, bb2], shape=(40, 50, 3))
assert bbsoi.height == 40
def test_width(self):
bb1 = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40)
bb2 = ia.BoundingBox(y1=15, x1=25, y2=35, x2=45)
bbsoi = ia.BoundingBoxesOnImage([bb1, bb2], shape=(40, 50, 3))
assert bbsoi.width == 50
def test_empty_when_bbs_not_empty(self):
bb = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40)
bbsoi = ia.BoundingBoxesOnImage([bb], shape=(40, 50, 3))
assert not bbsoi.empty
def test_empty_when_bbs_actually_empty(self):
bbsoi = ia.BoundingBoxesOnImage([], shape=(40, 50, 3))
assert bbsoi.empty
def test_from_xyxy_array_float(self):
xyxy = np.float32([
[0.0, 0.0, 1.0, 1.0],
[1.0, 2.0, 3.0, 4.0]
])
bbsoi = ia.BoundingBoxesOnImage.from_xyxy_array(xyxy, shape=(40, 50, 3))
assert len(bbsoi.bounding_boxes) == 2
assert np.allclose(bbsoi.bounding_boxes[0].x1, 0.0)
assert np.allclose(bbsoi.bounding_boxes[0].y1, 0.0)
assert np.allclose(bbsoi.bounding_boxes[0].x2, 1.0)
assert np.allclose(bbsoi.bounding_boxes[0].y2, 1.0)
assert np.allclose(bbsoi.bounding_boxes[1].x1, 1.0)
assert np.allclose(bbsoi.bounding_boxes[1].y1, 2.0)
assert np.allclose(bbsoi.bounding_boxes[1].x2, 3.0)
assert np.allclose(bbsoi.bounding_boxes[1].y2, 4.0)
assert bbsoi.shape == (40, 50, 3)
def test_from_xyxy_array_float_3d(self):
xyxy = np.float32([
[
[0.0, 0.0],
[1.0, 1.0]
],
[
[1.0, 2.0],
[3.0, 4.0]
]
])
bbsoi = ia.BoundingBoxesOnImage.from_xyxy_array(xyxy, shape=(40, 50, 3))
assert len(bbsoi.bounding_boxes) == 2
assert np.allclose(bbsoi.bounding_boxes[0].x1, 0.0)
assert np.allclose(bbsoi.bounding_boxes[0].y1, 0.0)
assert np.allclose(bbsoi.bounding_boxes[0].x2, 1.0)
assert np.allclose(bbsoi.bounding_boxes[0].y2, 1.0)
assert np.allclose(bbsoi.bounding_boxes[1].x1, 1.0)
assert np.allclose(bbsoi.bounding_boxes[1].y1, 2.0)
assert np.allclose(bbsoi.bounding_boxes[1].x2, 3.0)
assert np.allclose(bbsoi.bounding_boxes[1].y2, 4.0)
assert bbsoi.shape == (40, 50, 3)
def test_from_xyxy_array_int32(self):
xyxy = np.int32([
[0, 0, 1, 1],
[1, 2, 3, 4]
])
bbsoi = ia.BoundingBoxesOnImage.from_xyxy_array(xyxy, shape=(40, 50, 3))
assert len(bbsoi.bounding_boxes) == 2
assert np.allclose(bbsoi.bounding_boxes[0].x1, 0.0)
assert np.allclose(bbsoi.bounding_boxes[0].y1, 0.0)
assert np.allclose(bbsoi.bounding_boxes[0].x2, 1.0)
assert np.allclose(bbsoi.bounding_boxes[0].y2, 1.0)
assert np.allclose(bbsoi.bounding_boxes[1].x1, 1.0)
assert np.allclose(bbsoi.bounding_boxes[1].y1, 2.0)
assert np.allclose(bbsoi.bounding_boxes[1].x2, 3.0)
assert np.allclose(bbsoi.bounding_boxes[1].y2, 4.0)
assert bbsoi.shape == (40, 50, 3)
def test_from_xyxy_array_empty_array(self):
xyxy = np.zeros((0, 4), dtype=np.float32)
bbsoi = ia.BoundingBoxesOnImage.from_xyxy_array(xyxy, shape=(40, 50, 3))
assert len(bbsoi.bounding_boxes) == 0
assert bbsoi.shape == (40, 50, 3)
def test_from_point_soups__2d_array(self):
xy = np.float32([
[7, 3,
11, 5,
1, 7,
12, 19]
])
bbsoi = ia.BoundingBoxesOnImage.from_point_soups(
xy, shape=(40, 50, 3))
assert len(bbsoi.bounding_boxes) == 1
assert bbsoi.bounding_boxes[0].x1 == 1
assert bbsoi.bounding_boxes[0].y1 == 3
assert bbsoi.bounding_boxes[0].x2 == 12
assert bbsoi.bounding_boxes[0].y2 == 19
assert bbsoi.shape == (40, 50, 3)
def test_from_point_soups__3d_array(self):
xy = np.float32([
[
[7, 3],
[11, 5],
[1, 7],
[12, 19]
]
])
bbsoi = ia.BoundingBoxesOnImage.from_point_soups(
xy, shape=(40, 50, 3))
assert len(bbsoi.bounding_boxes) == 1
assert bbsoi.bounding_boxes[0].x1 == 1
assert bbsoi.bounding_boxes[0].y1 == 3
assert bbsoi.bounding_boxes[0].x2 == 12
assert bbsoi.bounding_boxes[0].y2 == 19
assert bbsoi.shape == (40, 50, 3)
def test_from_point_soups__2d_list(self):
xy = [
[7, 3,
11, 5,
1, 7,
12, 19]
]
bbsoi = ia.BoundingBoxesOnImage.from_point_soups(
xy, shape=(40, 50, 3))
assert len(bbsoi.bounding_boxes) == 1
assert bbsoi.bounding_boxes[0].x1 == 1
assert bbsoi.bounding_boxes[0].y1 == 3
assert bbsoi.bounding_boxes[0].x2 == 12
assert bbsoi.bounding_boxes[0].y2 == 19
assert bbsoi.shape == (40, 50, 3)
def test_from_point_soups__empty_array(self):
xy = np.zeros((0, 4), dtype=np.float32)
bbsoi = ia.BoundingBoxesOnImage.from_point_soups(
xy, shape=(40, 50, 3))
assert len(bbsoi.bounding_boxes) == 0
assert bbsoi.shape == (40, 50, 3)
def test_from_point_soups__empty_list(self):
xy = []
bbsoi = ia.BoundingBoxesOnImage.from_point_soups(
xy, shape=(40, 50, 3))
assert len(bbsoi.bounding_boxes) == 0
assert bbsoi.shape == (40, 50, 3)
def test_to_xyxy_array(self):
xyxy = np.float32([
[0.0, 0.0, 1.0, 1.0],
[1.0, 2.0, 3.0, 4.0]
])
bbsoi = ia.BoundingBoxesOnImage.from_xyxy_array(xyxy, shape=(40, 50, 3))
xyxy_out = bbsoi.to_xyxy_array()
assert np.allclose(xyxy, xyxy_out)
assert xyxy_out.dtype.name == "float32"
def test_to_xyxy_array_convert_to_int32(self):
xyxy = np.float32([
[0.0, 0.0, 1.0, 1.0],
[1.0, 2.0, 3.0, 4.0]
])
bbsoi = ia.BoundingBoxesOnImage.from_xyxy_array(xyxy, shape=(40, 50, 3))
xyxy_out = bbsoi.to_xyxy_array(dtype=np.int32)
assert np.allclose(xyxy.astype(np.int32), xyxy_out)
assert xyxy_out.dtype.name == "int32"
def test_to_xyxy_array_no_bbs_to_convert(self):
bbsoi = ia.BoundingBoxesOnImage([], shape=(40, 50, 3))
xyxy_out = bbsoi.to_xyxy_array(dtype=np.int32)
assert xyxy_out.shape == (0, 4)
def test_to_xy_array(self):
xyxy = np.float32([
[0.0, 0.0, 1.0, 1.0],
[1.0, 2.0, 3.0, 4.0]
])
bbsoi = ia.BoundingBoxesOnImage.from_xyxy_array(xyxy, shape=(40, 50, 3))
xy_out = bbsoi.to_xy_array()
expected = np.float32([
[0.0, 0.0],
[1.0, 1.0],
[1.0, 2.0],
[3.0, 4.0]
])
assert xy_out.shape == (4, 2)
assert np.allclose(xy_out, expected)
assert xy_out.dtype.name == "float32"
def test_to_xy_array__empty_instance(self):
bbsoi = ia.BoundingBoxesOnImage([], shape=(1, 2, 3))
xy_out = bbsoi.to_xy_array()
assert xy_out.shape == (0, 2)
assert xy_out.dtype.name == "float32"
def test_fill_from_xyxy_array___empty_array(self):
xyxy = np.zeros((0, 4), dtype=np.float32)
bbsoi = ia.BoundingBoxesOnImage([], shape=(2, 2, 3))
bbsoi = bbsoi.fill_from_xyxy_array_(xyxy)
assert len(bbsoi.bounding_boxes) == 0
def test_fill_from_xyxy_array___empty_list(self):
xyxy = []
bbsoi = ia.BoundingBoxesOnImage([], shape=(2, 2, 3))
bbsoi = bbsoi.fill_from_xyxy_array_(xyxy)
assert len(bbsoi.bounding_boxes) == 0
def test_fill_from_xyxy_array___array_with_two_coords(self):
xyxy = np.array(
[(100, 101, 102, 103),
(200, 201, 202, 203)], dtype=np.float32)
bbsoi = ia.BoundingBoxesOnImage(
[ia.BoundingBox(1, 2, 3, 4),
ia.BoundingBox(10, 20, 30, 40)],
shape=(2, 2, 3))
bbsoi = bbsoi.fill_from_xyxy_array_(xyxy)
assert len(bbsoi.bounding_boxes) == 2
assert bbsoi.bounding_boxes[0].x1 == 100
assert bbsoi.bounding_boxes[0].y1 == 101
assert bbsoi.bounding_boxes[0].x2 == 102
assert bbsoi.bounding_boxes[0].y2 == 103
assert bbsoi.bounding_boxes[1].x1 == 200
assert bbsoi.bounding_boxes[1].y1 == 201
assert bbsoi.bounding_boxes[1].x2 == 202
assert bbsoi.bounding_boxes[1].y2 == 203
def test_fill_from_xyxy_array___list_with_two_coords(self):
xyxy = [(100, 101, 102, 103),
(200, 201, 202, 203)]
bbsoi = ia.BoundingBoxesOnImage(
[ia.BoundingBox(1, 2, 3, 4),
ia.BoundingBox(10, 20, 30, 40)],
shape=(2, 2, 3))
bbsoi = bbsoi.fill_from_xyxy_array_(xyxy)
assert len(bbsoi.bounding_boxes) == 2
assert bbsoi.bounding_boxes[0].x1 == 100
assert bbsoi.bounding_boxes[0].y1 == 101
assert bbsoi.bounding_boxes[0].x2 == 102
assert bbsoi.bounding_boxes[0].y2 == 103
assert bbsoi.bounding_boxes[1].x1 == 200
assert bbsoi.bounding_boxes[1].y1 == 201
assert bbsoi.bounding_boxes[1].x2 == 202
assert bbsoi.bounding_boxes[1].y2 == 203
def test_fill_from_xy_array___empty_array(self):
xy = np.zeros((0, 2), dtype=np.float32)
bbsoi = ia.BoundingBoxesOnImage([], shape=(2, 2, 3))
bbsoi = bbsoi.fill_from_xy_array_(xy)
assert len(bbsoi.bounding_boxes) == 0
def test_fill_from_xy_array___empty_list(self):
xy = []
bbsoi = ia.BoundingBoxesOnImage([], shape=(2, 2, 3))
bbsoi = bbsoi.fill_from_xy_array_(xy)
assert len(bbsoi.bounding_boxes) == 0
def test_fill_from_xy_array___array_with_two_coords(self):
xy = np.array(
[(100, 101),
(102, 103),
(200, 201),
(202, 203)], dtype=np.float32)
bbsoi = ia.BoundingBoxesOnImage(
[ia.BoundingBox(1, 2, 3, 4),
ia.BoundingBox(10, 20, 30, 40)],
shape=(2, 2, 3))
bbsoi = bbsoi.fill_from_xy_array_(xy)
assert len(bbsoi.bounding_boxes) == 2
assert bbsoi.bounding_boxes[0].x1 == 100
assert bbsoi.bounding_boxes[0].y1 == 101
assert bbsoi.bounding_boxes[0].x2 == 102
assert bbsoi.bounding_boxes[0].y2 == 103
assert bbsoi.bounding_boxes[1].x1 == 200
assert bbsoi.bounding_boxes[1].y1 == 201
assert bbsoi.bounding_boxes[1].x2 == 202
assert bbsoi.bounding_boxes[1].y2 == 203
def test_fill_from_xy_array___list_with_two_coords(self):
xy = [(100, 101),
(102, 103),
(200, 201),
(202, 203)]
bbsoi = ia.BoundingBoxesOnImage(
[ia.BoundingBox(1, 2, 3, 4),
ia.BoundingBox(10, 20, 30, 40)],
shape=(2, 2, 3))
bbsoi = bbsoi.fill_from_xy_array_(xy)
assert len(bbsoi.bounding_boxes) == 2
assert bbsoi.bounding_boxes[0].x1 == 100
assert bbsoi.bounding_boxes[0].y1 == 101
assert bbsoi.bounding_boxes[0].x2 == 102
assert bbsoi.bounding_boxes[0].y2 == 103
assert bbsoi.bounding_boxes[1].x1 == 200
assert bbsoi.bounding_boxes[1].y1 == 201
assert bbsoi.bounding_boxes[1].x2 == 202
assert bbsoi.bounding_boxes[1].y2 == 203
def test_draw_on_image(self):
bb1 = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40)
bb2 = ia.BoundingBox(y1=15, x1=25, y2=35, x2=45)
bbsoi = ia.BoundingBoxesOnImage([bb1, bb2], shape=(40, 50, 3))
image = np.zeros(bbsoi.shape, dtype=np.uint8)
image_drawn = bbsoi.draw_on_image(
image,
color=[0, 255, 0], alpha=1.0, size=1, copy=True,
raise_if_out_of_image=False)
assert np.all(image_drawn[10-1, 20-1, :] == [0, 0, 0])
assert np.all(image_drawn[10-1, 20-0, :] == [0, 0, 0])
assert np.all(image_drawn[10-0, 20-1, :] == [0, 0, 0])
assert np.all(image_drawn[10-0, 20-0, :] == [0, 255, 0])
assert np.all(image_drawn[10+1, 20+1, :] == [0, 0, 0])
assert np.all(image_drawn[30-1, 40-1, :] == [0, 0, 0])
assert np.all(image_drawn[30+1, 40-0, :] == [0, 0, 0])
assert np.all(image_drawn[30+0, 40+1, :] == [0, 0, 0])
assert np.all(image_drawn[30+0, 40+0, :] == [0, 255, 0])
assert np.all(image_drawn[30+1, 40+1, :] == [0, 0, 0])
assert np.all(image_drawn[15-1, 25-1, :] == [0, 0, 0])
assert np.all(image_drawn[15-1, 25-0, :] == [0, 0, 0])
assert np.all(image_drawn[15-0, 25-1, :] == [0, 0, 0])
assert np.all(image_drawn[15-0, 25-0, :] == [0, 255, 0])
assert np.all(image_drawn[15+1, 25+1, :] == [0, 0, 0])
assert np.all(image_drawn[35-1, 45-1, :] == [0, 0, 0])
assert np.all(image_drawn[35+1, 45+0, :] == [0, 0, 0])
assert np.all(image_drawn[35+0, 45+1, :] == [0, 0, 0])
assert np.all(image_drawn[35+0, 45+0, :] == [0, 255, 0])
assert np.all(image_drawn[35+1, 45+1, :] == [0, 0, 0])
def test_remove_out_of_image_(self):
bb1 = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40)
bb2 = ia.BoundingBox(y1=15, x1=25, y2=35, x2=51)
bbsoi = ia.BoundingBoxesOnImage([bb1, bb2], shape=(40, 50, 3))
bbsoi_removed = bbsoi.remove_out_of_image_(fully=True, partly=True)
assert len(bbsoi_removed.bounding_boxes) == 1
assert bbsoi_removed.bounding_boxes[0] == bb1
assert bbsoi_removed is bbsoi
def test_remove_out_of_image(self):
bb1 = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40)
bb2 = ia.BoundingBox(y1=15, x1=25, y2=35, x2=51)
bbsoi = ia.BoundingBoxesOnImage([bb1, bb2], shape=(40, 50, 3))
bbsoi_removed = bbsoi.remove_out_of_image(fully=True, partly=True)
assert len(bbsoi_removed.bounding_boxes) == 1
assert bbsoi_removed.bounding_boxes[0] == bb1
assert bbsoi_removed is not bbsoi
def test_remove_out_of_image_fraction_(self):
item1 = ia.BoundingBox(y1=1, x1=5, y2=6, x2=9)
item2 = ia.BoundingBox(y1=1, x1=5, y2=6, x2=15)
item3 = ia.BoundingBox(y1=1, x1=15, y2=6, x2=25)
cbaoi = ia.BoundingBoxesOnImage([item1, item2, item3],
shape=(10, 10, 3))
cbaoi_reduced = cbaoi.remove_out_of_image_fraction_(0.6)
assert len(cbaoi_reduced.items) == 2
assert cbaoi_reduced.items == [item1, item2]
assert cbaoi_reduced is cbaoi
def test_remove_out_of_image_fraction(self):
item1 = ia.BoundingBox(y1=1, x1=5, y2=6, x2=9)
item2 = ia.BoundingBox(y1=1, x1=5, y2=6, x2=15)
item3 = ia.BoundingBox(y1=1, x1=15, y2=6, x2=25)
cbaoi = ia.BoundingBoxesOnImage([item1, item2, item3],
shape=(10, 10, 3))
cbaoi_reduced = cbaoi.remove_out_of_image_fraction(0.6)
assert len(cbaoi_reduced.items) == 2
assert cbaoi_reduced.items == [item1, item2]
assert cbaoi_reduced is not cbaoi
def test_shift_(self):
bb1 = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40)
bb2 = ia.BoundingBox(y1=15, x1=25, y2=35, x2=51)
bbsoi = ia.BoundingBoxesOnImage([bb1, bb2], shape=(40, 50, 3))
bbsoi_shifted = bbsoi.shift_(y=2)
assert len(bbsoi_shifted.bounding_boxes) == 2
assert bbsoi_shifted.bounding_boxes[0].y1 == 10 + 2
assert bbsoi_shifted.bounding_boxes[0].x1 == 20
assert bbsoi_shifted.bounding_boxes[0].y2 == 30 + 2
assert bbsoi_shifted.bounding_boxes[0].x2 == 40
assert bbsoi_shifted.bounding_boxes[1].y1 == 15 + 2
assert bbsoi_shifted.bounding_boxes[1].x1 == 25
assert bbsoi_shifted.bounding_boxes[1].y2 == 35 + 2
assert bbsoi_shifted.bounding_boxes[1].x2 == 51
assert bbsoi_shifted is bbsoi
def test_shift(self):
bb1 = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40)
bb2 = ia.BoundingBox(y1=15, x1=25, y2=35, x2=51)
bbsoi = ia.BoundingBoxesOnImage([bb1, bb2], shape=(40, 50, 3))
bbsoi_shifted = bbsoi.shift(y=2)
assert len(bbsoi_shifted.bounding_boxes) == 2
assert bbsoi_shifted.bounding_boxes[0].y1 == 10 + 2
assert bbsoi_shifted.bounding_boxes[0].x1 == 20
assert bbsoi_shifted.bounding_boxes[0].y2 == 30 + 2
assert bbsoi_shifted.bounding_boxes[0].x2 == 40
assert bbsoi_shifted.bounding_boxes[1].y1 == 15 + 2
assert bbsoi_shifted.bounding_boxes[1].x1 == 25
assert bbsoi_shifted.bounding_boxes[1].y2 == 35 + 2
assert bbsoi_shifted.bounding_boxes[1].x2 == 51
assert bbsoi_shifted is not bbsoi
def test_shift__deprecated_args(self):
bb1 = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40)
bb2 = ia.BoundingBox(y1=15, x1=25, y2=35, x2=51)
bbsoi = ia.BoundingBoxesOnImage([bb1, bb2], shape=(40, 50, 3))
with warnings.catch_warnings(record=True) as caught_warnings:
warnings.simplefilter("always")
bbsoi_shifted = bbsoi.shift(right=1)
assert len(bbsoi_shifted.bounding_boxes) == 2
assert bbsoi_shifted.bounding_boxes[0].y1 == 10
assert bbsoi_shifted.bounding_boxes[0].x1 == 20 - 1
assert bbsoi_shifted.bounding_boxes[0].y2 == 30
assert bbsoi_shifted.bounding_boxes[0].x2 == 40 - 1
assert bbsoi_shifted.bounding_boxes[1].y1 == 15
assert bbsoi_shifted.bounding_boxes[1].x1 == 25 - 1
assert bbsoi_shifted.bounding_boxes[1].y2 == 35
assert bbsoi_shifted.bounding_boxes[1].x2 == 51 - 1
assert bbsoi_shifted is not bbsoi
assert (
"These are deprecated. Use `x` and `y` instead."
in str(caught_warnings[-1].message)
)
def test_to_keypoints_on_image(self):
bbsoi = ia.BoundingBoxesOnImage(
[ia.BoundingBox(0, 1, 2, 3),
ia.BoundingBox(10, 20, 30, 40)],
shape=(1, 2, 3))
kpsoi = bbsoi.to_keypoints_on_image()
assert len(kpsoi.keypoints) == 2*4
assert kpsoi.keypoints[0].x == 0
assert kpsoi.keypoints[0].y == 1
assert kpsoi.keypoints[1].x == 2
assert kpsoi.keypoints[1].y == 1
assert kpsoi.keypoints[2].x == 2
assert kpsoi.keypoints[2].y == 3
assert kpsoi.keypoints[3].x == 0
assert kpsoi.keypoints[3].y == 3
assert kpsoi.keypoints[4].x == 10
assert kpsoi.keypoints[4].y == 20
assert kpsoi.keypoints[5].x == 30
assert kpsoi.keypoints[5].y == 20
assert kpsoi.keypoints[6].x == 30
assert kpsoi.keypoints[6].y == 40
assert kpsoi.keypoints[7].x == 10
assert kpsoi.keypoints[7].y == 40
def test_to_keypoints_on_image__empty_instance(self):
bbsoi = ia.BoundingBoxesOnImage([], shape=(1, 2, 3))
kpsoi = bbsoi.to_keypoints_on_image()
assert len(kpsoi.keypoints) == 0
def test_invert_to_keypoints_on_image_(self):
bbsoi = ia.BoundingBoxesOnImage(
[ia.BoundingBox(0, 1, 2, 3),
ia.BoundingBox(10, 20, 30, 40)],
shape=(1, 2, 3))
kpsoi = ia.KeypointsOnImage(
[ia.Keypoint(100, 101), ia.Keypoint(102, 103),
ia.Keypoint(104, 105), ia.Keypoint(106, 107),
ia.Keypoint(110, 120), ia.Keypoint(130, 140),
ia.Keypoint(150, 160), ia.Keypoint(170, 180)],
shape=(10, 20, 30))
bbsoi_inv = bbsoi.invert_to_keypoints_on_image_(kpsoi)
assert len(bbsoi_inv.bounding_boxes) == 2
assert bbsoi_inv.shape == (10, 20, 30)
assert bbsoi_inv.bounding_boxes[0].x1 == 100
assert bbsoi_inv.bounding_boxes[0].y1 == 101
assert bbsoi_inv.bounding_boxes[0].x2 == 106
assert bbsoi_inv.bounding_boxes[0].y2 == 107
assert bbsoi_inv.bounding_boxes[1].x1 == 110
assert bbsoi_inv.bounding_boxes[1].y1 == 120
assert bbsoi_inv.bounding_boxes[1].x2 == 170
assert bbsoi_inv.bounding_boxes[1].y2 == 180
def test_invert_to_keypoints_on_image___empty_instance(self):
bbsoi = ia.BoundingBoxesOnImage([], shape=(1, 2, 3))
kpsoi = ia.KeypointsOnImage([], shape=(10, 20, 30))
bbsoi_inv = bbsoi.invert_to_keypoints_on_image_(kpsoi)
assert len(bbsoi_inv.bounding_boxes) == 0
assert bbsoi_inv.shape == (10, 20, 30)
def test_to_polygons_on_image(self):
bb1 = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40)
bb2 = ia.BoundingBox(y1=15, x1=25, y2=35, x2=51)
bbsoi = ia.BoundingBoxesOnImage([bb1, bb2], shape=(40, 50, 3))
psoi = bbsoi.to_polygons_on_image()
assert psoi.shape == (40, 50, 3)
assert len(psoi.items) == 2
assert psoi.items[0].coords_almost_equals([
(20, 10),
(40, 10),
(40, 30),
(20, 30)
])
assert psoi.items[1].coords_almost_equals([
(25, 15),
(51, 15),
(51, 35),
(25, 35)
])
def test_to_polygons_on_image__empty_instance(self):
bbsoi = ia.BoundingBoxesOnImage([], shape=(40, 50, 3))
psoi = bbsoi.to_polygons_on_image()
assert psoi.shape == (40, 50, 3)
assert len(psoi.items) == 0
def test_copy(self):
bb1 = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40)
bb2 = ia.BoundingBox(y1=15, x1=25, y2=35, x2=51)
bbsoi = ia.BoundingBoxesOnImage([bb1, bb2], shape=(40, 50, 3))
bbsoi_copy = bbsoi.copy()
assert len(bbsoi.bounding_boxes) == 2
assert bbsoi_copy.bounding_boxes[0].y1 == 10
assert bbsoi_copy.bounding_boxes[0].x1 == 20
assert bbsoi_copy.bounding_boxes[0].y2 == 30
assert bbsoi_copy.bounding_boxes[0].x2 == 40
assert bbsoi_copy.bounding_boxes[1].y1 == 15
assert bbsoi_copy.bounding_boxes[1].x1 == 25
assert bbsoi_copy.bounding_boxes[1].y2 == 35
assert bbsoi_copy.bounding_boxes[1].x2 == 51
bbsoi_copy.bounding_boxes[0].y1 = 0
assert bbsoi.bounding_boxes[0].y1 == 0
assert bbsoi_copy.bounding_boxes[0].y1 == 0
def test_copy_bounding_boxes_set(self):
bb1 = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40)
bb2 = ia.BoundingBox(y1=15, x1=25, y2=35, x2=51)
bb3 = ia.BoundingBox(y1=15+1, x1=25+1, y2=35+1, x2=51+1)
bbsoi = ia.BoundingBoxesOnImage([bb1, bb2], shape=(40, 50, 3))
bbsoi_copy = bbsoi.copy(bounding_boxes=[bb3])
assert bbsoi_copy is not bbsoi
assert bbsoi_copy.shape == (40, 50, 3)
assert bbsoi_copy.bounding_boxes == [bb3]
def test_copy_shape_set(self):
bb1 = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40)
bb2 = ia.BoundingBox(y1=15, x1=25, y2=35, x2=51)
bbsoi = ia.BoundingBoxesOnImage([bb1, bb2], shape=(40, 50, 3))
bbsoi_copy = bbsoi.copy(shape=(40+1, 50+1, 3))
assert bbsoi_copy is not bbsoi
assert bbsoi_copy.shape == (40+1, 50+1, 3)
assert bbsoi_copy.bounding_boxes == [bb1, bb2]
def test_deepcopy(self):
bb1 = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40)
bb2 = ia.BoundingBox(y1=15, x1=25, y2=35, x2=51)
bbsoi = ia.BoundingBoxesOnImage([bb1, bb2], shape=(40, 50, 3))
bbsoi_copy = bbsoi.deepcopy()
assert len(bbsoi.bounding_boxes) == 2
assert bbsoi_copy.bounding_boxes[0].y1 == 10
assert bbsoi_copy.bounding_boxes[0].x1 == 20
assert bbsoi_copy.bounding_boxes[0].y2 == 30
assert bbsoi_copy.bounding_boxes[0].x2 == 40
assert bbsoi_copy.bounding_boxes[1].y1 == 15
assert bbsoi_copy.bounding_boxes[1].x1 == 25
assert bbsoi_copy.bounding_boxes[1].y2 == 35
assert bbsoi_copy.bounding_boxes[1].x2 == 51
bbsoi_copy.bounding_boxes[0].y1 = 0
assert bbsoi.bounding_boxes[0].y1 == 10
assert bbsoi_copy.bounding_boxes[0].y1 == 0
def test_deepcopy_bounding_boxes_set(self):
bb1 = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40)
bb2 = ia.BoundingBox(y1=15, x1=25, y2=35, x2=51)
bb3 = ia.BoundingBox(y1=15+1, x1=25+1, y2=35+1, x2=51+1)
bbsoi = ia.BoundingBoxesOnImage([bb1, bb2], shape=(40, 50, 3))
bbsoi_copy = bbsoi.deepcopy(bounding_boxes=[bb3])
assert bbsoi_copy is not bbsoi
assert bbsoi_copy.shape == (40, 50, 3)
assert bbsoi_copy.bounding_boxes == [bb3]
def test_deepcopy_shape_set(self):
bb1 = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40)
bb2 = ia.BoundingBox(y1=15, x1=25, y2=35, x2=51)
bbsoi = ia.BoundingBoxesOnImage([bb1, bb2], shape=(40, 50, 3))
bbsoi_copy = bbsoi.deepcopy(shape=(40+1, 50+1, 3))
assert bbsoi_copy is not bbsoi
assert bbsoi_copy.shape == (40+1, 50+1, 3)
assert len(bbsoi_copy.bounding_boxes) == 2
assert bbsoi_copy.bounding_boxes[0].coords_almost_equals(bb1)
assert bbsoi_copy.bounding_boxes[1].coords_almost_equals(bb2)
def test___getitem__(self):
cbas = [
ia.BoundingBox(x1=1, y1=2, x2=3, y2=4),
ia.BoundingBox(x1=2, y1=3, x2=4, y2=5)
]
cbasoi = ia.BoundingBoxesOnImage(cbas, shape=(3, 4, 3))
assert cbasoi[0] is cbas[0]
assert cbasoi[1] is cbas[1]
assert cbasoi[0:2] == cbas
def test___iter__(self):
cbas = [ia.BoundingBox(x1=0, y1=0, x2=2, y2=2),
ia.BoundingBox(x1=1, y1=2, x2=3, y2=4)]
cbasoi = ia.BoundingBoxesOnImage(cbas, shape=(40, 50, 3))
for i, cba in enumerate(cbasoi):
assert cba is cbas[i]
def test___iter___empty(self):
cbasoi = ia.BoundingBoxesOnImage([], shape=(40, 50, 3))
i = 0
for _cba in cbasoi:
i += 1
assert i == 0
def test___len__(self):
cbas = [ia.BoundingBox(x1=0, y1=0, x2=2, y2=2),
ia.BoundingBox(x1=1, y1=2, x2=3, y2=4)]
cbasoi = ia.BoundingBoxesOnImage(cbas, shape=(40, 50, 3))
assert len(cbasoi) == 2
def test_string_conversion(self):
bb1 = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40)
bb2 = ia.BoundingBox(y1=15, x1=25, y2=35, x2=51)
bbsoi = ia.BoundingBoxesOnImage([bb1, bb2], shape=(40, 50, 3))
bb1_expected = "BoundingBox(x1=20.0000, y1=10.0000, " \
"x2=40.0000, y2=30.0000, label=None)"
bb2_expected = "BoundingBox(x1=25.0000, y1=15.0000, " \
"x2=51.0000, y2=35.0000, label=None)"
expected = "BoundingBoxesOnImage([%s, %s], shape=(40, 50, 3))" % (
bb1_expected, bb2_expected)
assert (
bbsoi.__repr__()
== bbsoi.__str__()
== expected
)
def test_string_conversion_labels_are_not_none(self):
bb1 = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40, label="foo")
bb2 = ia.BoundingBox(y1=15, x1=25, y2=35, x2=51, label="bar")
bbsoi = ia.BoundingBoxesOnImage([bb1, bb2], shape=(40, 50, 3))
bb1_expected = "BoundingBox(x1=20.0000, y1=10.0000, " \
"x2=40.0000, y2=30.0000, label=foo)"
bb2_expected = "BoundingBox(x1=25.0000, y1=15.0000, " \
"x2=51.0000, y2=35.0000, label=bar)"
expected = "BoundingBoxesOnImage([%s, %s], shape=(40, 50, 3))" % (
bb1_expected, bb2_expected)
assert (
bbsoi.__repr__()
== bbsoi.__str__()
== expected
)
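# Illustrative sketch (not a test case): the BoundingBoxesOnImage methods
# exercised by the test classes above, from construction via an xyxy array
# to clipping and drawing, chain as in the commented lines below; the
# coordinates are made up for illustration.
#
#   >>> xyxy = np.float32([[10, 10, 30, 30], [20, 20, 60, 45]])
#   >>> bbsoi = ia.BoundingBoxesOnImage.from_xyxy_array(xyxy, shape=(40, 50, 3))
#   >>> bbsoi = bbsoi.clip_out_of_image()
#   >>> image_drawn = bbsoi.draw_on_image(np.zeros((40, 50, 3), dtype=np.uint8))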
class Test_LabelOnImageDrawer(unittest.TestCase):
def test_draw_on_image_(self):
height = 30
image = np.full((100, 50, 3), 100, dtype=np.uint8)
bb = ia.BoundingBox(x1=5, x2=20, y1=50, y2=60)
drawer = _LabelOnImageDrawer(color_text=(255, 255, 255),
color_bg=(0, 0, 0),
height=height)
image_drawn = drawer.draw_on_image_(np.copy(image), bb)
frac_colors_as_expected = np.average(
np.logical_or(image_drawn[50-1-height:50-1, 5-1:20+1, :] == 0,
image_drawn[50-1-height:50-1, 5-1:20+1, :] == 255)
)
assert np.all(image_drawn[:50-1-height, :, :] == 100)
assert np.all(image_drawn[50-1:, :, :] == 100)
assert np.all(image_drawn[:, :5-1, :] == 100)
assert np.all(image_drawn[:, 20+1:, :] == 100)
assert frac_colors_as_expected > 0.75
def test_draw_on_image(self):
image = np.full((20, 30, 3), 100, dtype=np.uint8)
bb = ia.BoundingBox(x1=1, x2=6, y1=2, y2=10)
drawer = _LabelOnImageDrawer(color_text=(255, 255, 255),
color_bg=(0, 0, 0))
image_drawn_inplace = drawer.draw_on_image_(np.copy(image), bb)
image_drawn = drawer.draw_on_image_(image, bb)
assert np.array_equal(image_drawn, image_drawn_inplace)
def test__do_raise_if_out_of_image__bb_is_fully_inside(self):
drawer = _LabelOnImageDrawer(raise_if_out_of_image=True)
image = np.zeros((20, 30, 3), dtype=np.uint8)
bb = ia.BoundingBox(x1=1, x2=6, y1=2, y2=10)
# assert no exception
drawer._do_raise_if_out_of_image(image, bb)
def test__do_raise_if_out_of_image__bb_is_partially_outside(self):
drawer = _LabelOnImageDrawer(raise_if_out_of_image=True)
image = np.zeros((20, 30, 3), dtype=np.uint8)
bb = ia.BoundingBox(x1=30-5, x2=30+1, y1=2, y2=10)
# assert no exception
drawer._do_raise_if_out_of_image(image, bb)
def test__do_raise_if_out_of_image__bb_is_fully_outside(self):
drawer = _LabelOnImageDrawer(raise_if_out_of_image=True)
image = np.zeros((20, 30, 3), dtype=np.uint8)
bb = ia.BoundingBox(x1=30+1, x2=30+6, y1=2, y2=10)
with self.assertRaises(Exception):
drawer._do_raise_if_out_of_image(image, bb)
def test__preprocess_colors__only_main_color_set(self):
drawer = _LabelOnImageDrawer(color=(0, 255, 0))
color_text, color_bg = drawer._preprocess_colors()
assert np.array_equal(color_text, [0, 0, 0])
assert np.array_equal(color_bg, [0, 255, 0])
def test__preprocess_colors__subcolors_set(self):
drawer = _LabelOnImageDrawer(color_text=(128, 129, 130),
color_bg=(131, 132, 133))
color_text, color_bg = drawer._preprocess_colors()
assert np.array_equal(color_text, [128, 129, 130])
assert np.array_equal(color_bg, [131, 132, 133])
def test__preprocess_colors__text_not_set_must_be_black(self):
drawer = _LabelOnImageDrawer(color=(255, 255, 255),
color_bg=(255, 255, 255))
color_text, color_bg = drawer._preprocess_colors()
assert np.array_equal(color_text, [0, 0, 0])
assert np.array_equal(color_bg, [255, 255, 255])
def test__compute_bg_corner_coords__standard_bb(self):
height = 30
for size in [1, 2]:
with self.subTest(size=size):
drawer = _LabelOnImageDrawer(size=size, height=height)
bb = ia.BoundingBox(x1=10, x2=30, y1=60, y2=90)
image = np.zeros((100, 200, 3), dtype=np.uint8)
x1, y1, x2, y2 = drawer._compute_bg_corner_coords(image, bb)
assert np.isclose(x1, max(bb.x1 - size + 1, 0))
assert np.isclose(y1, max(bb.y1 - 1 - height, 0))
assert np.isclose(x2, min(bb.x2 + size, image.shape[1]-1))
assert np.isclose(y2, min(bb.y1 - 1, image.shape[0]-1))
def test__compute_bg_corner_coords__zero_sized_bb(self):
height = 30
size = 1
drawer = _LabelOnImageDrawer(size=1, height=height)
bb = ia.BoundingBox(x1=10, x2=10, y1=60, y2=90)
image = np.zeros((100, 200, 3), dtype=np.uint8)
x1, y1, x2, y2 = drawer._compute_bg_corner_coords(image, bb)
assert np.isclose(x1, bb.x1 - size + 1)
assert np.isclose(y1, bb.y1 - 1 - height)
assert np.isclose(x2, bb.x2 + size)
assert np.isclose(y2, bb.y1 - 1)
def test__draw_label_arr__label_is_none(self):
drawer = _LabelOnImageDrawer()
height = 50
width = 100
nb_channels = 3
color_text = np.uint8([0, 255, 0])
color_bg = np.uint8([255, 0, 0])
size_text = 20
label_arr = drawer._draw_label_arr(None, height, width, nb_channels,
np.uint8,
color_text, color_bg, size_text)
frac_textcolor = np.average(
np.min(label_arr == color_text.reshape((1, 1, -1)), axis=-1)
)
frac_bgcolor = np.average(
np.min(label_arr == color_bg.reshape((1, 1, -1)), axis=-1)
)
assert label_arr.dtype.name == "uint8"
assert label_arr.shape == (height, width, nb_channels)
assert frac_textcolor > 0.02
assert frac_bgcolor > 0.8
# not all pixels of the text might be drawn with exactly the text
# color
assert frac_textcolor + frac_bgcolor > 0.75
def test__draw_label_arr__label_is_str(self):
drawer = _LabelOnImageDrawer()
height = 50
width = 100
nb_channels = 3
color_text = np.uint8([0, 255, 0])
color_bg = np.uint8([255, 0, 0])
size_text = 20
label_arr = drawer._draw_label_arr("Fooo", height, width, nb_channels,
np.uint8,
color_text, color_bg, size_text)
frac_textcolor = np.average(
np.min(label_arr == color_text.reshape((1, 1, -1)), axis=-1)
)
frac_bgcolor = np.average(
np.min(label_arr == color_bg.reshape((1, 1, -1)), axis=-1)
)
assert label_arr.dtype.name == "uint8"
assert label_arr.shape == (height, width, nb_channels)
assert frac_textcolor > 0.02
assert frac_bgcolor > 0.8
# not all pixels of the text might be drawn with exactly the text
# color
assert frac_textcolor + frac_bgcolor > 0.75
def test__blend_label_arr__alpha_is_1(self):
drawer = _LabelOnImageDrawer(alpha=1)
image = np.full((50, 60, 3), 100, dtype=np.uint8)
label_arr = np.full((10, 20, 3), 200, dtype=np.uint8)
x1 = 15
x2 = 15 + 20
y1 = 10
y2 = 10 + 10
image_blend = drawer._blend_label_arr_with_image_(image, label_arr,
x1, y1, x2, y2)
assert np.all(image_blend[:, :15, :] == 100)
assert np.all(image_blend[:, 15+20:, :] == 100)
assert np.all(image_blend[:10, :, :] == 100)
assert np.all(image_blend[10+10:, :, :] == 100)
assert np.all(image_blend[10:10+10, 15:15+20, :] == 200)
def test__blend_label_arr__alpha_is_075(self):
drawer = _LabelOnImageDrawer(alpha=0.75)
image = np.full((50, 60, 3), 100, dtype=np.uint8)
label_arr = np.full((10, 20, 3), 200, dtype=np.uint8)
x1 = 15
x2 = 15 + 20
y1 = 10
y2 = 10 + 10
image_blend = drawer._blend_label_arr_with_image_(image, label_arr,
x1, y1, x2, y2)
assert np.all(image_blend[:, :15, :] == 100)
assert np.all(image_blend[:, 15+20:, :] == 100)
assert np.all(image_blend[:10, :, :] == 100)
assert np.all(image_blend[10+10:, :, :] == 100)
assert np.all(image_blend[10:10+10, 15:15+20, :] == 100+75)
|
aleju/ImageAugmenter
|
test/augmentables/test_bbs.py
|
Python
|
mit
| 85,736 | 0.000105 |
# coding: utf-8
# Copyright (C) 1994-2016 Altair Engineering, Inc.
# For more information, contact Altair at www.altair.com.
#
# This file is part of the PBS Professional ("PBS Pro") software.
#
# Open Source License Information:
#
# PBS Pro is free software. You can redistribute it and/or modify it under the
# terms of the GNU Affero General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option) any
# later version.
#
# PBS Pro is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU Affero General Public License for more
# details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Commercial License Information:
#
# The PBS Pro software is licensed under the terms of the GNU Affero General
# Public License agreement ("AGPL"), except where a separate commercial license
# agreement for PBS Pro version 14 or later has been executed in writing with
# Altair.
#
# Altair’s dual-license business model allows companies, individuals, and
# organizations to create proprietary derivative works of PBS Pro and
# distribute them - whether embedded or bundled with other software - under
# a commercial license agreement.
#
# Use of Altair’s trademarks, including but not limited to "PBS™",
# "PBS Professional®", and "PBS Pro™" and Altair’s logos is subject to Altair's
# trademark licensing policies.
import sys
import os
import socket
import pwd
import grp
import logging
import time
import re
import random
import string
import tempfile
import cPickle
import copy
import datetime
import traceback
import threading
from operator import itemgetter
from collections import OrderedDict
from distutils.version import LooseVersion
try:
import psycopg2
PSYCOPG = True
except:
PSYCOPG = False
try:
from ptl.lib.pbs_ifl import *
API_OK = True
except:
try:
from ptl.lib.pbs_ifl_mock import *
except:
sys.stderr.write("failed to import pbs_ifl, run pbs_swigify " +
"to make it\n")
raise ImportError
API_OK = False
from ptl.lib.pbs_api_to_cli import api_to_cli
from ptl.utils.pbs_dshutils import DshUtils
from ptl.utils.pbs_procutils import ProcUtils
from ptl.utils.pbs_cliutils import CliUtils
from ptl.utils.pbs_fileutils import FileUtils, FILE_TAIL
# suppress logging exceptions
logging.raiseExceptions = False
# Various mappings and aliases
MGR_OBJ_VNODE = MGR_OBJ_NODE
VNODE = MGR_OBJ_VNODE
NODE = MGR_OBJ_NODE
HOST = MGR_OBJ_HOST
JOB = MGR_OBJ_JOB
RESV = MGR_OBJ_RESV
SERVER = MGR_OBJ_SERVER
QUEUE = MGR_OBJ_QUEUE
SCHED = MGR_OBJ_SCHED
HOOK = MGR_OBJ_HOOK
RSC = MGR_OBJ_RSC
PBS_HOOK = MGR_OBJ_PBS_HOOK
# the order of these symbols matters, see pbs_ifl.h
(SET, UNSET, INCR, DECR, EQ, NE, GE, GT,
LE, LT, MATCH, MATCH_RE, NOT, DFLT) = range(14)
(PTL_OR, PTL_AND) = [0, 1]
(IFL_SUBMIT, IFL_SELECT, IFL_TERMINATE, IFL_ALTER,
IFL_MSG, IFL_DELETE) = [0, 1, 2, 3, 4, 5]
(PTL_API, PTL_CLI) = ['api', 'cli']
(PTL_COUNTER, PTL_FILTER) = [0, 1]
PTL_STR_TO_OP = {
'<': LT,
'<=': LE,
'=': EQ,
'>=': GE,
'>': GT,
'!=': NE,
' set ': SET,
' unset ': UNSET,
' match ': MATCH,
'~': MATCH_RE,
'!': NOT
}
PTL_OP_TO_STR = {
LT: '<',
LE: '<=',
EQ: '=',
GE: '>=',
GT: '>',
SET: ' set ',
NE: '!=',
UNSET: ' unset ',
MATCH: ' match ',
MATCH_RE: '~',
NOT: 'is not'
}
PTL_ATTROP_TO_STR = {PTL_AND: '&&', PTL_OR: '||'}
(RESOURCES_AVAILABLE, RESOURCES_TOTAL) = [0, 1]
EXPECT_MAP = {
UNSET: 'Unset',
SET: 'Set',
EQ: 'Equal',
NE: 'Not Equal',
LT: 'Less Than',
GT: 'Greater Than',
LE: 'Less Equal Than',
GE: 'Greater Equal Than',
MATCH_RE: 'Matches regexp',
MATCH: 'Matches',
NOT: 'Not'
}
PBS_CMD_MAP = {
MGR_CMD_CREATE: 'create',
MGR_CMD_SET: 'set',
MGR_CMD_DELETE: 'delete',
MGR_CMD_UNSET: 'unset',
MGR_CMD_IMPORT: 'import',
MGR_CMD_EXPORT: 'export',
MGR_CMD_LIST: 'list',
}
PBS_CMD_TO_OP = {
MGR_CMD_SET: SET,
MGR_CMD_UNSET: UNSET,
MGR_CMD_DELETE: UNSET,
MGR_CMD_CREATE: SET,
}
PBS_OBJ_MAP = {
MGR_OBJ_NONE: 'none',
SERVER: 'server',
QUEUE: 'queue',
JOB: 'job',
NODE: 'node',
RESV: 'reservation',
RSC: 'resource',
SCHED: 'sched',
HOST: 'host',
HOOK: 'hook',
VNODE: 'node',
PBS_HOOK: 'pbshook'
}
PTL_TRUE = ('1', 'true', 't', 'yes', 'y', 'enable', 'enabled', 'True', True)
PTL_FALSE = ('0', 'false', 'f', 'no', 'n', 'disable', 'disabled', 'False',
False)
PTL_NONE = ('None', None)
PTL_FORMULA = '__formula__'
PTL_NOARG = '__noarg__'
PTL_ALL = '__ALL__'
CMD_ERROR_MAP = {
'alterjob': 'PbsAlterError',
'holdjob': 'PbsHoldError',
'sigjob': 'PbsSignalError',
'msgjob': 'PbsMessageError',
'rlsjob': 'PbsReleaseError',
'rerunjob': 'PbsRerunError',
'orderjob': 'PbsOrderError',
'runjob': 'PbsRunError',
'movejob': 'PbsMoveError',
'delete': 'PbsDeleteError',
'deljob': 'PbsDeljobError',
'delresv': 'PbsDelresvError',
'status': 'PbsStatusError',
'manager': 'PbsManagerError',
'submit': 'PbsSubmitError',
'terminate': 'PbsQtermError'
}
class PtlConfig(object):
"""
Holds configuration options
    The options can be stored in a file as well as in the OS environment
    variables. When set, the environment variables will override
    definitions in the file. By default, on Unix-like systems, the file
    read is ``/etc/ptl.conf``; the environment variable ``PTL_CONF_FILE``
    can be used to set the path to the file to read.
The format of the file is a series of ``<key> = <value>`` properties.
A line that starts with a '#' is ignored and can be used for comments
:param conf: Path to PTL configuration file
:type conf: str or None
"""
logger = logging.getLogger(__name__)
def __init__(self, conf=None):
self.options = {
'PTL_SUDO_CMD': 'sudo -H',
'PTL_RSH_CMD': 'ssh',
'PTL_CP_CMD': 'scp -p',
'PTL_EXPECT_MAX_ATTEMPTS': 60,
'PTL_EXPECT_INTERVAL': 0.5,
'PTL_UPDATE_ATTRIBUTES': True,
}
self.handlers = {
'PTL_SUDO_CMD': DshUtils.set_sudo_cmd,
'PTL_RSH_CMD': DshUtils.set_rsh_cmd,
'PTL_CP_CMD': DshUtils.set_copy_cmd,
'PTL_EXPECT_MAX_ATTEMPTS': Server.set_expect_max_attempts,
'PTL_EXPECT_INTERVAL': Server.set_expect_interval,
'PTL_UPDATE_ATTRIBUTES': Server.set_update_attributes
}
if conf is None:
conf = os.environ.get('PTL_CONF_FILE', '/etc/ptl.conf')
try:
lines = open(conf).readlines()
except IOError:
lines = []
for line in lines:
line = line.strip()
if (line.startswith('#') or (line == '')):
continue
try:
k, v = line.split('=', 1)
k = k.strip()
v = v.strip()
self.options[k] = v
except:
self.logger.error('Error parsing line ' + line)
for k, v in self.options.items():
if k in os.environ:
v = os.environ[k]
else:
os.environ[k] = str(v)
if k in self.handlers:
self.handlers[k](v)
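# Minimal usage sketch for PtlConfig (the file contents below are only an
# example of the ``<key> = <value>`` format described in the docstring; the
# option names come from PtlConfig.options and the path is the documented
# default):
#
#   # /etc/ptl.conf
#   PTL_SUDO_CMD = sudo -H
#   PTL_EXPECT_MAX_ATTEMPTS = 90
#
#   >>> conf = PtlConfig()          # or PtlConfig(conf='/path/to/ptl.conf')
#   >>> conf.options['PTL_EXPECT_MAX_ATTEMPTS']
#   '90'
#
# Options already present in os.environ take precedence over the file.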
class PtlException(Exception):
"""
Generic errors raised by PTL operations.
Sets a ``return value``, a ``return code``, and a ``message``
A post function and associated positional and named arguments
are available to perform any necessary cleanup.
    :param rv: Return value set for the error that occurred during PTL
operation
:type rv: int or None.
    :param rc: Return code set for the error that occurred during PTL
operation
:type rc: int or None.
    :param msg: Message set for the error that occurred during PTL operation
:type msg: str or None.
:param post: Execute necessary cleanup if not None
:raises: PTL exceptions
"""
def __init__(self, rv=None, rc=None, msg=None, post=None, *args, **kwargs):
self.rv = rv
self.rc = rc
self.msg = msg
if post is not None:
post(*args, **kwargs)
def __str__(self):
return ('rc=' + str(self.rc) + ', rv=' + str(self.rv) +
', msg=' + str(self.msg))
def __repr__(self):
return (self.__class__.__name__ + '(rc=' + str(self.rc) + ', rv=' +
str(self.rv) + ', msg=' + str(self.msg) + ')')
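# Minimal usage sketch (hypothetical values, not taken from the library):
# the subclasses below are all raised the same way, with a return code,
# return value and message, plus an optional ``post`` callable that runs
# cleanup before the exception propagates.
#
#   >>> def _cleanup(job_script=None):
#   ...     pass    # e.g. remove a temporary job script
#   >>> raise PbsSubmitError(rc=1, msg='qsub: submit failed',
#   ...                      post=_cleanup, job_script='/tmp/job.scr')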
class PbsServiceError(PtlException):
pass
class PbsConnectError(PtlException):
pass
class PbsStatusError(PtlException):
pass
class PbsSubmitError(PtlException):
pass
class PbsManagerError(PtlException):
pass
class PbsDeljobError(PtlException):
pass
class PbsDelresvError(PtlException):
pass
class PbsDeleteError(PtlException):
pass
class PbsRunError(PtlException):
pass
class PbsSignalError(PtlException):
pass
class PbsMessageError(PtlException):
pass
class PbsHoldError(PtlException):
pass
class PbsReleaseError(PtlException):
pass
class PbsOrderError(PtlException):
pass
class PbsRerunError(PtlException):
pass
class PbsMoveError(PtlException):
pass
class PbsAlterError(PtlException):
pass
class PbsResourceError(PtlException):
pass
class PbsSelectError(PtlException):
pass
class PbsSchedConfigError(PtlException):
pass
class PbsMomConfigError(PtlException):
pass
class PbsFairshareError(PtlException):
pass
class PbsQdisableError(PtlException):
pass
class PbsQenableError(PtlException):
pass
class PbsQstartError(PtlException):
pass
class PbsQstopError(PtlException):
pass
class PtlExpectError(PtlException):
pass
class PbsInitServicesError(PtlException):
pass
class PbsQtermError(PtlException):
pass
class PbsTypeSize(str):
"""
Descriptor class for memory as a numeric entity.
Units can be one of ``b``, ``kb``, ``mb``, ``gb``, ``tb``, ``pt``
    :param unit: The unit type associated with the memory value
:type unit: str
:param value: The numeric value of the memory
:type value: int or None
:raises: ValueError and TypeError
"""
def __init__(self, value=None):
if value is None:
return
if len(value) < 2:
raise ValueError
if value[-1:] in ('b', 'B') and value[:-1].isdigit():
self.unit = 'b'
self.value = int(int(value[:-1]) / 1024)
return
# lower() applied to ignore case
unit = value[-2:].lower()
self.value = value[:-2]
if not self.value.isdigit():
raise ValueError
if unit == 'kb':
self.value = int(self.value)
elif unit == 'mb':
self.value = int(self.value) * 1024
elif unit == 'gb':
self.value = int(self.value) * 1024 * 1024
elif unit == 'tb':
self.value = int(self.value) * 1024 * 1024 * 1024
elif unit == 'pb':
self.value = int(self.value) * 1024 * 1024 * 1024 * 1024
else:
raise TypeError
self.unit = 'kb'
def encode(self, value=None, valtype='kb', precision=1):
"""
Encode numeric memory input in kilobytes to a string, including
unit
:param value: The numeric value of memory to encode
:type value: int or None.
:param valtype: The unit of the input value, defaults to kb
:type valtype: str
:param precision: Precision of the encoded value, defaults to 1
:type precision: int
:returns: Encoded memory in kb to string
"""
if value is None:
value = self.value
if valtype == 'b':
val = value
elif valtype == 'kb':
val = value * 1024
elif valtype == 'mb':
val = value * 1024 * 1024
        elif valtype == 'gb':
            val = value * 1024 * 1024 * 1024
        elif valtype == 'tb':
            val = value * 1024 * 1024 * 1024 * 1024
        elif valtype == 'pt':
            val = value * 1024 * 1024 * 1024 * 1024 * 1024
m = (
(1 << 50, 'pb'),
(1 << 40, 'tb'),
(1 << 30, 'gb'),
(1 << 20, 'mb'),
(1 << 10, 'kb'),
(1, 'b')
)
for factor, suffix in m:
if val >= factor:
break
return '%.*f%s' % (precision, float(val) / factor, suffix)
def __cmp__(self, other):
if self.value < other.value:
return -1
if self.value == other.value:
return 0
return 1
def __lt__(self, other):
if self.value < other.value:
return True
return False
def __le__(self, other):
if self.value <= other.value:
return True
return False
def __gt__(self, other):
if self.value > other.value:
return True
return False
def __ge__(self, other):
        if self.value >= other.value:
return True
return False
def __eq__(self, other):
if self.value == other.value:
return True
return False
def __get__(self):
return self.value
def __add__(self, other):
if isinstance(other, int):
self.value += other
else:
self.value += other.value
return self
def __mul__(self, other):
if isinstance(other, int):
self.value *= other
else:
self.value *= other.value
return self
def __floordiv__(self, other):
self.value /= other.value
return self
def __sub__(self, other):
self.value -= other.value
return self
def __repr__(self):
return self.__str__()
def __str__(self):
return self.encode(valtype=self.unit)
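# Minimal usage sketch for PbsTypeSize (values are illustrative): memory
# strings are normalised to kilobytes internally and re-encoded on output.
#
#   >>> mem = PbsTypeSize('2gb')
#   >>> mem.value                       # stored in kilobytes
#   2097152
#   >>> str(mem)
#   '2.0gb'
#   >>> PbsTypeSize('512mb') < mem
#   True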
class PbsTypeDuration(str):
"""
Descriptor class for a duration represented as ``hours``,
    ``minutes``, and ``seconds``, in the form of ``[HH:][MM:]SS``
:param as_seconds: HH:MM:SS represented in seconds
:type as_seconds: int
:param as_str: duration represented in HH:MM:SS
:type as_str: str
"""
def __init__(self, val):
if isinstance(val, str):
if ':' in val:
s = val.split(':')
l = len(s)
if l > 3:
raise ValueError
hr = mn = sc = 0
if l >= 2:
sc = s[l - 1]
mn = s[l - 2]
if l == 3:
hr = s[0]
self.duration = int(hr) * 3600 + int(mn) * 60 + int(sc)
elif val.isdigit():
self.duration = int(val)
elif isinstance(val, int) or isinstance(val, float):
self.duration = val
def __add__(self, other):
self.duration += other.duration
return self
def __sub__(self, other):
self.duration -= other.duration
return self
def __cmp__(self, other):
if self.duration < other.duration:
return -1
if self.duration == other.duration:
return 0
return 1
def __lt__(self, other):
if self.duration < other.duration:
return True
return False
def __le__(self, other):
if self.duration <= other.duration:
return True
return False
def __gt__(self, other):
if self.duration > other.duration:
return True
return False
    def __ge__(self, other):
        if self.duration >= other.duration:
            return True
        return False
def __eq__(self, other):
if self.duration == other.duration:
return True
return False
def __get__(self):
return self.as_str
def __repr__(self):
return self.__str__()
def __int__(self):
return int(self.duration)
def __str__(self):
return str(datetime.timedelta(seconds=self.duration))
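# Illustrative usage sketch (comments only; variable names are made up):
#
#   d = PbsTypeDuration('01:30:10')
#   d.duration    # -> 5410 (seconds)
#   int(d)        # -> 5410
#   str(d)        # -> '1:30:10' (rendered through datetime.timedelta)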
class PbsTypeArray(list):
"""
Descriptor class for a PBS array list type, e.g. String array
:param value: Array value to be passed
:param sep: Separator for two array elements
:type sep: str
:returns: List
"""
def __init__(self, value=None, sep=','):
self.separator = sep
        list.__init__(self, value.split(sep))
def __str__(self):
return self.separator.join(self)
class PbsTypeList(dict):
"""
Descriptor class for a generic PBS list that are key/value pairs
delimited
:param value: List value to be passed
:param sep: Separator for two key/value pair
:type sep: str
:param kvsep: Separator for key and value
:type kvsep: str
:returns: Dictionary
"""
def __init__(self, value=None, sep=',', kvsep='='):
self.kvsep = kvsep
self.separator = sep
d = {}
as_list = map(lambda v: v.split(kvsep), value.split(sep))
if as_list:
for k, v in as_list:
d[k] = v
del as_list
dict.__init__(self, d)
def __str__(self):
s = []
for k, v in self.items():
s += [str(k) + self.kvsep + str(v)]
return self.separator.join(s)
class PbsTypeLicenseCount(PbsTypeList):
"""
Descriptor class for a PBS license_count attribute.
It is a specialized list where key/values are ':' delimited, separated
by a ' ' (space)
:param value: PBS license_count attribute value
:returns: Specialized list
"""
def __init__(self, value=None):
super(PbsTypeLicenseCount, self).__init__(value, sep=' ', kvsep=':')
class PbsTypeVariableList(PbsTypeList):
"""
Descriptor class for a PBS Variable_List attribute
It is a specialized list where key/values are '=' delimited, separated
    by a ',' (comma)
:param value: PBS Variable_List attribute value
:returns: Specialized list
"""
def __init__(self, value=None):
super(PbsTypeVariableList, self).__init__(value, sep=',', kvsep='=')
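# Illustrative usage sketch for the key/value list descriptors above
# (comments only; variable names and values are made up):
#
#   vl = PbsTypeVariableList('HOME=/home/u1,SHELL=/bin/bash')
#   vl['HOME']    # -> '/home/u1'
#   str(vl)       # -> pairs re-joined with '=' and ',' (dict iteration
#                 #    order, so the pair order is not guaranteed)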
class PbsTypeSelect(list):
"""
Descriptor class for PBS select/schedselect specification.
Select is of the form:
``<select> ::= <m>":"<chunk> | <select>"+"<select>``
``<m> ::= <digit> | <digit><m>``
``<chunk> ::= <resc_name>":"<resc_value> | <chunk>":"<chunk>``
``<m>`` is a multiplying factor for each chunk requested
``<chunk>`` are resource key/value pairs
    The type populates a list of single-chunk resource
    ``key/value`` pairs; the list can be walked by iterating over
the type itself.
:param num_chunks: The total number of chunks in the select
    :type num_chunks: int
:param resources: A dictionary of all resource counts in the select
:type resources: Dictionary
"""
def __init__(self, s=None):
if s is not None:
self._as_str = s
self.resources = {}
self.num_chunks = 0
nc = s.split('+')
for chunk in nc:
self._parse_chunk(chunk)
def _parse_chunk(self, chunk):
d = chunk.split(':')
# number of chunks
_num_chunks = int(d[0])
self.num_chunks += _num_chunks
r = {}
for e in d[1:]:
k, v = e.split('=')
r[k] = v
if 'mem' in k:
try:
v = PbsTypeSize(v).value
except:
# failed so we guessed wrong on the type
pass
if isinstance(v, int) or v.isdigit():
if k not in self.resources:
self.resources[k] = _num_chunks * int(v)
else:
self.resources[k] += _num_chunks * int(v)
else:
if k not in self.resources:
self.resources[k] = v
else:
self.resources[k] = [self.resources[k], v]
# explicitly expose the multiplying factor
for _ in range(_num_chunks):
self.append(r)
def __add__(self, chunk=None):
if chunk is None:
return self
self._parse_chunk(chunk)
self._as_str = self._as_str + "+" + chunk
return self
def __repr__(self):
return str(self)
def __str__(self):
return self._as_str
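# Illustrative usage sketch (comments only): each chunk is expanded into
# one list entry per multiplier and per-resource totals are accumulated
# in 'resources', with mem normalized to kb:
#
#   sel = PbsTypeSelect('2:ncpus=4:mem=2gb+1:ncpus=8')
#   sel.num_chunks           # -> 3
#   sel.resources['ncpus']   # -> 16
#   sel.resources['mem']     # -> 4194304 (kb)
#   str(sel)                 # -> '2:ncpus=4:mem=2gb+1:ncpus=8'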
class PbsTypeChunk(dict):
"""
Descriptor class for a PBS chunk associated to a
    ``PbsTypeExecVnode``. This type of chunk corresponds to
    a node solution to a resource request, not to the select
specification.
``chunk ::= <subchk> | <chunk>"+"<chunk>``
``subchk ::= <node>":"<resource>``
``resource ::= <key>":"<val> | <resource>":"<resource>``
A chunk expresses a solution to a specific select-chunk
request. If multiple chunks are needed to solve a single
select-chunk, e.g., on a shared memory system, the chunk
    will be extended into virtual chunks, vchunks.
:param vnode: the vnode name corresponding to the chunk
:type vnode: str or None
:param resources: the key value pair of resources in
dictionary form
:type resources: Dictionary or None
:param vchunk: a list of virtual chunks needed to solve
the select-chunk, vchunk is only set if more
than one vchunk are required to solve the
select-chunk
:type vchunk: list
"""
def __init__(self, vnode=None, resources=None, chunkstr=None):
self.vnode = vnode
if resources is not None:
self.resources = resources
else:
self.resources = {}
self.vchunk = []
self.as_str = chunkstr
self.__parse_chunk(chunkstr)
def __parse_chunk(self, chunkstr=None):
if chunkstr is None:
return
vchunks = chunkstr.split('+')
if len(vchunks) == 1:
entities = chunkstr.split(':')
self.vnode = entities[0]
if len(entities) > 1:
for e in entities[1:]:
(r, v) = e.split('=')
self.resources[r] = v
self[self.vnode] = self.resources
else:
for sc in vchunks:
chk = PbsTypeChunk(chunkstr=sc)
self.vchunk.append(chk)
self[chk.vnode] = chk.resources
def add(self, vnode, resources):
"""
Add a chunk specificiation. If a chunk is already
defined, add the chunk as a vchunk.
:param vnode: The vnode to add
:type vnode: str
:param resources: The resources associated to the
vnode
:type resources: str
:returns: Added chunk specification
"""
if self.vnode == vnode:
self.resources = dict(self.resources.items() + resources.items())
return self
elif len(self.vchunk) != 0:
for chk in self.vchunk:
if chk.vnode == vnode:
                    chk.resources = dict(chk.resources.items() +
                                         resources.items())
return self
chk = PbsTypeChunk(vnode, resources)
self.vchunk.append(chk)
return self
def __repr__(self):
return self.__str__()
def __str__(self):
_s = ["("]
_s += [self.vnode, ":"]
for resc_k, resc_v in self.resources.items():
_s += [resc_k, "=", str(resc_v)]
if self.vchunk:
for _v in self.vchunk:
_s += ["+", _v.vnode, ":"]
for resc_k, resc_v in _v.resources.items():
_s += [resc_k, "=", str(resc_v)]
_s += [")"]
return "".join(_s)
class PbsTypeExecVnode(list):
"""
Execvnode representation, expressed as a list of
PbsTypeChunk
:param vchunk: List of virtual chunks, only set when
                   more than one vnode is allocated on a
                   host to satisfy a requested chunk
:type vchunk: List
:param num_chunks: The number of chunks satisfied by
this execvnode
:type num_chunks: int
:param vnodes: List of vnode names allocated to the execvnode
:type vnodes: List
:param resource: method to return the amount of a named
resource satisfied by this execvnode
"""
def __init__(self, s=None):
if s is None:
return None
self._as_str = s
start = 0
self.num_chunks = 0
for c in range(len(s)):
# must split on '+' between parens because '+' can occur within
# paren for complex specs
if s[c] == '(':
start = c + 1
if s[c] == ')':
self.append(PbsTypeChunk(chunkstr=s[start:c]))
self.num_chunks += 1
def resource(self, name=None):
"""
:param name: Name of the resource
:type name: str or None
"""
if name is None:
return None
_total = 0
for _c in self:
if _c.vchunk:
for _v in _c.vchunk:
if name in _v.resources:
_total += int(_v.resources[name])
            if name in _c.resources:
                _total += int(_c.resources[name])
return _total
@property
def vnodes(self):
vnodes = []
for e in self:
vnodes += [e.vnode]
if e.vchunk:
vnodes += map(lambda n: n.vnode, e.vchunk)
return list(set(vnodes))
    def __str__(self):
return self._as_str
# below would be to verify that the converted type maps back correctly
_s = []
for _c in self:
_s += [str(_c)]
return "+".join(_s)
class PbsTypeExecHost(str):
"""
Descriptor class for exec_host attribute
:param hosts: List of hosts in the exec_host. Each entry is
a host info dictionary that maps the number of
cpus and its task number
:type hosts: List
"""
def __init__(self, s=None):
if s is None:
return None
self._as_str = s
self.hosts = []
hsts = s.split('+')
for h in hsts:
hi = {}
ti = {}
(host, task) = h.split('/',)
d = task.split('*')
if len(d) == 1:
taskslot = d[0]
ncpus = 1
elif len(d) == 2:
(taskslot, ncpus) = d
else:
(taskslot, ncpus) = (0, 1)
ti['task'] = taskslot
ti['ncpus'] = ncpus
hi[host] = ti
self.hosts.append(hi)
def __repr__(self):
return str(self.hosts)
def __str__(self):
return self._as_str
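# Illustrative usage sketch (comments only): each '+'-separated entry maps
# a host to its task slot and cpu count (ncpus stays 1, as an int, when no
# '*N' multiplier is present):
#
#   eh = PbsTypeExecHost('hostA/0*2+hostB/1')
#   eh.hosts    # -> [{'hostA': {'task': '0', 'ncpus': '2'}},
#               #     {'hostB': {'task': '1', 'ncpus': 1}}]
#   str(eh)     # -> 'hostA/0*2+hostB/1'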
class PbsTypeJobId(str):
"""
Descriptor class for a Job identifier
:param id: The numeric portion of a job identifier
:type id: int
:param server_name: The pbs server name
:type server_name: str
:param server_shortname: The first portion of a FQDN server
name
:type server_shortname: str
"""
def __init__(self, value=None):
if value is None:
return
self.value = value
r = value.split('.', 1)
if len(r) != 2:
return
self.id = int(r[0])
self.server_name = r[1]
self.server_shortname = r[1].split('.', 1)[0]
def __str__(self):
return str(self.value)
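# Illustrative usage sketch (comments only; the job id is made up):
#
#   jid = PbsTypeJobId('123.server01.example.com')
#   jid.id                  # -> 123
#   jid.server_name         # -> 'server01.example.com'
#   jid.server_shortname    # -> 'server01'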
class PbsUser(object):
"""
The PbsUser type augments a PBS username to associate
it to groups to which the user belongs
:param name: The user name referenced
:type name: str
:param uid: uid of user
:type uid: int or None
:param groups: The list of PbsGroup objects the user
belongs to
:type groups: List or None
"""
def __init__(self, name, uid=None, groups=None):
self.name = name
if uid is not None:
self.uid = int(uid)
else:
self.uid = None
self.home = None
self.gid = None
self.shell = None
self.gecos = None
try:
_user = pwd.getpwnam(self.name)
self.uid = _user.pw_uid
self.home = _user.pw_dir
self.gid = _user.pw_gid
self.shell = _user.pw_shell
self.gecos = _user.pw_gecos
except:
pass
if groups is None:
self.groups = []
elif isinstance(groups, list):
self.groups = groups
else:
self.groups = groups.split(",")
        for i, g in enumerate(self.groups):
            if isinstance(g, str):
                self.groups[i] = PbsGroup(g, users=[self])
            elif self not in g.users:
                g.users.append(self)
def __repr__(self):
return str(self.name)
def __str__(self):
return self.__repr__()
def __int__(self):
return int(self.uid)
class PbsGroup(object):
"""
The PbsGroup type augments a PBS groupname to associate it
to users to which the group belongs
:param name: The group name referenced
:type name: str
:param gid: gid of group
:type gid: int or None
:param users: The list of PbsUser objects the group belongs to
:type users: List or None
"""
def __init__(self, name, gid=None, users=None):
self.name = name
if gid is not None:
self.gid = int(gid)
else:
self.gid = None
try:
_group = grp.getgrnam(self.name)
self.gid = _group.gr_gid
except:
pass
if users is None:
self.users = []
elif isinstance(users, list):
self.users = users
else:
self.users = users.split(",")
        for i, u in enumerate(self.users):
            if isinstance(u, str):
                self.users[i] = PbsUser(u, groups=[self])
            elif self not in u.groups:
                u.groups.append(self)
def __repr__(self):
return str(self.name)
def __str__(self):
return self.__repr__()
def __int__(self):
return int(self.gid)
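# Illustrative usage sketch (comments only). The pwd/grp lookups above are
# best effort; for names unknown to the local system (as assumed for the
# made-up 'pbsuser1'/'pbsgroup1' below) only the values passed in are kept:
#
#   u = PbsUser('pbsuser1', uid=5001, groups=['pbsgroup1'])
#   str(u)      # -> 'pbsuser1'
#   int(u)      # -> 5001
#   u.groups    # -> PbsGroup membership, with u linked back into each
#               #    group's users list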
class BatchUtils(object):
"""
Utility class to create/convert/display various PBS
data structures
"""
legal = "\d\w:\+=\[\]~"
chunks_tag = re.compile("(?P<chunk>\([\d\w:\+=\[\]~]\)[\+]?)")
chunk_tag = re.compile("(?P<vnode>[\w\d\[\]]+):" +
"(?P<resources>[\d\w:\+=\[\]~])+\)")
array_tag = re.compile("(?P<jobid>[\d]+)\[(?P<subjobid>[0-9]*)\]*" +
"[.]*[(?P<server>.*)]*")
subjob_tag = re.compile("(?P<jobid>[\d]+)\[(?P<subjobid>[0-9]+)\]*" +
"[.]*[(?P<server>.*)]*")
pbsobjname_re = re.compile("^([\w\d][\d\w\s]*:?[\s]+)" +
"*(?P<name>[\w@\.\d\[\]-]+)$")
pbsobjattrval_re = re.compile(r"""
[\s]*(?P<attribute>[\w\d\.-]+)
[\s]*=[\s]*
(?P<value>.*)
[\s]*""",
re.VERBOSE)
dt_re = '(?P<dt_from>\d\d/\d\d/\d\d\d\d \d\d:\d\d)' + \
'[\s]+' + \
'(?P<dt_to>\d\d/\d\d/\d\d\d\d \d\d:\d\d)'
dt_tag = re.compile(dt_re)
hms_tag = re.compile('(?P<hr>\d\d):(?P<mn>\d\d):(?P<sc>\d\d)')
lim_tag = re.compile("(?P<limtype>[a-z_]+)[\.]*(?P<resource>[\w\d-]*)"
"=[\s]*\[(?P<entity_type>[ugpo]):"
"(?P<entity_name>[\w\d-]+)"
"=(?P<entity_value>[\d\w]+)\][\s]*")
def __init__(self):
self.logger = logging.getLogger(__name__)
self.du = DshUtils()
def list_to_attrl(self, l):
"""
Convert a list to a PBS attribute list
:param l: List to be converted
:type l: List
:returns: PBS attribute list
"""
return self.list_to_attropl(l, None)
def list_to_attropl(self, l, op=SET):
"""
Convert a list to a PBS attribute operation list
:param l: List to be converted
:type l: List
:returns: PBS attribute operation list
"""
head = None
prev = None
for i in l:
a = self.str_to_attropl(i, op)
if prev is None:
head = a
else:
prev.next = a
prev = a
if op is not None:
a.op = op
return head
def str_to_attrl(self, s):
"""
Convert a string to a PBS attribute list
:param s: String to be converted
:type s: str
:returns: PBS attribute list
"""
return self.str_to_attropl(s, None)
def str_to_attropl(self, s, op=SET):
"""
Convert a string to a PBS attribute operation list
:param s: String to be converted
:type s: str
:returns: PBS attribute operation list
"""
if op is not None:
a = attropl()
else:
a = attrl()
if '.' in s:
(attribute, resource) = s.split('.')
a.name = attribute
a.resource = resource.strip()
else:
a.name = s
a.value = ''
a.next = None
if op:
a.op = op
return a
def dict_to_attrl(self, d={}):
"""
Convert a dictionary to a PBS attribute list
:param d: Dictionary to be converted
:type d: Dictionary
:returns: PBS attribute list
"""
return self.dict_to_attropl(d, None)
def dict_to_attropl(self, d={}, op=SET):
"""
Convert a dictionary to a PBS attribute operation list
:param d: Dictionary to be converted
:type d: Dictionary
:returns: PBS attribute operation list
"""
if len(d.keys()) == 0:
return None
prev = None
head = None
for k, v in d.items():
if isinstance(v, tuple):
op = v[0]
v = v[1]
if op is not None:
a = attropl()
else:
a = attrl()
if '.' in k:
(attribute, resource) = k.split('.')
a.name = attribute
a.resource = resource
else:
a.name = k
a.value = str(v)
if op is not None:
a.op = op
a.next = None
if prev is None:
head = a
else:
prev.next = a
prev = a
return head
def convert_to_attrl(self, attrib):
"""
Generic call to convert Python type to PBS attribute list
:param attrib: Attributes to be converted
:type attrib: List or tuple or dictionary or str
:returns: PBS attribute list
"""
return self.convert_to_attropl(attrib, None)
def convert_to_attropl(self, attrib, cmd=MGR_CMD_SET, op=None):
"""
Generic call to convert Python type to PBS attribute
operation list
:param attrib: Attributes to be converted
:type attrib: List or tuple or dictionary or str
:returns: PBS attribute operation list
"""
if op is None:
op = self.command_to_op(cmd)
if isinstance(attrib, (list, tuple)):
a = self.list_to_attropl(attrib, op)
elif isinstance(attrib, (dict, OrderedDict)):
a = self.dict_to_attropl(attrib, op)
elif isinstance(attrib, str):
a = self.str_to_attropl(attrib, op)
else:
a = None
return a
def command_to_op(self, cmd=None):
"""
Map command to a ``SET`` or ``UNSET`` Operation. An unrecognized
command will return SET. No command will return None.
:param cmd: Command to be mapped
:type cmd: str
:returns: ``SET`` or ``UNSET`` operation for the command
"""
if cmd is None:
return None
if cmd in (MGR_CMD_SET, MGR_CMD_EXPORT, MGR_CMD_IMPORT):
return SET
if cmd == MGR_CMD_UNSET:
return UNSET
return SET
def display_attrl(self, a=None, writer=sys.stdout):
"""
Display an attribute list using writer, defaults to sys.stdout
:param a: Attributes
:type a: List
:returns: Displays attribute list
"""
return self.display_attropl(a)
def display_attropl(self, attropl=None, writer=sys.stdout):
"""
Display an attribute operation list with writer, defaults to
sys.stdout
:param attropl: Attribute operation list
:type attropl: List
:returns: Displays an attribute operation list
"""
attrs = attropl
while attrs is not None:
if attrs.resource:
writer.write('\t' + attrs.name + '.' + attrs.resource + '= ' +
attrs.value + '\n')
else:
writer.write('\t' + attrs.name + '= ' + attrs.value + '\n')
attrs = attrs.next
def display_dict(self, d, writer=sys.stdout):
"""
Display a dictionary using writer, defaults to sys.stdout
:param d: Dictionary
:type d: Dictionary
:returns: Displays a dictionary
"""
if not d:
return
for k, v in d.items():
writer.write(k + ': ' + v + '\n')
def batch_status_to_dictlist(self, bs=None, attr_names=None, id=None):
"""
Convert a batch status to a list of dictionaries.
        version 0.1a6 added this conversion as a typemap(out) as
        part of the swig wrapping itself, so there are fewer uses
        for this function. Returns a list of dictionary
        representations of the batch status.
:param bs: Batch status
:param attr_names: Attribute names
:returns: List of dictionaries
"""
attr_time = (
'ctime', 'mtime', 'qtime', 'start', 'end', 'reserve_start',
'reserve_end', 'estimated.start_time')
ret = []
while bs:
if id is not None and bs.name != id:
bs = bs.next
continue
d = {}
attrs = bs.attribs
while attrs is not None:
if attrs.resource:
key = attrs.name + '.' + attrs.resource
else:
key = attrs.name
if attr_names is not None:
if key not in attr_names:
attrs = attrs.next
continue
val = attrs.value
if attrs.name in attr_time:
val = self.convert_time(val)
# for attributes that may occur multiple times (e.g., max_run)
# append the value in a comma-separated representation
if key in d:
d[key] = d[key] + ',' + str(val)
else:
d[key] = str(val)
attrs = attrs.next
if len(d.keys()) > 0:
ret.append(d)
d['id'] = bs.name
bs = bs.next
return ret
def display_batch_status(self, bs=None, attr_names=None,
writer=sys.stdout):
"""
Display a batch status using writer, defaults to sys.stdout
:param bs: Batch status
        :param attr_names: Attribute names
        :type attr_names: List or None
:returns: Displays batch status
"""
if bs is None:
return
l = self.batch_status_to_dictlist(bs, attr_names)
self.display_batch_status_as_dictlist(l, writer)
def display_dictlist(self, l=[], writer=sys.stdout, fmt=None):
"""
Display a list of dictionaries using writer, defaults to
sys.stdout
:param l: The list to display
:type l: List
:param writer: The stream on which to write
:param fmt: An optional formatting string
:type fmt: str or None
:returns: Displays list of dictionaries
"""
self.display_batch_status_as_dictlist(l, writer, fmt)
def dictlist_to_file(self, l=[], filename=None, mode='w'):
"""
write a dictlist to file
:param l: Dictlist
:type l: List
:param filename: File to which dictlist need to be written
:type filename: str
:param mode: Mode of file
:type mode: str
:raises: Exception writing to file
"""
if filename is None:
self.logger.error('a filename is required')
return
d = os.path.dirname(filename)
if d != '' and not os.path.isdir(d):
os.makedirs(d)
try:
f = open(filename, mode)
self.display_dictlist(l, f)
f.close()
except:
self.logger.error('error writing to file ' + filename)
raise
def batch_status_as_dictlist_to_file(self, l=[], writer=sys.stdout):
"""
Write a dictlist to file
:param l: Dictlist
:type l: List
:raises: Exception writing to file
"""
return self.dictlist_to_file(l, writer)
def file_to_dictlist(self, file=None, attribs=None, id=None):
"""
Convert a file to a batch dictlist format
:param file: File to be converted
:type file: str
:param attribs: Attributes
:returns: File converted to a batch dictlist format
"""
if file is None:
return []
try:
f = open(file, 'r')
lines = f.readlines()
f.close()
except Exception, e:
self.logger.error('error converting list of dictionaries to ' +
'file ' + str(e))
return []
return self.convert_to_dictlist(lines, attribs, id=id)
def file_to_vnodedef(self, file=None):
"""
Convert a file output of pbsnodes -av to a vnode
definition format
:param file: File to be converted
        :type file: str
:returns: Vnode definition format
"""
if file is None:
return None
try:
f = open(file, 'r')
lines = f.readlines()
f.close()
except:
self.logger.error('error converting nodes to vnode def')
return None
dl = self.convert_to_dictlist(lines)
return self.dictlist_to_vnodedef(dl)
def show(self, l=[], name=None, fmt=None):
"""
Alias to display_dictlist with sys.stdout as writer
:param name: if specified only show the object of
that name
:type name: str
:param fmt: Optional formatting string, uses %n for
object name, %a for attributes, for example
a format of '%nE{\}nE{\}t%aE{\}n' will display
objects with their name starting on the first
column, a new line, and attributes indented by
a tab followed by a new line at the end.
:type fmt: str
"""
if name:
i = 0
for obj in l:
if obj['id'] == name:
l = [l[i]]
break
i += 1
self.display_dictlist(l, fmt=fmt)
def get_objtype(self, d={}):
"""
Get the type of a given object
:param d: Dictionary
:type d: Dictionary
:Returns: Type of the object
"""
if 'Job_Name' in d:
return JOB
elif 'queue_type' in d:
return QUEUE
elif 'Reserve_Name' in d:
return RESV
elif 'server_state' in d:
return SERVER
elif 'Mom' in d:
return NODE
elif 'event' in d:
return HOOK
elif 'type' in d:
return RSC
return None
def display_batch_status_as_dictlist(self, l=[], writer=sys.stdout,
fmt=None):
"""
Display a batch status as a list of dictionaries
using writer, defaults to sys.stdout
:param l: List
:type l: List
        :param fmt: Optional format string
:type fmt: str or None
:returns: Displays batch status as a list of dictionaries
"""
if l is None:
return
for d in l:
self.display_batch_status_as_dict(d, writer, fmt)
def batch_status_as_dict_to_str(self, d={}, fmt=None):
"""
Return a string representation of a batch status dictionary
:param d: Dictionary
:type d: Dictionary
:param fmt: Optional format string
:type fmt: str or None
:returns: String representation of a batch status dictionary
"""
objtype = self.get_objtype(d)
if fmt is not None:
if '%1' in fmt:
_d1 = fmt['%1']
else:
_d1 = '\n'
if '%2' in fmt:
_d2 = fmt['%2']
else:
_d2 = ' '
if '%3' in fmt:
_d3 = fmt['%3']
else:
_d3 = ' = '
if '%4' in fmt:
_d4 = fmt['%4']
else:
_d4 = '\n'
if '%5' in fmt:
_d5 = fmt['%5']
else:
_d5 = '\n'
if '%6' in fmt:
_d6 = fmt['%6']
else:
_d6 = ''
else:
_d1 = '\n'
_d2 = ' '
_d3 = ' = '
_d4 = '\n'
_d5 = '\n'
_d6 = ''
if objtype == JOB:
_n = 'Job Id: ' + d['id'] + _d1
elif objtype == QUEUE:
_n = 'Queue: ' + d['id'] + _d1
elif objtype == RESV:
_n = 'Name: ' + d['id'] + _d1
elif objtype == SERVER:
_n = 'Server: ' + d['id'] + _d1
elif objtype == RSC:
_n = 'Resource: ' + d['id'] + _d1
elif 'id' in d:
_n = d['id'] + _d1
del d['id']
else:
_n = ''
_a = []
for k, v in sorted(d.items()):
if k == 'id':
continue
_a += [_d2 + k + _d3 + str(v)]
return _n + _d4.join(_a) + _d5 + _d6
def display_batch_status_as_dict(self, d={}, writer=sys.stdout, fmt=None):
"""
Display a dictionary representation of a batch status
using writer, defaults to sys.stdout
:param d: Dictionary
:type d: Dictionary
:param fmt: Optional format string
        :type fmt: str
:returns: Displays dictionary representation of a batch
status
"""
writer.write(self.batch_status_as_dict_to_str(d, fmt))
def decode_dictlist(self, l=None, json=True):
"""
decode a list of dictionaries
:param l: List of dictionaries
:type l: List
:param json: The target of the decode is meant for ``JSON``
formatting
:returns: Decoded list of dictionaries
"""
if l is None:
return ''
_js = []
for d in l:
_jdict = {}
for k, v in d.items():
if ',' in v:
_jdict[k] = v.split(',')
else:
_jdict[k] = self.decode_value(v)
_js.append(_jdict)
return _js
def convert_to_dictlist(self, l, attribs=None, mergelines=True, id=None):
"""
Convert a list of records into a dictlist format.
:param l: array of records to convert
:type l: List
:param mergelines: merge qstat broken lines into one
:returns: Record list converted into dictlist format
"""
if mergelines:
lines = []
for i in range(len(l)):
if l[i].startswith('\t'):
_e = len(lines) - 1
lines[_e] = lines[_e].strip('\r\n\t') + \
l[i].strip('\r\n\t')
else:
lines.append(l[i])
else:
lines = l
objlist = []
d = {}
for l in lines:
l = l.strip()
m = self.pbsobjname_re.match(l)
if m:
if len(d.keys()) > 1:
if id is None or (id is not None and d['id'] == id):
objlist.append(d.copy())
d = {}
d['id'] = m.group('name')
else:
m = self.pbsobjattrval_re.match(l)
if m:
attr = m.group('attribute')
if attribs is None or attr in attribs:
if attr in d:
d[attr] = d[attr] + "," + m.group('value')
else:
d[attr] = m.group('value')
# add the last element
if len(d.keys()) > 1:
if id is None or (id is not None and d['id'] == id):
objlist.append(d.copy())
return objlist
def convert_to_batch(self, l, mergelines=True):
"""
Convert a list of records into a batch format.
:param l: array of records to convert
:type l: List
:param mergelines: qstat breaks long lines over
multiple lines, merge them\
to one by default.
:type mergelines: bool
:returns: A linked list of batch status
"""
if mergelines:
lines = []
for i in range(len(l)):
if l[i].startswith('\t'):
_e = len(lines) - 1
lines[_e] = lines[_e].strip('\r\t') + \
l[i].strip('\r\n')
else:
lines.append(l[i])
else:
lines = l
head_bs = None
prev_bs = None
prev_attr = None
for l in lines:
l = l.strip()
m = self.pbsobjname_re.match(l)
if m:
bs = batch_status()
bs.name = m.group('name')
bs.attribs = None
bs.next = None
if prev_bs:
prev_bs.next = bs
if head_bs is None:
head_bs = bs
prev_bs = bs
prev_attr = None
else:
m = self.pbsobjattrval_re.match(l)
if m:
attr = attrl()
attr.name = m.group('attribute')
attr.value = m.group('value')
attr.next = None
if bs.attribs is None:
bs.attribs = attr
if prev_attr:
prev_attr.next = attr
prev_attr = attr
return head_bs
def file_to_batch(self, file=None):
"""
Convert a file to batch format
:param file: File to be converted
:type file: str or None
:returns: File converted into batch format
"""
if file is None:
return None
try:
f = open(file, 'r')
l = f.readlines()
f.close()
except:
self.logger.error('error converting file ' + file + ' to batch')
return None
return self.convert_to_batch(l)
def batch_to_file(self, bs=None, file=None):
"""
Write a batch object to file
:param bs: Batch status
:param file: File to which batch object is to be written
:type file: str
"""
if bs is None or file is None:
return
try:
f = open(file, 'w')
self.display_batch_status(bs, writer=f)
f.close()
except:
self.logger.error('error converting batch status to file')
def batch_to_vnodedef(self, bs):
"""
:param bs: Batch status
:returns: The vnode definition string representation
of nodes batch_status
"""
out = ["$configversion 2\n"]
while bs is not None:
attr = bs.attribs
while attr is not None:
if attr.name.startswith("resources_available") or \
attr.name.startswith("sharing"):
out += [bs.name + ": "]
out += [attr.name + "=" + attr.value + "\n"]
attr = attr.next
bs = bs.next
return "".join(out)
def dictlist_to_vnodedef(self, dl=None):
"""
:param dl: Dictionary list
:type dl: List
:returns: The vnode definition string representation
of a dictlist
"""
if dl is None:
return ''
out = ["$configversion 2\n"]
for node in dl:
for k, v in node.items():
if (k.startswith("resources_available") or
k.startswith("sharing") or
k.startswith("provision_enable") or
k.startswith("queue")):
out += [node['id'] + ": "]
# MoM dislikes empty values reported in vnode defs so
# we substitute no value for an actual empty string
if not v:
v = '""'
out += [k + "=" + str(v) + "\n"]
return "".join(out)
def objlist_to_dictlist(self, objlist=None):
"""
Convert a list of PBS/PTL objects ``(e.g. Server/Job...)``
into a dictionary list representation of the batch status
:param objlist: List of ``PBS/PTL`` objects
:type objlist: List
:returns: Dictionary list representation of the batch status
"""
if objlist is None:
return None
bsdlist = []
for obj in objlist:
newobj = self.obj_to_dict(obj)
bsdlist.append(newobj)
return bsdlist
def obj_to_dict(self, obj):
"""
Convert a PBS/PTL object (e.g. Server/Job...) into a
dictionary format
:param obj: ``PBS/PTL`` object
:returns: Dictionary of ``PBS/PTL`` objects
"""
newobj = dict(obj.attributes.items())
        newobj['id'] = obj.name
return newobj
def parse_execvnode(self, s=None):
"""
Parse an execvnode string into chunk objects
:param s: Execvnode string
:type s: str or None
:returns: Chunk objects for parsed execvnode string
"""
if s is None:
return None
chunks = []
start = 0
for c in range(len(s)):
if s[c] == '(':
start = c + 1
if s[c] == ')':
                chunks.append(PbsTypeChunk(chunkstr=s[start:c]))
return chunks
def anupbs_exechost_numhosts(self, s=None):
"""
:param s: Exechost string
:type s: str or None
"""
n = 0
if '[' in s:
eh = re.sub(r'.*\[(.*)\].*', r'\1', s)
hosts = eh.split(',')
for hid in hosts:
elm = hid.split('-')
if len(elm) == 2:
n += int(elm[1]) - int(elm[0]) + 1
else:
n += 1
else:
n += 1
return n
def parse_exechost(self, s=None):
"""
Parse an exechost string into a dictionary representation
:param s: String to be parsed
:type s: str or None
:returns: Dictionary format of the exechost string
"""
if s is None:
return None
hosts = []
hsts = s.split('+')
for h in hsts:
hi = {}
ti = {}
(host, task) = h.split('/',)
d = task.split('*')
if len(d) == 1:
taskslot = d[0]
ncpus = 1
elif len(d) == 2:
(taskslot, ncpus) = d
else:
(taskslot, ncpus) = (0, 1)
ti['task'] = taskslot
ti['ncpus'] = ncpus
hi[host] = ti
hosts.append(hi)
return hosts
def parse_select(self, s=None):
"""
Parse a ``select/schedselect`` string into a list
of dictionaries.
:param s: select/schedselect string
:type s: str or None
:returns: List of dictonaries
"""
if s is None:
return
info = []
chunks = s.split('+')
for chunk in chunks:
d = chunk.split(':')
numchunks = int(d[0])
resources = {}
for e in d[1:]:
k, v = e.split('=')
resources[k] = v
for _ in range(numchunks):
info.append(resources)
return info
@classmethod
def isfloat(cls, value):
"""
returns true if value is a float or a string representation
of a float returns false otherwise
:param value: value to be checked
:type value: str or int or float
:returns: True or False
"""
        if isinstance(value, float):
            return True
        if isinstance(value, str):
            try:
                float(value)
                return True
            except ValueError:
                return False
        return False
@classmethod
def decode_value(cls, value):
"""
Decode an attribute/resource value, if a value is
made up of digits only then return the numeric value
of it, if it is made of alphanumeric values only, return
it as a string, if it is of type size, i.e., with a memory
unit such as b,kb,mb,gb then return the converted size to
kb without the unit
:param value: attribute/resource value
:type value: str or int
:returns: int or float or string
"""
if value is None or callable(value):
return value
if isinstance(value, (int, float)):
return value
if value.isdigit():
return int(value)
if value.isalpha() or value == '':
return value
if cls.isfloat(value):
return float(value)
if ':' in value:
try:
value = int(PbsTypeDuration(value))
except ValueError:
pass
return value
# TODO revisit: assume (this could be the wrong type, need a real
# data model anyway) that the remaining is a memory expression
try:
value = PbsTypeSize(value)
return value.value
except ValueError:
pass
except TypeError:
# if not then we pass to return the value as is
pass
return value
def convert_time(self, val, fmt='%a %b %d %H:%M:%S %Y'):
"""
Convert a date time format into number of seconds
since epoch
:param val: date time value
:param fmt: date time format
:type fmt: str
:returns: seconds
"""
# Tweak for NAS format that puts the number of seconds since epoch
# in between
if val.split()[0].isdigit():
val = int(val.split()[0])
elif not val.isdigit():
val = time.strptime(val, fmt)
val = int(time.mktime(val))
return val
def convert_duration(self, val):
"""
Convert HH:MM:SS into number of seconds
If a number is fed in, that number is returned
If neither formatted data is fed in, returns 0
:param val: duration value
:type val: str
:raises: Incorrect format error
:returns: seconds
"""
if val.isdigit():
return int(val)
hhmmss = val.split(':')
if len(hhmmss) != 3:
self.logger.error('Incorrect format, expected HH:MM:SS')
return 0
return int(hhmmss[0]) * 3600 + int(hhmmss[1]) * 60 + int(hhmmss[2])
def convert_seconds_to_resvtime(self, tm, fmt=None, seconds=True):
"""
        Convert a time given in seconds since epoch into a formatted
        reservation date-time string
        :param tm: the time, in seconds since epoch, to convert
        :type tm: int or str
        :param fmt: optional format string, defaults to
                    ``%Y%m%d%H%M``
        :type fmt: str or None
        :param seconds: if True, format the time with seconds
                        granularity. Defaults to True.
        :type seconds: bool
        :returns: Formatted date-time string
"""
if fmt is None:
fmt = "%Y%m%d%H%M"
if seconds:
fmt += ".%S"
return time.strftime(fmt, time.localtime(int(tm)))
def convert_stime_to_seconds(self, st):
"""
Convert a time to seconds, if we fail we return the
original time
:param st: Time to be converted
:type st: str
:returns: Number of seconds
"""
try:
ret = time.mktime(time.strptime(st, '%a %b %d %H:%M:%S %Y'))
except:
ret = st
return ret
def convert_dedtime(self, dtime):
"""
Convert dedicated time string of form %m/%d/%Y %H:%M.
:param dtime: A datetime string, as an entry in the
dedicated_time file
:type dtime: str
:returns: A tuple of (from,to) of time since epoch
"""
dtime_from = None
dtime_to = None
m = self.dt_tag.match(dtime.strip())
if m:
try:
_f = "%m/%d/%Y %H:%M"
dtime_from = self.convert_datetime_to_epoch(m.group('dt_from'),
fmt=_f)
dtime_to = self.convert_datetime_to_epoch(m.group('dt_to'),
fmt=_f)
except:
self.logger.error('error converting dedicated time')
return (dtime_from, dtime_to)
def convert_datetime_to_epoch(self, mdyhms, fmt="%m/%d/%Y %H:%M:%S"):
"""
Convert the date time to epoch
:param mdyhms: date time
:type mdyhms: str
:param fmt: Format for date time
:type fmt: str
:returns: Epoch time
"""
return int(time.mktime(time.strptime(mdyhms, fmt)))
def compare_versions(self, v1, v2, op=None):
"""
Compare v1 to v2 with respect to operation op
:param v1: If not a looseversion, it gets converted
to it
:param v2: If not a looseversion, it gets converted
to it
:param op: An operation, one of ``LT``, ``LE``, ``EQ``,
``GE``, ``GT``
:type op: str
:returns: True or False
"""
if op is None:
self.logger.error('missing operator, one of LT,LE,EQ,GE,GT')
return None
if v1 is None or v2 is None:
return False
if isinstance(v1, str):
v1 = LooseVersion(v1)
if isinstance(v2, str):
v2 = LooseVersion(v2)
if op == GT:
if v1 > v2:
return True
elif op == GE:
if v1 >= v2:
return True
elif op == EQ:
if v1 == v2:
return True
elif op == LT:
if v1 < v2:
return True
elif op == LE:
if v1 <= v2:
return True
return False
def convert_arglist(self, attr):
"""
strip the XML attributes from the argument list attribute
:param attr: Argument list attributes
:type attr: List
:returns: Stripped XML attributes
"""
xmls = "<jsdl-hpcpa:Argument>"
xmle = "</jsdl-hpcpa:Argument>"
nattr = attr.replace(xmls, " ")
nattr = nattr.replace(xmle, " ")
return nattr.strip()
def convert_to_cli(self, attrs, op=None, hostname=None, dflt_conf=True,
exclude_attrs=None):
"""
Convert attributes into their CLI format counterpart. This
method is far from complete, it grows as needs come by and
could use a rewrite, especially going along with a rewrite
of pbs_api_to_cli
:param attrs: Attributes to convert
:type attrs: List or str or dictionary
:param op: The qualifier of the operation being performed,
such as ``IFL_SUBMIT``, ``IFL_DELETE``,
                   ``IFL_TERMINATE``...
:type op: str or None
:param hostname: The name of the host on which to operate
:type hostname: str or None
:param dflt_conf: Whether we are using the default PBS
configuration
:type dflt_conf: bool
:param exclude_attrs: Optional list of attributes to not
convert
:type exclude_attrs: List
:returns: CLI format of attributes
"""
ret = []
if op == IFL_SUBMIT:
executable = arglist = None
elif op == IFL_DELETE:
_c = []
if isinstance(attrs, str):
attrs = [attrs]
if isinstance(attrs, list):
for a in attrs:
if 'force' in a:
_c.append('-W')
_c.append('force')
if 'deletehist' in a:
_c.append('-x')
return _c
elif op == IFL_TERMINATE:
_c = []
if attrs is None:
_c = []
elif isinstance(attrs, str):
_c = ['-t', attrs]
else:
if ((attrs & SHUT_QUICK) == SHUT_QUICK):
_c = ['-t', 'quick']
if ((attrs & SHUT_IMMEDIATE) == SHUT_IMMEDIATE):
_c = ['-t', 'immediate']
if ((attrs & SHUT_DELAY) == SHUT_DELAY):
_c = ['-t', 'delay']
if ((attrs & SHUT_WHO_SCHED) == SHUT_WHO_SCHED):
_c.append('-s')
if ((attrs & SHUT_WHO_MOM) == SHUT_WHO_MOM):
_c.append('-m')
if ((attrs & SHUT_WHO_SECDRY) == SHUT_WHO_SECDRY):
_c.append('-f')
if ((attrs & SHUT_WHO_IDLESECDRY) == SHUT_WHO_IDLESECDRY):
_c.append('-F')
if ((attrs & SHUT_WHO_SECDONLY) == SHUT_WHO_SECDONLY):
_c.append('-i')
return _c
if attrs is None or len(attrs) == 0:
return ret
# if a list, convert to a dictionary to fall into a single processing
# of the attributes
if (isinstance(attrs, list) and len(attrs) > 0 and
not isinstance(attrs[0], tuple)):
tmp_attrs = {}
for each_attr in attrs:
tmp_attrs[each_attr] = ''
del attrs
attrs = tmp_attrs
del tmp_attrs
if isinstance(attrs, (dict, OrderedDict)):
attrs = attrs.items()
for a, v in attrs:
if exclude_attrs is not None and a in exclude_attrs:
continue
if op == IFL_SUBMIT:
if a == ATTR_executable:
executable = v
continue
if a == ATTR_Arglist:
if v is not None:
arglist = self.convert_arglist(v)
if len(arglist) == 0:
return []
continue
if isinstance(v, list):
v = ','.join(v)
# when issuing remote commands, escape spaces in attribute values
if (((hostname is not None) and
(not self.du.is_localhost(hostname))) or
(not dflt_conf)):
if ' ' in str(v):
v = '"' + v + '"'
if '.' in a:
(attribute, resource) = a.split('.')
ret.append('-' + api_to_cli[attribute])
rv = resource
if v is not None:
rv += '=' + str(v)
ret.append(rv)
else:
try:
val = api_to_cli[a]
except KeyError:
self.logger.error('error retrieving key ' + str(a))
# for unknown or junk options
ret.append(a)
if v is not None:
ret.append(str(v))
continue
# on a remote job submit append the remote server name
# to the queue name
if ((op == IFL_SUBMIT) and (hostname is not None)):
if ((not self.du.is_localhost(hostname)) and
(val == 'q') and (v is not None) and
('@' not in v) and (v != '')):
v += '@' + hostname
val = '-' + val
if '=' in val:
if v is not None:
ret.append(val + str(v))
else:
ret.append(val)
else:
ret.append(val)
if v is not None:
ret.append(str(v))
# Executable and argument list must come last in a job submission
if ((op == IFL_SUBMIT) and (executable is not None)):
ret.append('--')
ret.append(executable)
if arglist is not None:
ret.append(arglist)
return ret
def filter_batch_status(self, bs, attrib):
"""
Filter out elements that don't have the attributes requested
This is needed to adapt to the fact that requesting a
resource attribute returns all ``'<resource-name>.*'``
attributes so we need to ensure that the specific resource
requested is present in the stat'ed object.
This is needed especially when calling expect with an op=NE
because we need to filter on objects that have exactly
the attributes requested
:param bs: Batch status
:param attrib: Requested attributes
:type attrib: str or dictionary
:returns: Filtered batch status
"""
if isinstance(attrib, dict):
keys = attrib.keys()
elif isinstance(attrib, str):
keys = attrib.split(',')
else:
keys = attrib
if keys:
del_indices = []
for idx in range(len(bs)):
for k in bs[idx].keys():
if '.' not in k:
continue
if k != 'id' and k not in keys:
del bs[idx][k]
# if no matching resources, remove the object
if len(bs[idx]) == 1:
del_indices.append(idx)
for i in sorted(del_indices, reverse=True):
del bs[i]
return bs
def convert_attributes_by_op(self, attributes, setattrs=False):
"""
Convert attributes by operator, i.e. convert an attribute
of the form
``<attr_name><op><value>`` (e.g. resources_available.ncpus>4)
to
``<attr_name>: (<op>, <value>)``
(e.g. resources_available.ncpus: (GT, 4))
:param attributes: the attributes to convert
:type attributes: List
:param setattrs: if True, set the attributes with no operator
as (SET, '')
:type setattrs: bool
:returns: Converted attributes by operator
"""
# the order of operator matters because they are used to search by
# regex so the longer strings to search must come first
operators = ('<=', '>=', '!=', '=', '>', '<', '~')
d = {}
for attr in attributes:
found = False
for op in operators:
if op in attr:
a = attr.split(op)
d[a[0]] = (PTL_STR_TO_OP[op], a[1])
found = True
break
if not found and setattrs:
d[attr] = (SET, '')
return d
def operator_in_attribute(self, attrib):
"""
Returns True if an operator string is present in an
attribute name
:param attrib: Attribute name
:type attrib: str
:returns: True or False
"""
operators = PTL_STR_TO_OP.keys()
for a in attrib:
for op in operators:
if op in a:
return True
return False
def list_resources(self, objtype=None, objs=[]):
"""
Lists the resources
:param objtype: Type of the object
:type objtype: str
:param objs: Object list
:type objs: List
:returns: List of resources
"""
if objtype in (VNODE, NODE, SERVER, QUEUE, SCHED):
prefix = 'resources_available.'
elif objtype in (JOB, RESV):
prefix = 'Resource_List.'
else:
return
resources = []
for o in objs:
for a in o.keys():
if a.startswith(prefix):
res = a.replace(prefix, '')
if res not in resources:
resources.append(res)
return resources
def compare(self, obj1, obj2, showdiff=False):
"""
Compare two objects.
:param showdiff: whether to print the specific differences,
defaults to False
:type showdiff: bool
:returns: 0 if objects are identical and non zero otherwise
"""
if not showdiff:
ret = cmp(obj1, obj2)
if ret != 0:
self.logger.info('objects differ')
return ret
if not isinstance(obj1, type(obj2)):
self.logger.error('objects are of different type')
return 1
if isinstance(obj1, list):
if len(obj1) != len(obj2):
self.logger.info(
'comparing ' + str(
obj1) + ' and ' + str(
obj2))
self.logger.info('objects are of different lengths')
return
for i in range(len(obj1)):
self.compare(obj1[i], obj2[i], showdiff=showdiff)
return
if isinstance(obj1, dict):
self.logger.info('comparing ' + str(obj1) + ' and ' + str(obj2))
onlyobj1 = []
diffobjs = []
onlyobj2 = []
for k1, v1 in obj1.items():
if k1 not in obj2:
onlyobj1.append(k1 + '=' + str(v1))
if k1 in obj2 and obj2[k1] != v1:
diffobjs.append(
k1 + '=' + str(v1) + ' vs ' + k1 + '=' + str(obj2[k1]))
for k2, v2 in obj2.items():
if k2 not in obj1:
onlyobj2.append(k2 + '=' + str(v2))
if len(onlyobj1) > 0:
self.logger.info("only in first object: " + " ".join(onlyobj1))
if len(onlyobj2) > 0:
self.logger.info(
"only in second object: " + " ".join(onlyobj2))
if len(diffobjs) > 0:
self.logger.info("diff between objects: " + " ".join(diffobjs))
if len(onlyobj1) == len(onlyobj2) == len(diffobjs) == 0:
self.logger.info("objects are identical")
return 0
return 1
@classmethod
def random_str(cls, length=1, prefix=''):
"""
Generates the random string
:param length: Length of the string
:type length: int
:param prefix: Prefix of the string
:type prefix: str
:returns: Random string
"""
        r = [random.choice(string.letters) for _ in range(length)]
        r = ''.join([prefix] + r)
        if hasattr(cls, '_uniq_rstr'):
            while r in cls._uniq_rstr:
                r = [random.choice(string.letters) for _ in range(length)]
                r = ''.join([prefix] + r)
            cls._uniq_rstr.append(r)
        else:
            cls._uniq_rstr = [r]
return r
def _make_template_formula(self, formula):
"""
Create a template of the formula
:param formula: Formula for which template is to be created
:type formula: str
:returns: Template
"""
tformula = []
skip = False
for c in formula:
if not skip and c.isalpha():
tformula.append('$')
skip = True
if c in ('+', '-', '/', ' ', '*', '%'):
skip = False
tformula.append(c)
return "".join(tformula)
def update_attributes_list(self, obj):
"""
Updates the attribute list
:param obj: Objects
:returns: Updated attribute list
"""
if not hasattr(obj, 'attributes'):
return
if not hasattr(obj, 'Resource_List'):
setattr(obj, 'Resource_List', {})
for attr, val in obj.attributes.items():
if attr.startswith('Resource_List.'):
(_, resource) = attr.split('.')
obj.Resource_List[resource] = val
def parse_fgc_limit(self, limstr=None):
"""
Parse an ``FGC`` limit entry, of the form:
``<limtype>[.<resource>]=\[<entity_type>:<entity_name>
=<entity_value>\]``
:param limstr: FGC limit string
:type limstr: str or None
:returns: Parsed FGC string in given format
"""
m = self.lim_tag.match(limstr)
if m:
_v = str(self.decode_value(m.group('entity_value')))
return (m.group('limtype'), m.group('resource'),
m.group('entity_type'), m.group('entity_name'), _v)
return None
def is_job_array(self, jobid):
"""
If a job array return True, otherwise return False
:param jobid: PBS jobid
:returns: True or False
"""
if self.array_tag.match(jobid):
return True
return False
def is_subjob(self, jobid):
"""
If a subjob of a job array, return the subjob id
otherwise return False
:param jobid: PBS job id
:type jobid: str
:returns: True or False
"""
m = self.subjob_tag.match(jobid)
if m:
return m.group('subjobid')
return False
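# Illustrative usage sketch for a few BatchUtils helpers (comments only;
# variable names and values are made up):
#
#   bu = BatchUtils()
#   bu.decode_value('2gb')           # -> 2097152 (kb)
#   bu.decode_value('00:10:00')      # -> 600 (seconds)
#   bu.convert_duration('01:00:30')  # -> 3630
#   bu.parse_select('2:ncpus=2+1:mem=1gb')
#   # -> [{'ncpus': '2'}, {'ncpus': '2'}, {'mem': '1gb'}]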
class PbsTypeFGCLimit(object):
"""
FGC limit entry, of the form:
``<limtype>[.<resource>]=\[<entity_type>:<entity_name>=
<entity_value>\]``
:param attr: FGC limit attribute
:type attr: str
:param value: Value of attribute
:type value: int
:returns: FGC limit entry of given format
"""
fgc_attr_pat = re.compile("(?P<ltype>[a-z_]+)[\.]*(?P<resource>[\w\d-]*)")
fgc_val_pat = re.compile("[\s]*\[(?P<etype>[ugpo]):(?P<ename>[\w\d-]+)"
"=(?P<eval>[\d]+)\][\s]*")
utils = BatchUtils()
def __init__(self, attr, val):
self.attr = attr
self.val = val
a = self.fgc_attr_pat.match(attr)
if a:
self.limit_type = a.group('ltype')
self.resource_name = a.group('resource')
else:
self.limit_type = None
self.resource_name = None
v = self.fgc_val_pat.match(val)
if v:
self.lim_value = self.utils.decode_value(v.group('eval'))
self.entity_type = v.group('etype')
self.entity_name = v.group('ename')
else:
self.lim_value = None
self.entity_type = None
self.entity_name = None
def __val__(self):
return ('[' + str(self.entity_type) + ':' +
str(self.entity_name) + '=' + str(self.lim_value) + ']')
def __str__(self):
return (self.attr + ' = ' + self.__val__())
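# Illustrative usage sketch (comments only): the attribute and value are
# parsed into limit type, resource, entity type/name and a decoded value:
#
#   lim = PbsTypeFGCLimit('max_run_res.ncpus', '[u:user1=4]')
#   lim.limit_type      # -> 'max_run_res'
#   lim.resource_name   # -> 'ncpus'
#   lim.entity_type     # -> 'u'
#   lim.entity_name     # -> 'user1'
#   lim.lim_value       # -> 4
#   str(lim)            # -> 'max_run_res.ncpus = [u:user1=4]'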
class PbsBatchStatus(list):
"""
Wrapper class for Batch Status object
Converts a batch status (as dictlist) into a list of
PbsBatchObjects
:param bs: Batch status
:type bs: List or dictionary
:returns: List of PBS batch objects
"""
    def __init__(self, bs):
        if not isinstance(bs, (list, dict)):
            raise TypeError("Expected a list or dictionary")
        self.__bu = BatchUtils()
        if isinstance(bs, dict):
            self.__bs = [bs]
            self.append(PbsBatchObject(bs))
        else:
            self.__bs = bs
            for b in bs:
                self.append(PbsBatchObject(b))
def __str__(self):
rv = []
for l in self.__bs:
rv += [self.__bu.batch_status_as_dict_to_str(l)]
return "\n".join(rv)
class PbsBatchObject(list):
def __init__(self, bs):
self.set_batch_status(bs)
def set_batch_status(self, bs):
"""
Sets the batch status
:param bs: Batch status
"""
if 'id' in bs:
self.name = bs['id']
for k, v in bs.items():
self.append(PbsAttribute(k, v))
class PbsAttribute(object):
"""
Descriptor class for PBS attribute
:param name: PBS attribute name
:type name: str
:param value: Value for the attribute
:type value: str or int or float
"""
utils = BatchUtils()
def __init__(self, name=None, value=None):
self.set_name(name)
self.set_value(value)
def set_name(self, name):
"""
Set PBS attribute name
:param name: PBS attribute
:type name: str
"""
self.name = name
if name is not None and '.' in name:
self.is_resource = True
self.resource_type, self.resource_name = self.name.split('.')
else:
self.is_resource = False
self.resource_type = self.resource_name = None
def set_value(self, value):
"""
Set PBS attribute value
:param value: Value of PBS attribute
:type value: str or int or float
"""
self.value = value
if isinstance(value, (int, float)) or str(value).isdigit():
self.is_consumable = True
else:
self.is_consumable = False
def obfuscate_name(self, a=None):
"""
Obfuscate PBS attribute name
"""
if a is not None:
on = a
else:
on = self.utils.random_str(len(self.name))
self.decoded_name = self.name
if self.is_resource:
self.set_name(self.resource_name + '.' + on)
def obfuscate_value(self, v=None):
"""
Obfuscate PBS attribute value
"""
        if not self.is_consumable:
self.decoded_value = self.value
return
if v is not None:
ov = v
else:
ov = self.utils.random_str(len(self.value))
self.decoded_value = self.value
self.set_value(ov)
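# Illustrative usage sketch (comments only):
#
#   a = PbsAttribute('resources_available.ncpus', '8')
#   a.is_resource      # -> True
#   a.resource_type    # -> 'resources_available'
#   a.resource_name    # -> 'ncpus'
#   a.is_consumable    # -> True ('8' is numeric)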
class PbsAnonymizer(object):
"""
Holds and controls anonymizing operations of PBS data
    When a dictionary is passed in, the values associated to each
    key are substituted during obfuscation.
    The anonymizer operates on attributes or resources.
    Resource entries operate on the resource name itself rather
    than the entire attribute name; for example, to obfuscate the
    values associated to a custom resource "foo" that could be set
    as resources_available.foo, resources_default.foo or
    Resource_List.foo, all that needs to be passed in to the
    function is "foo" in the resc_vals list.
:param attr_key: Attribute key
:type attr_key: str or None
:param attr_val: Attribute value
:type attr_val: str or None
:param resc_key: Resource key
:type resc_key: str or None
:param resc_val: Resource value
:type resc_val: str or None
"""
logger = logging.getLogger(__name__)
utils = BatchUtils()
du = DshUtils()
def __init__(self, attr_delete=None, resc_delete=None,
attr_key=None, attr_val=None,
resc_key=None, resc_val=None):
# special cases
self._entity = False
self.job_sort_formula = None
self.schedselect = None
self.select = None
self.set_attr_delete(attr_delete)
self.set_resc_delete(resc_delete)
self.set_attr_key(attr_key)
self.set_attr_val(attr_val)
self.set_resc_key(resc_key)
self.set_resc_val(resc_val)
self.anonymize = self.anonymize_batch_status
# global anonymized mapping data
self.gmap_attr_val = {}
self.gmap_resc_val = {}
self.gmap_attr_key = {}
self.gmap_resc_key = {}
def _initialize_key_map(self, keys):
k = {}
if keys is not None:
if isinstance(keys, dict):
return keys
elif isinstance(keys, list):
for i in keys:
k[i] = None
elif isinstance(keys, str):
for i in keys.split(','):
k[i] = None
else:
self.logger.error('unhandled map type')
k = {None: None}
return k
def _initialize_value_map(self, keys):
k = {}
if keys is not None:
if isinstance(keys, dict):
return keys
elif isinstance(keys, list):
for i in keys:
k[i] = {}
elif isinstance(keys, str):
for i in keys.split(','):
k[i] = {}
else:
self.logger.error('unhandled map type')
k = {None: None}
return k
def set_attr_delete(self, ad):
"""
Name of attributes to delete
:param ad: Attributes to delete
:type ad: str or list or dictionary
"""
self.attr_delete = self._initialize_value_map(ad)
def set_resc_delete(self, rd):
"""
Name of resources to delete
:param rd: Resources to delete
:type rd: str or list or dictionary
"""
self.resc_delete = self._initialize_value_map(rd)
def set_attr_key(self, ak):
"""
Name of attributes to obfuscate.
:param ak: Attribute keys
:type ak: str or list or dictionary
"""
self.attr_key = self._initialize_key_map(ak)
def set_attr_val(self, av):
"""
Name of attributes for which to obfuscate the value
:param av: Attributes value to obfuscate
:type av: str or list or dictionary
"""
self.attr_val = self._initialize_value_map(av)
if 'euser' in self.attr_val:
self._entity = True
elif 'egroup' in self.attr_val:
self._entity = True
elif 'project' in self.attr_val:
self._entity = True
def set_resc_key(self, rk):
"""
Name of resources to obfuscate
:param rk: Resource key
:type rk: str or list or dictionary
"""
self.resc_key = self._initialize_key_map(rk)
def set_resc_val(self, rv):
"""
Name of resources for which to obfuscate the value
:param rv: Resource value to obfuscate
:type rv: str or list or dictionary
"""
self.resc_val = self._initialize_value_map(rv)
def set_anon_map_file(self, name):
"""
Name of file in which to store anonymized map data.
This file is meant to remain private to a site as it
contains the sensitive anonymized data.
:param name: Name of file to which anonymized data to store.
:type name: str
"""
self.anon_map_file = name
def anonymize_resource_group(self, file):
"""
Anonymize the user and group fields of a resource
group file
:param file: Resource group file
:type file: str
"""
anon_rg = []
try:
f = open(file)
lines = f.readlines()
f.close()
except:
self.logger.error("Error processing " + file)
return None
for data in lines:
data = data.strip()
if data:
if data[0] == '#':
continue
_d = data.split()
ug = _d[0]
if ':' in ug:
(euser, egroup) = ug.split(':')
else:
euser = ug
egroup = None
if 'euser' not in self.attr_val:
anon_euser = euser
else:
anon_euser = None
if 'euser' in self.gmap_attr_val:
if euser in self.gmap_attr_val['euser']:
anon_euser = self.gmap_attr_val['euser'][euser]
else:
self.gmap_attr_val['euser'] = {}
if euser is not None and anon_euser is None:
anon_euser = self.utils.random_str(len(euser))
self.gmap_attr_val['euser'][euser] = anon_euser
if 'egroup' not in self.attr_val:
anon_egroup = egroup
else:
anon_egroup = None
if egroup is not None:
if 'egroup' in self.gmap_attr_val:
if egroup in self.gmap_attr_val['egroup']:
anon_egroup = (self.gmap_attr_val['egroup']
[egroup])
else:
self.gmap_attr_val['egroup'] = {}
if egroup is not None and anon_egroup is None:
anon_egroup = self.utils.random_str(len(egroup))
self.gmap_attr_val['egroup'][egroup] = anon_egroup
# reconstruct the fairshare info by combining euser and egroup
out = [anon_euser]
if anon_egroup is not None:
out[0] += ':' + anon_egroup
# and appending the rest of the original line
out.append(_d[1])
                if len(_d) > 2:
p = _d[2].strip()
if ('euser' in self.gmap_attr_val and
p in self.gmap_attr_val['euser']):
out.append(self.gmap_attr_val['euser'][p])
else:
out.append(_d[2])
if len(_d) > 2:
out += _d[3:]
anon_rg.append(" ".join(out))
return anon_rg
def anonymize_resource_def(self, resources):
"""
Anonymize the resource definition
"""
if not self.resc_key:
return resources
for curr_anon_resc, val in self.resc_key.items():
if curr_anon_resc in resources:
tmp_resc = copy.copy(resources[curr_anon_resc])
del resources[curr_anon_resc]
if val is None:
if curr_anon_resc in self.gmap_resc_key:
val = self.gmap_resc_key[curr_anon_resc]
else:
val = self.utils.random_str(len(curr_anon_resc))
elif curr_anon_resc not in self.gmap_resc_key:
self.gmap_resc_key[curr_anon_resc] = val
tmp_resc.set_name(val)
resources[val] = tmp_resc
return resources
def __anonymize_fgc(self, d, attr, ar, name, val):
"""
Anonymize an FGC limit value
"""
m = {'u': 'euser', 'g': 'egroup', 'p': 'project'}
if ',' in val:
fgc_lim = val.split(',')
else:
fgc_lim = [val]
nfgc = []
for lim in fgc_lim:
_fgc = PbsTypeFGCLimit(attr, lim)
ename = _fgc.entity_name
if ename in ('PBS_GENERIC', 'PBS_ALL'):
nfgc.append(lim)
continue
obf_ename = ename
for etype, nm in m.items():
if _fgc.entity_type == etype:
if nm not in self.gmap_attr_val:
if nm in ar and ename in ar[nm]:
obf_ename = ar[nm][ename]
else:
obf_ename = self.utils.random_str(len(ename))
self.gmap_attr_val[nm] = {ename: obf_ename}
                    elif ename in self.gmap_attr_val[nm]:
                        obf_ename = self.gmap_attr_val[nm][ename]
break
_fgc.entity_name = obf_ename
nfgc.append(_fgc.__val__())
d[attr] = ",".join(nfgc)
def __anonymize_attr_val(self, d, attr, ar, name, val):
"""
Obfuscate an attribute/resource values
"""
# don't obfuscate default project
if attr == 'project' and val == '_pbs_project_default':
return
nstr = []
if '.' in attr:
m = self.gmap_resc_val
else:
m = self.gmap_attr_val
if val in ar[name]:
nstr.append(ar[name][val])
if name in self.lmap:
self.lmap[name][val] = ar[name][val]
else:
self.lmap[name] = {val: ar[name][val]}
if name not in m:
m[name] = {val: ar[name][val]}
elif val not in m[name]:
m[name][val] = ar[name][val]
else:
# Obfuscate by randomizing with a value of the same length
tmp_v = val.split(',')
for v in tmp_v:
if v in ar[name]:
r = ar[name][v]
elif name in m and v in m[name]:
r = m[name][v]
else:
r = self.utils.random_str(len(v))
if not isinstance(ar[name], dict):
ar[name] = {}
ar[name][v] = r
self.lmap[name] = {v: r}
if name not in m:
m[name] = {v: r}
elif v not in m[name]:
m[name][v] = r
nstr.append(r)
if d is not None:
d[attr] = ",".join(nstr)
def __anonymize_attr_key(self, d, attr, ar, name, res):
"""
Obfuscate an attribute/resource key
"""
if res is not None:
m = self.gmap_resc_key
else:
m = self.gmap_attr_key
if not ar[name]:
if name in m:
ar[name] = m[name]
else:
randstr = self.utils.random_str(len(name))
ar[name] = randstr
m[name] = randstr
if d is not None:
tmp_val = d[attr]
del d[attr]
if res is not None:
d[res + '.' + ar[name]] = tmp_val
else:
d[ar[name]] = tmp_val
if name not in self.lmap:
self.lmap[name] = ar[name]
if name not in m:
m[name] = ar[name]
def anonymize_batch_status(self, data=None):
"""
Anonymize arbitrary batch_status data
:param data: Batch status data
:type data: List or dictionary
"""
if not isinstance(data, (list, dict)):
self.logger.error('data expected to be dict or list')
return None
if isinstance(data, dict):
dat = [data]
else:
dat = data
# Local mapping data used to store obfuscation mapping data for this
# specific item, d
self.lmap = {}
# loop over each "batch_status" entry to obfuscate
for d in dat:
if self.attr_delete is not None:
for todel in self.attr_delete:
if todel in d:
del d[todel]
if self.resc_delete is not None:
for todel in self.resc_delete:
for tmpk, _ in d.items():
if '.' in tmpk and todel == tmpk.split('.')[1]:
del d[tmpk]
# Loop over each object's attributes, this is where the special
# cases are handled (e.g., FGC limits, formula, select spec...)
for attr in d:
val = d[attr]
if '.' in attr:
(res_type, res_name) = attr.split('.')
else:
res_type = None
res_name = attr
if res_type is not None:
if self._entity and attr.startswith('max_run'):
self.__anonymize_fgc(d, attr, self.attr_val,
attr, val)
if res_name in self.resc_val:
if attr.startswith('max_run'):
self.__anonymize_fgc(d, attr, self.attr_val,
attr, val)
self.__anonymize_attr_val(d, attr, self.resc_val,
res_name, val)
if res_name in self.resc_key:
self.__anonymize_attr_key(d, attr, self.resc_key,
res_name, res_type)
else:
if attr in self.attr_val:
self.__anonymize_attr_val(d, attr, self.attr_val,
attr, val)
if attr in self.attr_key:
self.__anonymize_attr_key(d, attr, self.attr_key,
attr, None)
if ((attr in ('job_sort_formula', 'schedselect',
'select')) and self.resc_key):
for r in self.resc_key:
if r in val:
if r not in self.gmap_resc_key:
self.gmap_resc_key[
r] = self.utils.random_str(len(r))
val = val.replace(r, self.gmap_resc_key[r])
setattr(self, attr, val)
d[attr] = val
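    # Usage sketch for anonymize_batch_status (illustrative only): 'anon'
    # stands for an instance of the enclosing anonymizer class and the
    # variable names are assumptions, not part of this module.
    #
    #   bs = server.status(JOB)             # list of batch_status dictionaries
    #   anon.anonymize_batch_status(bs)     # values are obfuscated in place
    #   print(anon.gmap_attr_val)           # original -> obfuscated mapping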
def anonymize_file(self, filename, extension='.anon', inplace=False):
"""
Replace every occurrence of any entry in the global
map for the given file by its anonymized values.
        Returns a file named after the original file with the
        extension suffix. If inplace is True, returns the original
        file name for which contents have been replaced
:param filename: Name of the file to anonymize
:type filename: str
:param extension: Extension of the anonymized file
:type extension: str
:param inplace: If true returns the original file name for
which contents have been replaced
:type inplace: bool
"""
if not inplace:
fn = (filename + extension)
nf = open(fn, 'w')
else:
(_, fn) = self.du.mkstemp()
nf = open(fn, "w")
f = open(filename)
for data in f:
            for k in self.attr_key:
                if k in data:
                    if k not in self.gmap_attr_key:
                        self.gmap_attr_key[k] = self.utils.random_str(len(k))
                    data = data.replace(k, self.gmap_attr_key[k])
            for k in self.resc_key:
                if k not in self.gmap_resc_key:
                    self.gmap_resc_key[k] = self.utils.random_str(len(k))
                data = data.replace(k, self.gmap_resc_key[k])
for ak, av in self.gmap_attr_val.items():
for k, v in av.items():
data = data.replace(k, v)
for ak, av in self.gmap_resc_val.items():
for k, v in av.items():
data = data.replace(k, v)
nf.write(data)
nf.close()
f.close()
if inplace:
self.du.run_cmd(cmd=['mv', fn, filename])
return filename
return fn
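    # Hedged example for anonymize_file (the path below is hypothetical):
    # every key/value recorded in the global maps is replaced in the file and
    # the path of the anonymized copy is returned.
    #
    #   out = anon.anonymize_file('/tmp/qstat_f.out')   # -> /tmp/qstat_f.out.anon
    #   out = anon.anonymize_file('/tmp/qstat_f.out', inplace=True)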
def anonymize_accounting_log(self, logfile):
"""
Anonymize the accounting log
        :param logfile: Accounting log file
:type logfile: str
"""
try:
f = open(logfile)
except:
self.logger.error("Error processing " + logfile)
return None
if 'euser' in self.attr_val:
self.attr_val['user'] = self.attr_val['euser']
self.attr_val['requestor'] = self.attr_val['euser']
if 'egroup' in self.attr_val:
self.attr_val['group'] = self.attr_val['egroup']
if 'euser' in self.gmap_attr_val:
self.gmap_attr_val['user'] = self.gmap_attr_val['euser']
if 'egroup' in self.gmap_attr_val:
self.gmap_attr_val['group'] = self.gmap_attr_val['egroup']
anon_data = []
for data in f:
# accounting log format is
# %Y/%m/%d %H:%M:%S;<Key>;<Id>;<key1=val1> <key2=val2> ...
curr = data.split(';', 3)
if curr[1] in ('A', 'L'):
anon_data.append(data.strip())
continue
buf = curr[3].strip().split(' ')
# Split the attribute list into key value pairs
kvl = map(lambda n: n.split('=', 1), buf)
for i in range(len(kvl)):
k, v = kvl[i]
if k == 'requestor':
if '@' in v:
(v, host) = v.split('@', 1)
if k in self.attr_val:
if k == 'project' and v == '_pbs_project_default':
continue
anon_kv = None
if k in self.gmap_attr_val:
if v in self.gmap_attr_val[k]:
anon_kv = self.gmap_attr_val[k][v]
else:
self.gmap_attr_val[k] = {}
if anon_kv is None:
anon_kv = self.utils.random_str(len(v))
self.gmap_attr_val[k][v] = anon_kv
kvl[i][1] = anon_kv
# append server from where request was made
if k == 'requestor':
kvl[i][1] += '@' + host
if k in self.attr_key:
if k in self.gmap_attr_key:
                        anon_ak = self.gmap_attr_key[k]
else:
anon_ak = self.utils.random_str(len(k))
self.gmap_attr_key[k] = anon_ak
kvl[i][0] = anon_ak
if '.' in k:
restype, resname = k.split('.')
for rv in self.resc_val:
if resname == rv:
anon_rv = None
if resname in self.gmap_resc_val:
if v in self.gmap_resc_val[resname]:
anon_rv = self.gmap_resc_val[resname][v]
else:
self.gmap_resc_val[resname] = {}
if anon_rv is None:
anon_rv = self.utils.random_str(len(v))
self.gmap_resc_val[resname][v] = anon_rv
kvl[i][1] = anon_rv
if resname in self.resc_key:
if resname in self.gmap_resc_key:
anon_rk = self.gmap_resc_key[resname]
else:
anon_rk = self.utils.random_str(len(resname))
self.gmap_resc_key[resname] = anon_rk
kvl[i][0] = restype + '.' + anon_rk
anon_data.append(";".join(curr[:3]) + ";" +
" ".join(map(lambda n: "=".join(n), kvl)))
f.close()
return anon_data
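    # Sketch for anonymize_accounting_log (the path is an assumption): the
    # anonymized records are returned as a list of strings, so the caller
    # decides where to write them.
    #
    #   recs = anon.anonymize_accounting_log(
    #       '/var/spool/pbs/server_priv/accounting/20200101')
    #   if recs is not None:
    #       open('/tmp/acct.anon', 'w').write("\n".join(recs))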
def anonymize_sched_config(self, scheduler):
"""
Anonymize the scheduler config
:param scheduler: PBS scheduler object
"""
if len(self.resc_key) == 0:
return
# when anonymizing we get rid of the comments as they may contain
# sensitive information
scheduler._sched_config_comments = {}
# If resources need to be anonymized then update the resources line
# job_sort_key and node_sort_key
sr = scheduler.get_resources()
if sr:
for i in range(0, len(sr)):
if sr[i] in self.resc_key:
if sr[i] in self.gmap_resc_key:
sr[i] = self.gmap_resc_key[sr[i]]
else:
anon_res = self.utils.random_str(len(sr[i]))
self.gmap_resc_key[sr[i]] = anon_res
sr[i] = anon_res
scheduler.sched_config['resources'] = ",".join(sr)
for k in ['job_sort_key', 'node_sort_key']:
if k in scheduler.sched_config:
sc_jsk = scheduler.sched_config[k]
                if not isinstance(sc_jsk, list):
                    sc_jsk = [sc_jsk]
                for r in self.resc_key:
                    for i in range(len(sc_jsk)):
                        if r in sc_jsk[i]:
                            if r not in self.gmap_resc_key:
                                self.gmap_resc_key[r] = \
                                    self.utils.random_str(len(r))
                            sc_jsk[i] = sc_jsk[i].replace(
                                r, self.gmap_resc_key[r])
def __str__(self):
return ("Attributes Values: " + str(self.gmap_attr_val) + "\n" +
"Resources Values: " + str(self.gmap_resc_val) + "\n" +
"Attributes Keys: " + str(self.gmap_attr_key) + "\n" +
"Resources Keys: " + str(self.gmap_resc_key))
class Entity(object):
"""
Abstract representation of a PBS consumer that has an
external relationship to the PBS system. For example, a
user associated to an OS identifier (uid) maps to a PBS
user entity.
Entities may be subject to policies, such as limits, consume
a certain amount of resource and/or fairshare usage.
:param etype: Entity type
:type etype: str or None
:param name: Entity name
:type name: str or None
"""
def __init__(self, etype=None, name=None):
self.type = etype
self.name = name
self.limits = []
self.resource_usage = {}
self.fairshare_usage = 0
def set_limit(self, limit=None):
"""
:param limit: Limit to be set
:type limit: str or None
"""
for l in self.limits:
if str(limit) == str(l):
return
self.limits.append(limit)
def set_resource_usage(self, container=None, resource=None, usage=None):
"""
        Set the resource usage
        :param container: Container in which to track the usage
        :param resource: PBS resource
:type resource: str or None
:param usage: Resource usage value
:type usage: str or None
"""
if self.type:
if container in self.resource_usage:
                if self.resource_usage[container]:
if resource in self.resource_usage[container]:
self.resource_usage[container][resource] += usage
else:
self.resource_usage[container][resource] = usage
else:
self.resource_usage[container] = {resource: usage}
def set_fairshare_usage(self, usage=0):
"""
Set fairshare usage
:param usage: Fairshare usage value
:type usage: int
"""
self.fairshare_usage += usage
def __repr__(self):
return self.__str__()
def __str__(self):
return str(self.limits) + ' ' + str(self.resource_usage) + ' ' + \
str(self.fairshare_usage)
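# Illustrative sketch of populating an Entity (names and values are made up):
#
#   e = Entity(etype='u', name='user1')
#   e.set_resource_usage(container='workq', resource='ncpus', usage=4)
#   e.set_fairshare_usage(100)
#   print(e)    # -> [] {'workq': {'ncpus': 4}} 100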
class Policy(object):
"""
Abstract PBS policy. Can be one of ``limits``,
``access control``, ``scheduling policy``, etc...this
class does not currently support any operations
"""
def __init__(self):
pass
class Limit(Policy):
"""
Representation of a PBS limit
Limits apply to containers, are of a certain type
(e.g., max_run_res.ncpus) associated to a given resource
(e.g., resource), on a given entity (e.g.,user Bob) and
have a certain value.
:param limit_type: Type of the limit
:type limit_type: str or None
:param resource: PBS resource
:type resource: str or None
:param entity_obj: Entity object
:param value: Limit value
:type value: int
"""
def __init__(self, limit_type=None, resource=None,
entity_obj=None, value=None, container=None,
container_id=None):
self.set_container(container, container_id)
self.soft_limit = False
self.hard_limit = False
self.set_limit_type(limit_type)
self.set_resource(resource)
self.set_value(value)
self.entity = entity_obj
def set_container(self, container, container_id):
"""
Set the container
:param container: Container which is to be set
:type container: str
:param container_id: Container id
"""
self.container = container
self.container_id = container_id
def set_limit_type(self, t):
"""
Set the limit type
:param t: Limit type
:type t: str
"""
self.limit_type = t
if '_soft' in t:
self.soft_limit = True
else:
self.hard_limit = True
def set_resource(self, resource):
"""
Set the resource
:param resource: resource value to set
:type resource: str
"""
self.resource = resource
def set_value(self, value):
"""
Set the resource value
:param value: Resource value
:type value: str
"""
self.value = value
def __eq__(self, value):
if str(self) == str(value):
return True
return False
def __str__(self):
return self.__repr__()
def __repr__(self):
        l = [self.container_id, self.limit_type, self.resource, '[',
             self.entity.type, ':', self.entity.name, '=', self.value, ']']
        return " ".join(map(str, l))
class ExpectActions(object):
"""
List of action handlers to run when Server's expect
function does not get the expected result
:param action: Action to run
:type action: str
:param level: Logging level
"""
actions = {}
def __init__(self, action=None, level=logging.INFO):
self.logger = logging.getLogger(__name__)
self.add_action(action, level=level)
def add_action(self, action=None, hostname=None, level=logging.INFO):
"""
Add an action
:param action: Action to add
:param hostname: Machine hostname
:type hostname: str
:param level: Logging level
"""
if action is not None and action.name is not None and\
action.name not in self.actions:
self.actions[action.name] = action
msg = ['expect action: added action ' + action.name]
if hostname:
msg += [' to server ' + hostname]
if level >= logging.INFO:
self.logger.info("".join(msg))
else:
self.logger.debug("".join(msg))
def has_action(self, name):
"""
check whether action exists or not
:param name: Name of action
:type name: str
"""
if name in self.actions:
return True
return False
def get_action(self, name):
"""
Get an action if exists
:param name: Name of action
:type name: str
"""
if name in self.actions:
return self.actions[name]
return None
    def list_actions(self, level=logging.INFO):
        """
        List all actions
        :param level: Logging level
        """
        if level >= logging.INFO:
            self.logger.info(self.get_all_actions())
        else:
            self.logger.debug(self.get_all_actions())
    def get_all_actions(self):
        """
        Get all the actions
        """
        return self.actions.values()
def get_actions_by_type(self, atype=None):
"""
Get an action by type
:param atype: Action type
:type atype: str
"""
if atype is None:
return None
ret_actions = []
for action in self.actions.values():
if action.type is not None and action.type == atype:
ret_actions.append(action)
return ret_actions
def _control_action(self, action=None, name=None, enable=None):
        if action:
            action.enabled = enable
            name = action.name
        elif name is not None:
            if name == 'ALL':
                for a in self.actions.values():
                    a.enabled = enable
            else:
                a = self.get_action(name)
                a.enabled = enable
else:
return
if enable:
msg = 'enabled'
else:
msg = 'disabled'
self.logger.info('expect action: ' + name + ' ' + msg)
def disable_action(self, action=None, name=None):
"""
Disable an action
"""
self._control_action(action, name, enable=False)
def enable_action(self, action=None, name=None):
"""
Enable an action
"""
self._control_action(action, name, enable=True)
def disable_all_actions(self):
"""
Disable all actions
"""
for a in self.actions.values():
a.enabled = False
def enable_all_actions(self):
"""
Enable all actions
"""
for a in self.actions.values():
a.enabled = True
class ExpectAction(object):
"""
Action function to run when Server's expect function does
not get the expected result
:param atype: Action type
:type atype: str
"""
def __init__(self, name=None, enabled=True, atype=None, action=None,
level=logging.INFO):
self.logger = logging.getLogger(__name__)
self.set_name(name, level=level)
self.set_enabled(enabled)
self.set_type(atype)
self.set_action(action)
def set_name(self, name, level=logging.INFO):
"""
        Set the action name
:param name: Action name
:type name: str
"""
if level >= logging.INFO:
self.logger.info('expect action: created new action ' + name)
else:
self.logger.debug('expect action: created new action ' + name)
self.name = name
def set_enabled(self, enabled):
self.enabled = enabled
def set_type(self, atype):
self.type = atype
def set_action(self, action):
self.action = action
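# Sketch of registering an expect action on a server (the handler signature
# below is an assumption; ExpectAction itself only stores the callable):
#
#   def on_unexpected(*args, **kwargs):
#       logging.getLogger(__name__).info('expect did not get expected value')
#   act = ExpectAction(name='log_failure', atype=JOB, action=on_unexpected)
#   server.add_expect_action(action=act)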
class PbsTypeAttribute(dict):
"""
Experimental. This is a placeholder object that will be used
in the future to map attribute information and circumvent
    the error-prone dynamic type detection that is currently done
using ``decode_value()``
"""
def __getitem__(self, name):
return BatchUtils.decode_value(super(PbsTypeAttribute,
self).__getitem__(name))
class PBSObject(object):
"""
Generic PBS Object encapsulating attributes and defaults
:param name: The name associated to the object
:type name: str
:param attrs: Dictionary of attributes to set on object
:type attrs: Dictionary
:param defaults: Dictionary of default attributes. Setting
this will override any other object's default
:type defaults: Dictionary
"""
utils = BatchUtils()
platform = sys.platform
def __init__(self, name, attrs={}, defaults={}):
self.attributes = OrderedDict()
self.name = name
self.dflt_attributes = defaults
self.attropl = None
self.custom_attrs = OrderedDict()
self.ctime = int(time.time())
self.set_attributes(attrs)
def set_attributes(self, a={}):
"""
set attributes and custom attributes on this object.
custom attributes are used when converting attributes
to CLI
:param a: Attribute dictionary
:type a: Dictionary
"""
if isinstance(a, list):
a = OrderedDict(a)
self.attributes = OrderedDict(self.dflt_attributes.items() +
self.attributes.items() + a.items())
self.custom_attrs = OrderedDict(self.custom_attrs.items() +
a.items())
def unset_attributes(self, attrl=[]):
"""
Unset attributes from object's attributes and custom
attributes
:param attrl: Attribute list
:type attrl: List
"""
for attr in attrl:
if attr in self.attributes:
del self.attributes[attr]
if attr in self.custom_attrs:
del self.custom_attrs[attr]
def __str__(self):
"""
Return a string representation of this PBSObject
"""
if self.name is None:
return ""
s = []
if isinstance(self, Job):
s += ["Job Id: " + self.name + "\n"]
elif isinstance(self, Queue):
s += ["Queue: " + self.name + "\n"]
elif isinstance(self, Server):
s += ["Server: " + self.hostname + "\n"]
elif isinstance(self, Reservation):
s += ["Name: " + "\n"]
else:
s += [self.name + "\n"]
for k, v in self.attributes.items():
s += [" " + k + " = " + str(v) + "\n"]
return "".join(s)
def __repr__(self):
return str(self.attributes)
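# Minimal illustration of PBSObject attribute handling (attribute names are
# placeholders):
#
#   obj = PBSObject('myobj', attrs={'resources_available.ncpus': '4'})
#   obj.set_attributes({'comment': 'example'})
#   obj.unset_attributes(['comment'])
#   print(obj)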
class PBSService(PBSObject):
"""
Generic PBS service object to hold properties of PBS daemons
:param name: The name associated to the object
:type name: str or None
:param attrs: Dictionary of attributes to set on object
:type attrs: Dictionary
:param defaults: Dictionary of default attributes. Setting
this will override any other object's default
:type defaults: Dictionary
:param pbsconf_file: Optional path to the pbs configuration
file
:type pbsconf_file: str or None
:param diagmap: A dictionary of PBS objects (node,server,etc)
to mapped files from PBS diag directory
:type diagmap: Dictionary
:param diag: path to PBS diag directory
                 (This will override diagmap)
:type diag: str or None
"""
du = DshUtils()
pu = ProcUtils()
def __init__(self, name=None, attrs={}, defaults={}, pbsconf_file=None,
diagmap={}, diag=None):
if name is None:
self.hostname = socket.gethostname()
else:
self.hostname = name
if diag:
self.diagmap = self._load_from_diag(diag)
self.has_diag = True
self.diag = diag
elif len(diagmap) > 0:
self.diagmap = diagmap
self.diag = None
self.has_diag = True
else:
self.diagmap = {}
self.diag = None
self.has_diag = False
if not self.has_diag:
try:
self.fqdn = socket.gethostbyaddr(self.hostname)[0]
if self.hostname != self.fqdn:
self.logger.info('FQDN name ' + self.fqdn + ' differs '
'from name provided ' + self.hostname)
self.hostname = self.fqdn
except:
pass
else:
self.fqdn = self.hostname
self.shortname = self.hostname.split('.')[0]
self.logutils = None
self.logfile = None
self.acctlogfile = None
self.pid = None
self.pbs_conf = {}
self.pbs_env = {}
self._is_local = True
self.launcher = None
PBSObject.__init__(self, name, attrs, defaults)
if not self.has_diag:
if not self.du.is_localhost(self.hostname):
self._is_local = False
if pbsconf_file is None and not self.has_diag:
self.pbs_conf_file = self.du.get_pbs_conf_file(name)
else:
self.pbs_conf_file = pbsconf_file
if self.pbs_conf_file == '/etc/pbs.conf':
self.default_pbs_conf = True
elif (('PBS_CONF_FILE' not in os.environ) or
(os.environ['PBS_CONF_FILE'] != self.pbs_conf_file)):
self.default_pbs_conf = False
else:
self.default_pbs_conf = True
# default pbs_server_name to hostname, it will get set again once the
# config file is processed
self.pbs_server_name = self.hostname
# If diag is given then bypass parsing pbs.conf
if self.has_diag:
if diag is None:
t = 'pbs_diag_%s' % (time.strftime("%y%m%d_%H%M%S"))
self.diag = os.path.join(self.du.get_tempdir(), t)
self.pbs_conf['PBS_HOME'] = self.diag
self.pbs_conf['PBS_EXEC'] = self.diag
self.pbs_conf['PBS_SERVER'] = self.hostname
m = re.match('.*pbs_diag_(?P<datetime>\d{6,6}_\d{6,6}).*',
self.diag)
if m:
tm = time.strptime(m.group('datetime'), "%y%m%d_%H%M%S")
self.ctime = int(time.mktime(tm))
else:
self.pbs_conf = self.du.parse_pbs_config(self.hostname,
self.pbs_conf_file)
if self.pbs_conf is None or len(self.pbs_conf) == 0:
self.pbs_conf = {'PBS_HOME': "", 'PBS_EXEC': ""}
else:
ef = os.path.join(self.pbs_conf['PBS_HOME'], 'pbs_environment')
self.pbs_env = self.du.parse_pbs_environment(self.hostname, ef)
self.pbs_server_name = self.du.get_pbs_server_name(
self.pbs_conf)
self.init_logfile_path(self.pbs_conf)
def _load_from_diag(self, diag):
diagmap = {}
diagmap[SERVER] = os.path.join(diag, 'qstat_Bf.out')
diagmap[VNODE] = os.path.join(diag, 'pbsnodes_va.out')
diagmap[QUEUE] = os.path.join(diag, 'qstat_Qf.out')
diagmap[JOB] = os.path.join(diag, 'qstat_tf.out')
if not os.path.isfile(diagmap[JOB]):
diagmap[JOB] = os.path.join(diag, 'qstat_f.out')
diagmap[RESV] = os.path.join(diag, 'pbs_rstat_f.out')
diagmap[SCHED] = os.path.join(diag, 'qmgr_psched.out')
diagmap[HOOK] = []
if (os.path.isdir(os.path.join(diag, 'server_priv')) and
os.path.isdir(os.path.join(diag, 'server_priv', 'hooks'))):
_ld = os.listdir(os.path.join(diag, 'server_priv', 'hooks'))
for f in _ld:
if f.endswith('.HK'):
diagmap[HOOK].append(
os.path.join(diag, 'server_priv', 'hooks', f))
# Format of qmgr_psched.out differs from Batch Status, we transform
# it to go through the common batch status parsing routines
if os.path.isfile(diagmap[SCHED]):
f = open(os.path.join(diag, 'ptl_qstat_Sched.out'), 'w')
lines = open(diagmap[SCHED])
f.write("Sched \n")
for l in lines:
recs = l.split()
f.write("".join(recs[2:5]) + "\n")
f.close()
diagmap[SCHED] = os.path.join(diag, 'ptl_qstat_Sched.out')
else:
diagmap[SCHED] = None
return diagmap
def init_logfile_path(self, conf=None):
"""
Initialize path to log files for this service
:param conf: PBS conf file parameters
:type conf: Dictionary
"""
elmt = self._instance_to_logpath(self)
if elmt is None:
return
if conf is not None and 'PBS_HOME' in conf:
tm = time.strftime("%Y%m%d", time.localtime())
self.logfile = os.path.join(conf['PBS_HOME'], elmt, tm)
self.acctlogfile = os.path.join(conf['PBS_HOME'], 'server_priv',
'accounting', tm)
def _instance_to_logpath(self, inst):
"""
returns the log path associated to this service
"""
if isinstance(inst, Scheduler):
logval = 'sched_logs'
elif isinstance(inst, Server):
logval = 'server_logs'
elif isinstance(inst, MoM):
logval = 'mom_logs'
elif isinstance(inst, Comm):
logval = 'comm_logs'
else:
logval = None
return logval
def _instance_to_cmd(self, inst):
"""
returns the command associated to this service
"""
if isinstance(inst, Scheduler):
cmd = 'pbs_sched'
elif isinstance(inst, Server):
cmd = 'pbs_server'
elif isinstance(inst, MoM):
cmd = 'pbs_mom'
elif isinstance(inst, Comm):
cmd = 'pbs_comm'
else:
cmd = None
return cmd
def _instance_to_servicename(self, inst):
"""
return the service name associated to the instance. One of
``server, scheduler, or mom.``
"""
if isinstance(inst, Scheduler):
nm = 'scheduler'
elif isinstance(inst, Server):
nm = 'server'
elif isinstance(inst, MoM):
nm = 'mom'
elif isinstance(inst, Comm):
nm = 'comm'
else:
nm = ''
return nm
def _instance_to_privpath(self, inst):
"""
returns the path to priv associated to this service
"""
if isinstance(inst, Scheduler):
priv = 'sched_priv'
elif isinstance(inst, Server):
priv = 'server_priv'
elif isinstance(inst, MoM):
priv = 'mom_priv'
elif isinstance(inst, Comm):
priv = 'server_priv'
else:
priv = None
return priv
def _instance_to_lock(self, inst):
"""
returns the path to lock file associated to this service
"""
if isinstance(inst, Scheduler):
lock = 'sched.lock'
elif isinstance(inst, Server):
lock = 'server.lock'
elif isinstance(inst, MoM):
lock = 'mom.lock'
elif isinstance(inst, Comm):
lock = 'comm.lock'
else:
lock = None
return lock
def set_launcher(self, execargs=None):
self.launcher = execargs
def _isUp(self, inst):
"""
returns True if service is up and False otherwise
"""
live_pids = self._all_instance_pids(inst)
pid = self._get_pid(inst)
if live_pids is not None and pid in live_pids:
return True
return False
def _signal(self, sig, inst=None, procname=None):
"""
Send signal ``sig`` to service. sig is the signal name
as it would be sent to the program kill, e.g. -HUP.
Return the ``out/err/rc`` from the command run to send
the signal. See DshUtils.run_cmd
:param inst: Instance
:type inst: str
:param procname: Process name
:type procname: str or None
"""
pid = None
if inst is not None:
if inst.pid is not None:
pid = inst.pid
else:
pid = self._get_pid(inst)
if procname is not None:
pi = self.pu.get_proc_info(self.hostname, procname)
if pi is not None and pi.values() and pi.values()[0]:
for _p in pi.values()[0]:
ret = self.du.run_cmd(self.hostname, ['kill', sig, _p.pid],
sudo=True)
return ret
if pid is None:
return {'rc': 0, 'err': '', 'out': 'no pid to signal'}
return self.du.run_cmd(self.hostname, ['kill', sig, pid], sudo=True)
def _all_instance_pids(self, inst):
"""
Return a list of all ``PIDS`` that match the
instance name or None.
"""
cmd = self._instance_to_cmd(inst)
self.pu.get_proc_info(self.hostname, ".*" + cmd + ".*",
regexp=True)
_procs = self.pu.processes.values()
if _procs:
_pids = []
for _p in _procs:
_pids.extend(map(lambda x: x.pid, _p))
return _pids
return None
def _get_pid(self, inst):
"""
Get the ``PID`` associated to this instance.
Implementation note, the pid is read from the
daemon's lock file.
This is different than _all_instance_pids in that
the PID of the last running instance can be retrieved
with ``_get_pid`` but not with ``_all_instance_pids``
"""
priv = self._instance_to_privpath(inst)
lock = self._instance_to_lock(inst)
path = os.path.join(self.pbs_conf['PBS_HOME'], priv, lock)
rv = self.du.cat(self.hostname, path, sudo=True, logerr=False)
if ((rv['rc'] == 0) and (len(rv['out']) > 0)):
self.pid = rv['out'][0].strip()
else:
self.pid = None
return self.pid
def _start(self, inst=None, args=None, cmd_map=None, launcher=None):
"""
Generic service startup
:param inst: The instance to act upon
:type inst: str
:param args: Optional command-line arguments
:type args: List
:param cmd_map: Optional dictionary of command line
options to configuration variables
:type cmd_map: Dictionary
:param launcher: Optional utility to invoke the launch
of the service. This option only takes
effect on ``Unix/Linux``. The option can
be a string or a list.Options may be passed
to the launcher, for example to start a
service through the valgrind utility
redirecting to a log file,launcher could be
set to e.g.
``['valgrind', '--log-file=/tmp/vlgrd.out']``
or ``'valgrind --log-file=/tmp/vlgrd.out'``
"""
if launcher is None and self.launcher is not None:
launcher = self.launcher
app = self._instance_to_cmd(inst)
if app is None:
return
_m = ['service: starting', app]
if args is not None:
_m += ['with args: ']
_m += args
as_script = False
wait_on = True
if launcher is not None:
if isinstance(launcher, str):
launcher = launcher.split()
if app == 'pbs_server':
# running the pbs server through valgrind requires a bit of
# a dance because the pbs_server binary is pbs_server.bin
# and to run it requires being able to find libraries, so
# LD_LIBRARY_PATH is set and pbs_server.bin is run as a
# script
pexec = inst.pbs_conf['PBS_EXEC']
ldlib = ['LD_LIBRARY_PATH=' +
os.path.join(pexec, 'lib') + ':' +
os.path.join(pexec, 'pgsql', 'lib')]
app = 'pbs_server.bin'
else:
ldlib = []
cmd = ldlib + launcher
as_script = True
wait_on = False
else:
cmd = []
cmd += [os.path.join(self.pbs_conf['PBS_EXEC'], 'sbin', app)]
if args is not None:
cmd += args
if not self.default_pbs_conf:
cmd = ['PBS_CONF_FILE=' + inst.pbs_conf_file] + cmd
as_script = True
if cmd_map is not None:
conf_cmd = self.du.map_pbs_conf_to_cmd(cmd_map,
pconf=self.pbs_conf)
cmd.extend(conf_cmd)
_m += conf_cmd
self.logger.info(" ".join(_m))
ret = self.du.run_cmd(self.hostname, cmd, sudo=True,
as_script=as_script, wait_on_script=wait_on,
level=logging.INFOCLI, logerr=False)
if ret['rc'] != 0:
raise PbsServiceError(rv=False, rc=ret['rc'], msg=ret['err'])
ret_msg = True
if ret['err']:
ret_msg = ret['err']
self.pid = self._get_pid(inst)
# get_pid gets information from a lock file that may not have been
# removed when the daemon stopped so we verify that the PID is
# actually alive in the list of pids returned by ps
live_pids = self._all_instance_pids(inst)
i = 0
while ((self.pid is None) or
(live_pids is None or self.pid not in live_pids)) and (i < 30):
time.sleep(1)
i += 1
live_pids = self._all_instance_pids(inst)
self.pid = self._get_pid(inst)
if live_pids is not None and self.pid in live_pids:
return ret_msg
if i == 30:
raise PbsServiceError(rv=False, rc=-1, msg="Could not find PID")
return ret_msg
def _stop(self, sig='-TERM', inst=None):
if inst is None:
return True
self._signal(sig, inst)
pid = self._get_pid(inst)
chk_pid = self._all_instance_pids(inst)
if pid is None or chk_pid is None:
return True
num_seconds = 0
while (chk_pid is not None) and (str(pid) in chk_pid):
if num_seconds > 60:
m = (self.logprefix + 'could not stop service ' +
self._instance_to_servicename(inst))
raise PbsServiceError(rv=False, rc=-1, msg=m)
time.sleep(1)
num_seconds += 1
chk_pid = self._all_instance_pids(inst)
inst.pid = None
return True
def log_lines(self, logtype, id=None, n=50, tail=True, day=None,
starttime=None, endtime=None):
"""
Return the last ``<n>`` lines of a PBS log file, which
can be one of ``server``, ``scheduler``, ``MoM``, or
``tracejob``
:param logtype: The entity requested, an instance of a
Scheduler, Server or MoM object, or the
string 'tracejob' for tracejob
:type logtype: str or object
:param id: The id of the object to trace. Only used for
tracejob
        :param n: One of 'ALL' or the number of lines to
                  process/display, defaults to 50.
:type n: int
:param tail: if True, parse log from the end to the start,
otherwise parse from the start to the end.
Defaults to True.
:type tail: bool
        :param day: Optional day in ``YYYYMMDD`` format. Defaults
to current day
:type day: int
:param starttime: date timestamp to start matching
:param endtime: date timestamp to end matching
:returns: Last ``<n>`` lines of logfile for ``Server``,
``Scheduler``, ``MoM or tracejob``
"""
logval = None
lines = None
sudo = False
try:
if logtype == 'tracejob':
if id is None:
return None
cmd = [os.path.join(
self.pbs_conf['PBS_EXEC'],
'bin',
'tracejob')]
cmd += [str(id)]
lines = self.du.run_cmd(self.hostname, cmd)['out']
if n != 'ALL':
lines = lines[-n:]
else:
if day is None:
day = time.strftime("%Y%m%d", time.localtime(time.time()))
if logtype == 'accounting':
filename = os.path.join(self.pbs_conf['PBS_HOME'],
'server_priv', 'accounting', day)
sudo = True
else:
logval = self._instance_to_logpath(logtype)
if logval:
filename = os.path.join(self.pbs_conf['PBS_HOME'],
logval, day)
if n == 'ALL':
if self._is_local and not sudo:
lines = open(filename)
else:
lines = self.du.cat(self.hostname, filename, sudo=sudo,
level=logging.DEBUG2)['out']
# tail is not a standard, e.g. on Solaris it does not recognize
# -n. We circumvent this problem by using PTL's version of tail
# but it currently only works on a local host, for remote hosts
# we fall back to using tail/head -n
elif self._is_local and not sudo:
if tail:
futils = FileUtils(filename, FILE_TAIL)
else:
futils = FileUtils(filename)
lines = futils.next(n)
else:
if tail:
cmd = ['/usr/bin/tail']
else:
cmd = ['/usr/bin/head']
pyexec = os.path.join(self.pbs_conf['PBS_EXEC'], 'python',
'bin', 'python')
osflav = self.du.get_platform(self.hostname, pyexec)
if osflav.startswith('sunos'):
cmd += ['-']
else:
cmd += ['-n']
cmd += [str(n), filename]
lines = self.du.run_cmd(self.hostname, cmd, sudo=sudo,
level=logging.DEBUG2)['out']
except:
self.logger.error('error in log_lines ')
traceback.print_exc()
return None
return lines
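    # Hedged usage note for log_lines/_log_match: 'svc' stands for any
    # PBSService subclass instance (Server, Scheduler, MoM, Comm); the message
    # matched below is made up for illustration.
    #
    #   lines = svc.log_lines(svc, n=100)   # last 100 lines of the daemon log
    #   m = svc._log_match(svc, 'Log;Svr;started', n='ALL', max_attempts=3)
    #   if m and m[0] is not None:
    #       print(m)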
def _log_match(self, logtype, msg, id=None, n=50, tail=True,
allmatch=False, regexp=False, day=None, max_attempts=1,
interval=1, starttime=None, endtime=None,
level=logging.INFO):
"""
If ``'msg'`` found in the ``'n'`` lines of the log file,
        returns a ``tuple (x,y)`` where x is the matching line
        number and y the line itself. If no match, returns None.
If allmatch is True, a list of tuples is returned.
:param logtype: The entity requested, an instance of a
Scheduler, Server, or MoM object, or the
strings 'tracejob' for tracejob or
'accounting' for accounting logs.
:param id: The id of the object to trace. Only used for
tracejob
:param n: 'ALL' or the number of lines to search through,
defaults to 50
:param tail: If true (default), starts from the end of
the file
:type tail: bool
        :param allmatch: If True, all matching lines out of the n
                         parsed are returned as a list. Defaults
                         to False
:type allmatch: bool
:param regexp: If true msg is a Python regular expression.
Defaults to False
        :type regexp: bool
        :param day: Optional day in YYYYMMDD format.
        :param max_attempts: the number of attempts to make to find
                             a matching entry
        :type max_attempts: int
:param interval: the interval between attempts
:type interval: int
:param starttime: If set ignore matches that occur before
specified time
:param endtime: If set ignore matches that occur after
specified time
.. note:: The matching line number is relative to the record
number, not the absolute line number in the file.
"""
try:
from ptl.utils.pbs_logutils import PBSLogUtils
except:
self.logger.error('error loading ptl.utils.pbs_logutils')
return None
if self.logutils is None:
self.logutils = PBSLogUtils()
rv = (None, None)
attempt = 1
name = self._instance_to_servicename(logtype)
infomsg = (name + ' ' + self.shortname +
' log match: searching for "' + msg + '"')
if regexp:
infomsg += ' - using regular expression '
if allmatch:
infomsg += ' - on all matches '
attemptmsg = ' - No match'
while attempt <= max_attempts:
if attempt > 1:
attemptmsg = ' - attempt ' + str(attempt)
lines = self.log_lines(logtype, id, n=n, tail=tail, day=day,
starttime=starttime, endtime=endtime)
rv = self.logutils.match_msg(lines, msg, allmatch=allmatch,
regexp=regexp, starttime=starttime,
endtime=endtime)
if rv:
self.logger.log(level, infomsg + '... OK')
break
else:
if ((starttime is not None or endtime is not None) and
n != 'ALL'):
                    if attempt == max_attempts:
# We will do one last attempt to match in case the
# number of lines that were provided did not capture
# the start or end time of interest
max_attempts += 1
n = 'ALL'
self.logger.log(level, infomsg + attemptmsg)
attempt += 1
time.sleep(interval)
try:
# Depending on whether the hostname is local or remote and whether
# sudo privileges were required, lines returned by log_lines can be
# an open file descriptor, we close here but ignore errors in case
# any were raised for all irrelevant cases
lines.close()
except:
pass
return rv
def accounting_match(self, msg, id=None, n=50, tail=True,
allmatch=False, regexp=False, day=None,
max_attempts=1, interval=1, starttime=None,
endtime=None):
"""
Find msg in accounting log.
If ``'msg'`` found in the ``'n'`` lines of the log file,
        returns a ``tuple (x,y)`` where x is the matching line
        number and y the line itself. If no match, returns None.
If allmatch is True, a list of tuples is returned.
:param id: The id of the object to trace. Only used for
tracejob
:param n: 'ALL' or the number of lines to search through,
defaults to 50
:type n: int
:param tail: If true (default), starts from the end of
the file
:type tail: bool
        :param allmatch: If True, all matching lines out of the n
                         parsed are returned as a list. Defaults
                         to False
:type allmatch: bool
:param regexp: If true msg is a Python regular expression.
Defaults to False
:type regexp: bool
        :param day: Optional day in YYYYMMDD format.
        :param max_attempts: the number of attempts to make to
                             find a matching entry
        :type max_attempts: int
:param interval: the interval between attempts
:type interval: int
:param starttime: If set ignore matches that occur before
specified time
:param endtime: If set ignore matches that occur after
specified time
.. note:: The matching line number is relative to the
record number, not the absolute line number
in the file.
"""
return self._log_match('accounting', msg, id, n, tail, allmatch,
regexp, day, max_attempts, interval, starttime,
endtime)
def tracejob_match(self, msg, id=None, n=50, tail=True, allmatch=False,
regexp=False, **kwargs):
"""
Find msg in tracejob log. See _log_match for details
"""
        return self._log_match('tracejob', msg, id, n, tail, allmatch,
                               regexp, **kwargs)
def _save_config_file(self, dict_conf, fname):
ret = self.du.cat(self.hostname, fname, sudo=True)
if ret['rc'] == 0:
dict_conf[fname] = ret['out']
else:
self.logger.error('error saving configuration ' + fname)
def _load_configuration(self, infile, objtype=None):
"""
Load configuration as was saved in infile
:param infile: the file in which configuration
was saved
:type infile: str
:param objtype: the object type to load configuration
for, one of server, scheduler, mom or
if None, load all objects in infile
"""
if os.path.isfile(infile):
conf = {}
f = open(infile, 'r')
# load all objects from the Pickled file
while True:
try:
conf = cPickle.load(f)
except:
break
f.close()
if objtype and objtype in conf:
conf = conf[objtype]
else:
# load all object types that could be in infile
newconf = {}
for ky in [MGR_OBJ_SERVER, MGR_OBJ_SCHED, MGR_OBJ_NODE]:
if ky not in conf:
conf[ky] = {}
newconf = dict(newconf.items() + conf[ky].items())
conf = newconf
for k, v in conf.items():
(fd, fn) = self.du.mkstemp()
# handle server data saved as output of qmgr commands by piping
# data back into qmgr
if k.startswith('qmgr_'):
qmgr = os.path.join(self.client_conf['PBS_EXEC'],
'bin', 'qmgr')
os.write(fd, "\n".join(v))
self.du.run_cmd(
self.hostname,
[qmgr],
cstdin=fd,
sudo=True)
else:
os.write(fd, "\n".join(v))
# append the last line
os.write(fd, "\n")
self.du.run_cmd(self.hostname, ['cp', fn, k], sudo=True)
os.close(fd)
os.remove(fn)
return True
return False
def get_tempdir(self):
"""
platform independent call to get a temporary directory
"""
return self.du.get_tempdir(self.hostname)
def __str__(self):
return (self.__class__.__name__ + ' ' + self.hostname + ' config ' +
self.pbs_conf_file)
def __repr__(self):
return (self.__class__.__name__ + '/' + self.pbs_conf_file + '@' +
self.hostname)
class Comm(PBSService):
"""
PBS ``Comm`` configuration and control
"""
"""
:param name: The hostname of the Comm. Defaults to current hostname.
:type name: str
:param attrs: Dictionary of attributes to set, these will override
defaults.
:type attrs: dictionary
:param pbsconf_file: path to config file to parse for PBS_HOME,
PBS_EXEC, etc
:type pbsconf_file: str or None
:param diagmap: A dictionary of PBS objects (node,server,etc) to
mapped files from PBS diag directory
:type diagmap: dictionary
:param diag: path to PBS diag directory (This will override diagmap)
:type diag: str or None
:param server: A PBS server instance to which this Comm is associated
:type server: str
:param db_access: set to either file containing credentials to DB access or
dictionary containing {'dbname':...,'user':...,
'port':...}
:type db_access: str or dictionary
"""
dflt_attributes = {}
def __init__(self, name=None, attrs={}, pbsconf_file=None, diagmap={},
diag=None, server=None, db_access=None):
self.logger = logging.getLogger(__name__)
if server is not None:
self.server = server
if diag is None and self.server.diag is not None:
diag = self.server.diag
if (len(diagmap) == 0) and (len(self.server.diagmap) != 0):
diagmap = self.server.diagmap
else:
self.server = Server(name, pbsconf_file=pbsconf_file,
db_access=db_access, diag=diag,
diagmap=diagmap)
PBSService.__init__(self, name, attrs, self.dflt_attributes,
pbsconf_file, diagmap, diag)
_m = ['Comm ', self.shortname]
if pbsconf_file is not None:
_m += ['@', pbsconf_file]
_m += [': ']
self.logprefix = "".join(_m)
self.conf_to_cmd_map = {
'PBS_COMM_ROUTERS': '-r',
'PBS_COMM_THREADS': '-t'
}
self.pi = PBSInitServices(hostname=self.hostname,
conf=self.pbs_conf_file)
def isUp(self):
"""
Check for comm up
"""
return super(Comm, self)._isUp(self)
def signal(self, sig):
"""
Send signal to comm
"""
self.logger.info(self.logprefix + 'sent signal ' + sig)
return super(Comm, self)._signal(sig, inst=self)
def get_pid(self):
"""
Get the comm pid
"""
return super(Comm, self)._get_pid(inst=self)
def all_instance_pids(self):
"""
Get all pids of given instance
"""
return super(Comm, self)._all_instance_pids(inst=self)
def start(self, args=None, launcher=None):
"""
Start the comm
:param args: Argument required to start the comm
:type args: str
:param launcher: Optional utility to invoke the launch of the service
:type launcher: str or list
"""
if args is not None or launcher is not None:
return super(Comm, self)._start(inst=self, args=args,
cmd_map=self.conf_to_cmd_map,
launcher=launcher)
else:
try:
rv = self.pi.start_comm()
except PbsInitServicesError as e:
raise PbsServiceError(rc=e.rc, rv=e.rv, msg=e.msg)
return rv
def stop(self, sig=None):
"""
Stop the comm.
:param sig: Signal to stop the comm
:type sig: str
"""
if sig is not None:
self.logger.info(self.logprefix + 'stopping Comm on host ' +
self.hostname)
return super(Comm, self)._stop(sig, inst=self)
else:
try:
self.pi.stop_comm()
except PbsInitServicesError as e:
raise PbsServiceError(rc=e.rc, rv=e.rv, msg=e.msg)
return True
def restart(self):
"""
Restart the comm.
"""
if self.isUp():
if not self.stop():
return False
return self.start()
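    # Illustrative Comm lifecycle (hostname and matched message are
    # assumptions):
    #
    #   comm = Comm(name='headnode.example.com')
    #   if not comm.isUp():
    #       comm.start()
    #   comm.log_match('TPP', n=20, max_attempts=2)
    #   comm.restart()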
def log_match(self, msg=None, id=None, n=50, tail=True, allmatch=False,
regexp=False, day=None, max_attempts=1, interval=1,
starttime=None, endtime=None, level=logging.INFO):
"""
Match the comm logs
"""
return self._log_match(self, msg, id, n, tail, allmatch, regexp,
day, max_attempts, interval, starttime, endtime,
level=level)
class Server(PBSService):
"""
PBS server ``configuration`` and ``control``
The Server class is a container to PBS server attributes
and implements wrappers to the ``IFL API`` to perform
operations on the server. For example to submit, status,
delete, manage, etc... jobs, reservations and configurations.
This class also offers higher-level routines to ease testing,
see functions, for ``example: revert_to_defaults,
init_logging, expect, counter.``
The ptl_conf dictionary holds general configuration for the
framework's operations, specifically, one can control:
mode: set to ``PTL_CLI`` to operate in ``CLI`` mode or
``PTL_API`` to operate in ``API`` mode
    expect_max_attempts: the default maximum number of attempts
    to be used by expect. Defaults to 60
    expect_interval: the default time interval (in seconds)
    between expect requests. Defaults to 0.5
    update_attributes: the default on whether Object attributes
    should be updated using a list of dictionaries. Defaults
    to True
:param name: The hostname of the server. Defaults to
calling pbs_default()
:type name: str
:param attrs: Dictionary of attributes to set, these will
override defaults.
:type attrs: Dictionary
:param defaults: Dictionary of default attributes.
Default: dflt_attributes
:type defaults: Dictionary
:param pbsconf_file: path to config file to parse for PBS_HOME,
PBS_EXEC, etc
:type pbsconf_file: str
:param diagmap: A dictionary of PBS objects (node,server,etc)
to mapped files from PBS diag directory
:type diagmap: Dictionary
    :param diag: path to PBS diag directory (This will override
diagmap)
:type diag: str
:param client: The host to use as client for CLI queries.
Defaults to the local hostname.
:type client: str
:param client_pbsconf_file: The path to a custom PBS_CONF_FILE
on the client host. Defaults to
the same path as pbsconf_file.
:type client_pbsconf_file: str
    :param db_access: set to either file containing credentials
to DB access or dictionary containing
{'dbname':...,'user':...,'port':...}
:param stat: if True, stat the server attributes
:type stat: bool
"""
logger = logging.getLogger(__name__)
dflt_attributes = {
ATTR_scheduling: "True",
ATTR_dfltque: "workq",
ATTR_logevents: "511",
ATTR_mailfrom: "adm",
ATTR_queryother: "True",
ATTR_rescdflt + ".ncpus": "1",
ATTR_DefaultChunk + ".ncpus": "1",
ATTR_schedit: "600",
ATTR_ResvEnable: "True",
ATTR_nodefailrq: "310",
ATTR_maxarraysize: "10000",
ATTR_license_linger: "3600",
ATTR_EligibleTimeEnable: "False",
ATTR_max_concurrent_prov: "5",
ATTR_FlatUID: 'True',
}
ptl_conf = {
'mode': PTL_API,
'expect_max_attempts': 60,
'expect_interval': 0.5,
'update_attributes': True,
}
# this pattern is a bit relaxed to match common developer build numbers
version_tag = re.compile("[a-zA-Z_]*(?P<version>[\d\.]+.[\w\d\.]*)[\s]*")
actions = ExpectActions()
def __init__(self, name=None, attrs={}, defaults={}, pbsconf_file=None,
diagmap={}, diag=None, client=None, client_pbsconf_file=None,
db_access=None, stat=True):
self.jobs = {}
self.nodes = {}
self.reservations = {}
self.queues = {}
self.resources = {}
self.hooks = {}
self.pbshooks = {}
self.entities = {}
self.scheduler = None
self.version = None
self.default_queue = None
self.last_error = [] # type: array. Set for CLI IFL errors. Not reset
self.last_rc = None # Set for CLI IFL return code. Not thread-safe
        # default timeout on connect/disconnect set to 60s to mimic the qsub
# buffer introduced in PBS 11
self._conn_timeout = 60
self._conn_timer = None
self._conn = None
self._db_conn = None
self.current_user = pwd.getpwuid(os.getuid())[0]
if len(defaults.keys()) == 0:
defaults = self.dflt_attributes
self.pexpect_timeout = 15
self.pexpect_sleep_time = .1
PBSService.__init__(self, name, attrs, defaults, pbsconf_file, diagmap,
diag)
_m = ['server ', self.shortname]
if pbsconf_file is not None:
_m += ['@', pbsconf_file]
_m += [': ']
self.logprefix = "".join(_m)
self.pi = PBSInitServices(hostname=self.hostname,
conf=self.pbs_conf_file)
self.set_client(client)
if client_pbsconf_file is None:
self.client_pbs_conf_file = self.du.get_pbs_conf_file(self.client)
else:
self.client_pbs_conf_file = client_pbsconf_file
self.client_conf = self.du.parse_pbs_config(
self.client, file=self.client_pbs_conf_file)
if self.client_pbs_conf_file == '/etc/pbs.conf':
self.default_client_pbs_conf = True
elif (('PBS_CONF_FILE' not in os.environ) or
(os.environ['PBS_CONF_FILE'] != self.client_pbs_conf_file)):
self.default_client_pbs_conf = False
else:
self.default_client_pbs_conf = True
a = {}
if os.getuid() == 0:
a = {ATTR_aclroot: 'root'}
self.dflt_attributes.update(a)
if not API_OK:
# mode must be set before the first stat call
self.set_op_mode(PTL_CLI)
if stat:
try:
tmp_attrs = self.status(SERVER, level=logging.DEBUG,
db_access=db_access)
except (PbsConnectError, PbsStatusError):
tmp_attrs = None
if tmp_attrs is not None and len(tmp_attrs) > 0:
self.attributes = tmp_attrs[0]
if ATTR_dfltque in self.attributes:
self.default_queue = self.attributes[ATTR_dfltque]
self.update_version_info()
def update_version_info(self):
"""
Update the version information.
"""
if ATTR_version not in self.attributes:
self.attributes[ATTR_version] = 'unknown'
else:
m = self.version_tag.match(self.attributes[ATTR_version])
if m:
v = m.group('version')
self.version = LooseVersion(v)
self.logger.info(self.logprefix + 'version ' +
self.attributes[ATTR_version])
@classmethod
def set_update_attributes(cls, val):
"""
Set update attributes
"""
cls.logger.info('setting update attributes ' + str(val))
if val == 1 or val[0] in ('t', 'T'):
val = True
else:
val = False
cls.ptl_conf['update_attributes'] = val
@classmethod
def set_expect_max_attempts(cls, val):
"""
Set expect max attempts
"""
cls.logger.info('setting expect max attempts ' + str(val))
cls.ptl_conf['expect_max_attempts'] = int(val)
@classmethod
def set_expect_interval(cls, val):
"""
Set expect interval
"""
cls.logger.info('setting expect interval ' + str(val))
cls.ptl_conf['expect_interval'] = float(val)
def set_client(self, name=None):
"""
Set server client
:param name: Client name
:type name: str
"""
if name is None:
self.client = socket.gethostname()
else:
self.client = name
def _connect(self, hostname, attempt=1):
if ((self._conn is None or self._conn < 0) or
(self._conn_timeout == 0 or self._conn_timer is None)):
self._conn = pbs_connect(hostname)
self._conn_timer = time.time()
if self._conn is None or self._conn < 0:
if attempt > 5:
m = self.logprefix + 'unable to connect'
raise PbsConnectError(rv=None, rc=-1, msg=m)
else:
self._disconnect(self._conn, force=True)
time.sleep(1)
return self._connect(hostname, attempt + 1)
return self._conn
def _disconnect(self, conn, force=False):
"""
disconnect a connection to a Server.
For performance of the API calls, a connection is
maintained up to _conn_timer, unless the force parameter
is set to True
:param conn: Server connection
:param force: If true then diconnect forcefully
:type force: bool
"""
if ((conn is not None and conn >= 0) and
(force or
(self._conn_timeout == 0 or
(self._conn_timer is not None and
(time.time() - self._conn_timer > self._conn_timeout))))):
pbs_disconnect(conn)
self._conn_timer = None
self._conn = None
def set_connect_timeout(self, timeout=0):
"""
Set server connection timeout
:param timeout: Timeout value
:type timeout: int
"""
self._conn_timeout = timeout
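    # Sketch of connection handling: _connect caches the IFL connection handle
    # and _disconnect only tears it down once the configured timeout elapses,
    # unless force=True. Setting the timeout to 0 disconnects after each call.
    #
    #   server.set_connect_timeout(0)
    #   c = server._connect(server.hostname)
    #   server._disconnect(c, force=True)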
def get_op_mode(self):
"""
Returns operating mode for calls to the PBS server.
Currently, two modes are supported, either the ``API``
or the ``CLI``. Default is ``API``
"""
if (not API_OK or (self.ptl_conf['mode'] == PTL_CLI)):
return PTL_CLI
return PTL_API
def set_op_mode(self, mode):
"""
set operating mode to one of either ``PTL_CLI`` or
``PTL_API``.Returns the mode that was set which can
be different from the value requested, for example, if
requesting to set ``PTL_API``, in the absence of the
appropriate SWIG wrappers, the library will fall back to
``CLI``, or if requesting ``PTL_CLI`` and there is no
``PBS_EXEC`` on the system, None is returned.
:param mode: Operating mode
:type mode: str
"""
if mode == PTL_API:
            if self._conn is not None and self._conn < 0:
self._conn = None
if not API_OK:
self.logger.error(self.logprefix +
'API submission is not available')
return PTL_CLI
elif mode == PTL_CLI:
if ((not self.has_diag) and
not os.path.isdir(os.path.join(self.client_conf['PBS_EXEC'],
'bin'))):
self.logger.error(self.logprefix +
'PBS commands are not available')
return None
else:
self.logger.error(self.logprefix + "Unrecognized operating mode")
return None
self.ptl_conf['mode'] = mode
self.logger.info(self.logprefix + 'server operating mode set to ' +
mode)
return mode
def add_expect_action(self, name=None, action=None):
"""
Add an action handler to expect. Expect Actions are
custom handlers that are triggered when an unexpected
value is encountered
:param name: Action name
:type name: str or None
:param action: Action to add
"""
if name is None and action.name is None:
return
if name is None and action.name is not None:
name = action.name
if not self.actions.has_action(name):
self.actions.add_action(action, self.shortname)
def set_attributes(self, a={}):
"""
set server attributes
:param a: Attribute dictionary
:type a: Dictionary
"""
super(Server, self).set_attributes(a)
self.__dict__.update(a)
def isUp(self):
"""
returns ``True`` if server is up and ``False`` otherwise
"""
if self.has_diag:
return True
i = 0
op_mode = self.get_op_mode()
if ((op_mode == PTL_API) and (self._conn is not None)):
self._disconnect(self._conn, force=True)
while i < 20:
rv = False
try:
if op_mode == PTL_CLI:
self.status(SERVER, level=logging.DEBUG, logerr=False)
else:
c = self._connect(self.hostname)
self._disconnect(c, force=True)
return True
except (PbsConnectError, PbsStatusError):
# if the status/connect operation fails then there might be
# chances that server process is running but not responsive
# so we wait until the server is reported operational.
rv = self._isUp(self)
# We really mean to check != False rather than just "rv"
if str(rv) != 'False':
                    self.logger.warning('Server process started ' +
                                        'but not up yet')
time.sleep(1)
i += 1
else:
# status/connect failed + no server process means
# server is actually down
return False
return False
def signal(self, sig):
"""
Send signal to server
:param sig: Signal to send
:type sig: str
"""
self.logger.info('server ' + self.shortname + ': sent signal ' + sig)
return super(Server, self)._signal(sig, inst=self)
def get_pid(self):
"""
Get the server pid
"""
return super(Server, self)._get_pid(inst=self)
def all_instance_pids(self):
"""
Get all pids for a given instance
"""
return super(Server, self)._all_instance_pids(inst=self)
def start(self, args=None, launcher=None):
"""
Start the PBS server
:param args: Argument required to start the server
:type args: str
:param launcher: Optional utility to invoke the launch of the service
:type launcher: str or list
"""
if args is not None or launcher is not None:
rv = super(Server, self)._start(inst=self, args=args,
launcher=launcher)
else:
try:
rv = self.pi.start_server()
except PbsInitServicesError as e:
raise PbsServiceError(rc=e.rc, rv=e.rv, msg=e.msg)
if self.isUp():
return rv
else:
raise PbsServiceError(rv=False, rc=1, msg=rv['err'])
def stop(self, sig=None):
"""
Stop the PBS server
:param sig: Signal to stop PBS server
:type sig: str
"""
if sig is not None:
self.logger.info(self.logprefix + 'stopping Server on host ' +
self.hostname)
rc = super(Server, self)._stop(sig, inst=self)
else:
try:
self.pi.stop_server()
except PbsInitServicesError as e:
raise PbsServiceError(rc=e.rc, rv=e.rv, msg=e.msg,
post=self._disconnect, conn=self._conn,
force=True)
rc = True
self._disconnect(self._conn, force=True)
return rc
def restart(self):
"""
Terminate and start a PBS server.
"""
if self.isUp():
if not self.stop():
return False
return self.start()
def log_match(self, msg=None, id=None, n=50, tail=True, allmatch=False,
regexp=False, day=None, max_attempts=1, interval=1,
starttime=None, endtime=None, level=logging.INFO):
"""
Match the PBS server logs
"""
return self._log_match(self, msg, id, n, tail, allmatch, regexp,
day, max_attempts, interval, starttime, endtime,
level=level)
def revert_to_defaults(self, reverthooks=True, revertqueues=True,
revertresources=True, delhooks=True,
delqueues=True, server_stat=None):
"""
reset server attributes back to out of box defaults.
:param reverthooks: If True disable all hooks. Defaults
to True
:type reverthooks: bool
:param revertqueues: If True disable all non-default
queues. Defaults to True
:type revertqueues: bool
:param revertresources: If True, resourcedef file is
removed. Defaults to True.
Reverting resources causes a server
restart to occur.
:type revertresources: bool
:param delhooks: If True, hooks are deleted, if deletion
fails, fall back to reverting hooks. Defaults
to True.
:type delhooks: bool
:param delqueues: If True, all non-default queues are deleted,
will attempt to delete all jobs first, if it
fails, revertqueues will be honored,
otherwise,revertqueues is ignored. Defaults
to True
:type delqueues: bool
:returns: True upon success and False if an error is
encountered.
:raises: PbsStatusError or PbsManagerError
"""
ignore_attrs = ['id', 'pbs_license', ATTR_NODE_ProvisionEnable]
ignore_attrs += [ATTR_status, ATTR_total, ATTR_count]
ignore_attrs += [ATTR_rescassn, ATTR_FLicenses, ATTR_SvrHost]
ignore_attrs += [ATTR_license_count, ATTR_version, ATTR_managers]
ignore_attrs += [ATTR_pbs_license_info]
unsetlist = []
setdict = {}
self.logger.info(self.logprefix +
'reverting configuration to defaults')
self.cleanup_jobs_and_reservations()
if server_stat is None:
server_stat = self.status(SERVER, level=logging.DEBUG)[0]
for k in server_stat.keys():
if (k in ignore_attrs) or (k in self.dflt_attributes.keys()):
continue
elif (('.' in k) and (k.split('.')[0] in ignore_attrs)):
continue
else:
unsetlist.append(k)
if len(unsetlist) != 0:
self.manager(MGR_CMD_UNSET, MGR_OBJ_SERVER, unsetlist)
for k in self.dflt_attributes.keys():
if(k not in self.attributes or
self.attributes[k] != self.dflt_attributes[k]):
setdict[k] = self.dflt_attributes[k]
if delhooks:
reverthooks = False
hooks = self.status(HOOK, level=logging.DEBUG)
hooks = [h['id'] for h in hooks]
if len(hooks) > 0:
self.manager(MGR_CMD_DELETE, HOOK, id=hooks, expect=True)
if delqueues:
revertqueues = False
queues = self.status(QUEUE, level=logging.DEBUG)
queues = [q['id'] for q in queues]
if len(queues) > 0:
self.manager(MGR_CMD_DELETE, QUEUE, id=queues, expect=True)
a = {ATTR_qtype: 'Execution',
ATTR_enable: 'True',
ATTR_start: 'True'}
self.manager(MGR_CMD_CREATE, QUEUE, a, id='workq', expect=True)
setdict.update({ATTR_dfltque: 'workq'})
if reverthooks:
hooks = self.status(HOOK, level=logging.DEBUG)
hooks = [h['id'] for h in hooks]
a = {ATTR_enable: 'false'}
if len(hooks) > 0:
self.manager(MGR_CMD_SET, MGR_OBJ_HOOK, a, hooks, expect=True)
if revertqueues:
self.status(QUEUE, level=logging.DEBUG)
queues = []
for (qname, qobj) in self.queues.items():
# skip reservation queues. This syntax for Python 2.4
# compatibility
if (qname.startswith('R') or qname.startswith('S') or
qname == server_stat[ATTR_dfltque]):
continue
qobj.revert_to_defaults()
queues.append(qname)
a = {ATTR_enable: 'false'}
self.manager(MGR_CMD_SET, QUEUE, a, id=queues, expect=True)
a = {ATTR_enable: 'True', ATTR_start: 'True'}
self.manager(MGR_CMD_SET, MGR_OBJ_QUEUE, a,
id=server_stat[ATTR_dfltque], expect=True)
if len(setdict) > 0:
self.manager(MGR_CMD_SET, MGR_OBJ_SERVER, setdict)
if revertresources:
try:
rescs = self.status(RSC)
rescs = [r['id'] for r in rescs]
except:
rescs = []
if len(rescs) > 0:
self.manager(MGR_CMD_DELETE, RSC, id=rescs, expect=True)
return True
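    # Hedged example of resetting a test server between test cases (the
    # keyword choices below mirror the defaults and are only illustrative):
    #
    #   server = Server()
    #   server.revert_to_defaults(delhooks=True, delqueues=True,
    #                             revertresources=True)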
def save_configuration(self, outfile, mode='a'):
"""
Save a server configuration, this includes:
- ``server_priv/resourcedef``
- ``qmgr -c "print server"``
- ``qmgr -c "print sched"``
- ``qmgr -c "print hook"``
        :param outfile: the output file to which configuration is
saved
:type outfile: str
:param mode: The mode in which to open outfile to save
configuration. The first object being saved
should open this file with 'w' and subsequent
calls from other objects should save with
                     mode 'a' or 'a+'. Defaults to 'a'
:type mode: str
:returns: True on success, False on error
"""
conf = {}
sconf = {MGR_OBJ_SERVER: conf}
rd = os.path.join(self.pbs_conf['PBS_HOME'], 'server_priv',
'resourcedef')
self._save_config_file(conf, rd)
qmgr = os.path.join(self.client_conf['PBS_EXEC'], 'bin', 'qmgr')
ret = self.du.run_cmd(self.client, [qmgr, '-c', 'p s'], sudo=True)
if ret['rc'] != 0:
return False
else:
conf['qmgr_print_server'] = ret['out']
ret = self.du.run_cmd(self.hostname, [qmgr, '-c', 'p sched'],
sudo=True)
if ret['rc'] != 0:
return False
else:
conf['qmgr_print_sched'] = ret['out']
ret = self.du.run_cmd(self.hostname, [qmgr, '-c', 'p h'], sudo=True)
if ret['rc'] != 0:
return False
else:
conf['qmgr_print_hook'] = ret['out']
try:
f = open(outfile, mode)
cPickle.dump(sconf, f)
f.close()
except:
self.logger.error('Error processing file ' + outfile)
return False
return True
def load_configuration(self, infile):
"""
load configuration from saved file ``infile``
"""
self.revert_to_defaults()
self._load_configuration(infile, MGR_OBJ_SERVER)
def get_hostname(self):
"""
return the default server hostname
"""
if self.get_op_mode() == PTL_CLI:
return self.hostname
return pbs_default()
def _db_connect(self, db_access=None):
if self._db_conn is None:
if 'user' not in db_access or\
'password' not in db_access:
self.logger.error('missing credentials to access DB')
return None
if 'dbname' not in db_access:
db_access['dbname'] = 'pbs_datastore'
if 'port' not in db_access:
db_access['port'] = '15007'
if 'host' not in db_access:
db_access['host'] = self.hostname
user = db_access['user']
dbname = db_access['dbname']
port = db_access['port']
password = db_access['password']
host = db_access['host']
cred = "host=%s dbname=%s user=%s password=%s port=%s" % \
(host, dbname, user, password, port)
self._db_conn = psycopg2.connect(cred)
return self._db_conn
def _db_server_host(self, cur=None, db_access=None):
"""
Get the server host name from the database. The server
host name is stored in the pbs.server table and not in
pbs.server_attr.
:param cur: Optional, a predefined cursor to use to
operate on the DB
:param db_access: set to either file containing
credentials to DB access or
dictionary containing
``{'dbname':...,'user':...,'port':...}``
:type db_access: str or dictionary
"""
local_init = False
if cur is None:
conn = self._db_connect(db_access)
local_init = True
if conn is None:
return None
cur = conn.cursor()
# obtain server name. The server hostname is stored in table
# pbs.server
cur.execute('SELECT sv_hostname from pbs.server')
if local_init:
conn.commit()
tmp_query = cur.fetchone()
if len(tmp_query) > 0:
svr_host = tmp_query[0]
else:
svr_host = "unknown"
return svr_host
def status_db(self, obj_type=None, attrib=None, id=None, db_access=None,
logerr=True):
"""
Status PBS objects from the SQL database
:param obj_type: The type of object to query, one of the
* objects. Defaults to SERVER
:param attrib: Attributes to query, can be a string, a list,
or a dictionary. Defaults to None, in which case all
attributes are queried
:type attrib: str or list or dictionary
:param id: An optional identifier, the name of the object
to status
:type id: str
:param db_access: information needed to access the database,
can be either a file containing user,
port, dbname, password info or a
dictionary of key/value entries
:type db_access: str or dictionary
"""
if not PSYCOPG:
self.logger.error('psycopg module unavailable, install from ' +
'http://initd.org/psycopg/ and retry')
return None
if not isinstance(db_access, dict):
try:
f = open(db_access, 'r')
except IOError:
self.logger.error('Unable to access ' + db_access)
return None
lines = f.readlines()
db_access = {}
for line in lines:
(k, v) = line.split('=')
db_access[k] = v
conn = self._db_connect(db_access)
if conn is None:
return None
cur = conn.cursor()
stmt = []
if obj_type == SERVER:
stmt = ["SELECT sv_name,attr_name,attr_resource,attr_value " +
"FROM pbs.server_attr"]
svr_host = self.hostname # self._db_server_host(cur)
elif obj_type == SCHED:
stmt = ["SELECT sched_name,attr_name,attr_resource,attr_value " +
"FROM pbs.scheduler_attr"]
# reuse server host name for sched host
svr_host = self.hostname
elif obj_type == JOB:
stmt = ["SELECT ji_jobid,attr_name,attr_resource,attr_value " +
"FROM pbs.job_attr"]
if id:
id_stmt = ["ji_jobid='" + id + "'"]
elif obj_type == QUEUE:
stmt = ["SELECT qu_name,attr_name,attr_resource,attr_value " +
"FROM pbs.queue_attr"]
if id:
id_stmt = ["qu_name='" + id + "'"]
elif obj_type == RESV:
stmt = ["SELECT ri_resvid,attr_name,attr_resource,attr_value " +
"FROM pbs.resv_attr"]
if id:
id_stmt = ["ri_resvid='" + id + "'"]
elif obj_type in (NODE, VNODE):
stmt = ["SELECT nd_name,attr_name,attr_resource,attr_value " +
"FROM pbs.node_attr"]
if id:
id_stmt = ["nd_name='" + id + "'"]
else:
self.logger.error('status: object type not handled')
return None
if attrib or id:
stmt += ["WHERE"]
extra_stmt = []
if attrib:
if isinstance(attrib, dict):
attrs = attrib.keys()
elif isinstance(attrib, list):
attrs = attrib
elif isinstance(attrib, str):
attrs = attrib.split(',')
for a in attrs:
extra_stmt += ["attr_name='" + a + "'"]
stmt += [" OR ".join(extra_stmt)]
if id:
stmt += [" AND ", " AND ".join(id_stmt)]
exec_stmt = " ".join(stmt)
self.logger.debug('server: executing db statement: ' + exec_stmt)
cur.execute(exec_stmt)
conn.commit()
_results = cur.fetchall()
obj_dict = {}
for _res in _results:
if obj_type in (SERVER, SCHED):
obj_name = svr_host
else:
obj_name = _res[0]
if obj_name not in obj_dict:
obj_dict[obj_name] = {'id': obj_name}
attr = _res[1]
if _res[2]:
attr += '.' + _res[2]
obj_dict[obj_name][attr] = _res[3]
return obj_dict.values()
#
# Begin IFL Wrappers
#
def status(self, obj_type=SERVER, attrib=None, id=None,
extend=None, level=logging.INFO, db_access=None, runas=None,
resolve_indirectness=False, logerr=True):
"""
Stat any PBS object ``[queue, server, node, hook, job,
resv, sched]``.If the Server is setup from diag input,
see diag or diagmap member, the status calls are routed
directly to the data on files from diag.
The server can be queried either through the 'qstat'
command line tool or through the wrapped PBS IFL api,
see set_op_mode.
Return a dictionary representation of a batch status object
raises ``PbsStatsuError on error``.
:param obj_type: The type of object to query, one of the *
objects.Default: SERVER
:param attrib: Attributes to query, can be a string, a
list, a dictionary.Default is to query all
attributes.
:type attrib: str or list or dictionary
:param id: An optional id, the name of the object to status
:type id: str
:param extend: Optional extension to the IFL call
:param level: The logging level, defaults to INFO
:type level: str
:param db_access: set to either file containing credentials
to DB access or dictionary containing
``{'dbname':...,'user':...,'port':...}``
:type db_access: str or dictionary
:param runas: run stat as user
:type runas: str
:param resolve_indirectness: If True resolves indirect node
resources values
:type resolve_indirectness: bool
:param logerr: If True (default) logs run_cmd errors
:type logerr: bool
In addition to the standard IFL stat call, this wrapper handles
a few cases that aren't implicitly offered by pbs_stat*:
hooks, resources, and formula evaluation.
"""
prefix = 'status on ' + self.shortname
if runas:
prefix += ' as ' + str(runas)
prefix += ': '
self.logit(prefix, obj_type, attrib, id, level)
bs = None
bsl = []
freebs = False
# 2 - Special handling for gathering the job formula value.
if attrib is not None and PTL_FORMULA in attrib:
if (((isinstance(attrib, list) or isinstance(attrib, dict)) and
(len(attrib) == 1)) or
(isinstance(attrib, str) and len(attrib.split(',')) == 1)):
bsl = self.status(
JOB, 'Resource_List.select', id=id, extend='t')
if self.scheduler is None:
self.scheduler = Scheduler(self.hostname)
if 'log_filter' in self.scheduler.sched_config:
_prev_filter = self.scheduler.sched_config['log_filter']
if int(_prev_filter) & 2048:
self.scheduler.set_sched_config(
{'log_filter': 2048})
self.manager(MGR_CMD_SET, SERVER, {'scheduling': 'True'})
if id is None:
_formulas = self.scheduler.job_formula()
else:
_formulas = {id: self.scheduler.job_formula(jobid=id)}
if not int(_prev_filter) & 2048:
self.scheduler.set_sched_config(
{'log_filter': int(_prev_filter)})
if len(bsl) == 0:
bsl = [{'id': id}]
for _b in bsl:
if _b['id'] in _formulas:
_b[PTL_FORMULA] = _formulas[_b['id']]
return bsl
# 3- Serve data from database if requested... and available for the
# given object type
if db_access and obj_type in (SERVER, SCHED, NODE, QUEUE, RESV, JOB):
bsl = self.status_db(obj_type, attrib, id, db_access=db_access,
logerr=logerr)
# 4- Serve data from diag files
elif obj_type in self.diagmap:
if obj_type in (HOOK, PBS_HOOK):
for f in self.diagmap[obj_type]:
_b = self.utils.file_to_dictlist(f, attrib)
if _b and 'hook_name' in _b[0]:
_b[0]['id'] = _b[0]['hook_name']
else:
_b[0]['id'] = os.path.basename(f)
if id is None or id == _b[0]['id']:
bsl.extend(_b)
else:
bsl = self.utils.file_to_dictlist(self.diagmap[obj_type],
attrib, id=id)
# 6- Stat using PBS CLI commands
elif self.get_op_mode() == PTL_CLI:
tgt = self.client
if obj_type in (JOB, QUEUE, SERVER):
pcmd = [os.path.join(
self.client_conf['PBS_EXEC'],
'bin',
'qstat')]
if extend:
pcmd += ['-' + extend]
if obj_type == JOB:
pcmd += ['-f']
if id:
pcmd += [id]
else:
pcmd += ['@' + self.hostname]
elif obj_type == QUEUE:
pcmd += ['-Qf']
if id:
if '@' not in id:
pcmd += [id + '@' + self.hostname]
else:
pcmd += [id]
else:
pcmd += ['@' + self.hostname]
elif obj_type == SERVER:
pcmd += ['-Bf', self.hostname]
elif obj_type in (NODE, VNODE, HOST):
pcmd = [os.path.join(self.client_conf['PBS_EXEC'], 'bin',
'pbsnodes')]
pcmd += ['-s', self.hostname]
if obj_type in (NODE, VNODE):
pcmd += ['-v']
if obj_type == HOST:
pcmd += ['-H']
if id:
pcmd += [id]
else:
pcmd += ['-a']
elif obj_type == RESV:
pcmd = [os.path.join(self.client_conf['PBS_EXEC'], 'bin',
'pbs_rstat')]
pcmd += ['-f']
if id:
pcmd += [id]
elif obj_type in (SCHED, PBS_HOOK, HOOK, RSC):
try:
rc = self.manager(MGR_CMD_LIST, obj_type, attrib, id,
runas=runas, level=level, logerr=logerr)
except PbsManagerError, e:
rc = e.rc
# PBS bug: having no hooks yields a return code of 1; ignore it
if obj_type != HOOK:
raise PbsStatusError(
rc=rc, rv=[], msg=self.geterrmsg())
if rc == 0:
if obj_type == HOOK:
o = self.hooks
elif obj_type == PBS_HOOK:
o = self.pbshooks
elif obj_type == SCHED:
if self.scheduler is None:
return []
o = {'sched': self.scheduler}
elif obj_type == RSC:
o = self.resources
if id:
if id in o:
return [o[id].attributes]
else:
return None
return [h.attributes for h in o.values()]
return []
else:
self.logger.error(self.logprefix + "unrecognized object type")
raise PbsStatusError(rc=-1, rv=[],
msg="unrecognized object type")
return None
# as_script is used to circumvent some shells that will not pass
# along environment variables when invoking a command through sudo
if not self.default_client_pbs_conf:
pcmd = ['PBS_CONF_FILE=' + self.client_pbs_conf_file] + pcmd
as_script = True
elif obj_type == RESV and not self._is_local:
pcmd = ['PBS_SERVER=' + self.hostname] + pcmd
as_script = True
else:
as_script = False
ret = self.du.run_cmd(tgt, pcmd, runas=runas, as_script=as_script,
level=logging.INFOCLI, logerr=logerr)
o = ret['out']
if ret['err'] != ['']:
self.last_error = ret['err']
self.last_rc = ret['rc']
if ret['rc'] != 0:
raise PbsStatusError(rc=ret['rc'], rv=[], msg=self.geterrmsg())
bsl = self.utils.convert_to_dictlist(o, attrib, mergelines=True)
# 7- Stat with impersonation over PBS IFL swig-wrapped API
elif runas is not None:
_data = {'obj_type': obj_type, 'attrib': attrib, 'id': id}
bsl = self.pbs_api_as('status', user=runas, data=_data,
extend=extend)
else:
# 8- Stat over PBS IFL API
#
# resources are special attributes, all resources are queried as
# a single attribute.
# e.g. querying the resources_available attribute returns all
# resources such as ncpus, mem etc. when querying for
# resources_available.ncpus and resources_available.mem only query
# resources_available once and retrieve the resources desired from
# there
if isinstance(attrib, dict):
attribcopy = {}
restype = []
for k, v in attrib.items():
if isinstance(v, tuple):
# SET requires special handling because status may
# have been called through counter to count the number
# of objects that have a given attribute set; in this case
# we set the attribute to an empty string rather than
# the number of elements requested. This is a
# side-effect of the way pbs_statjob works
if v[0] in (SET, MATCH_RE):
v = ''
else:
v = v[1]
if callable(v):
v = ''
if '.' in k:
_r = k.split('.')[0]
if _r not in restype:
attribcopy[k] = v
restype.append(_r)
else:
attribcopy[k] = v
elif isinstance(attrib, list):
attribcopy = []
for k in attrib:
if '.' in k:
_found = False
for _e in attribcopy:
_r = k.split('.')[0]
if _r == _e.split('.')[0]:
_found = True
break
if not _found:
attribcopy.append(k)
else:
attribcopy.append(k)
else:
attribcopy = attrib
a = self.utils.convert_to_attrl(attribcopy)
c = self._connect(self.hostname)
if obj_type == JOB:
bs = pbs_statjob(c, id, a, extend)
elif obj_type == QUEUE:
bs = pbs_statque(c, id, a, extend)
elif obj_type == SERVER:
bs = pbs_statserver(c, a, extend)
elif obj_type == HOST:
bs = pbs_statnode(c, id, a, extend)
elif obj_type == VNODE:
bs = pbs_statvnode(c, id, a, extend)
elif obj_type == RESV:
bs = pbs_statresv(c, id, a, extend)
elif obj_type == SCHED:
bs = pbs_statsched(c, a, extend)
elif obj_type == RSC:
# up to PBSPro 12.3 pbs_statrsc was not in pbs_ifl.h
bs = pbs_statrsc(c, id, a, extend)
elif obj_type in (HOOK, PBS_HOOK):
if os.getuid() != 0:
try:
rc = self.manager(MGR_CMD_LIST, obj_type, attrib,
id, level=level)
if rc == 0:
if id:
if (obj_type == HOOK and
id in self.hooks):
return [self.hooks[id].attributes]
elif (obj_type == PBS_HOOK and
id in self.pbshooks):
return [self.pbshooks[id].attributes]
else:
return None
if obj_type == HOOK:
return [h.attributes for h in
self.hooks.values()]
elif obj_type == PBS_HOOK:
return [h.attributes for h in
self.pbshooks.values()]
except:
pass
else:
bs = pbs_stathook(c, id, a, extend)
else:
self.logger.error(self.logprefix +
"unrecognized object type " + str(obj_type))
freebs = True
err = self.geterrmsg()
self._disconnect(c)
if err:
raise PbsStatusError(rc=-1, rv=[], msg=err)
if not isinstance(bs, list):
bsl = self.utils.batch_status_to_dictlist(bs, attrib)
else:
bsl = self.utils.filter_batch_status(bs, attrib)
# Update each object's dictionary with corresponding attributes and
# values
self.update_attributes(obj_type, bsl)
# Hook stat is done through CLI, no need to free the batch_status
if (not isinstance(bs, list) and freebs and
obj_type not in (HOOK, PBS_HOOK) and os.getuid() != 0):
pbs_statfree(bs)
# 9- Resolve indirect resources
if obj_type in (NODE, VNODE) and resolve_indirectness:
nodes = {}
for _b in bsl:
for k, v in _b.items():
if v.startswith('@'):
if v[1:] in nodes:
_b[k] = nodes[v[1:]][k]
else:
for l in bsl:
if l['id'] == v[1:]:
nodes[k] = l[k]
_b[k] = l[k]
break
del nodes
return bsl
def submit_interactive_job(self, job, cmd):
"""
submit an ``interactive`` job. Returns a job identifier
or raises PbsSubmitError on error
:param cmd: The command to run to submit the interactive
job
:type cmd: str
:param job: the job object. The job must have the attribute
'interactive_job' populated. That attribute is
a list of tuples of the form:
(<command>, <expected output>, <...>)
For example, to send the command hostname and expect
'myhost.mydomain' one would set:
job.interactive_job = [('hostname', 'myhost.mydomain')]
If more than one line is expected, the additional
lines are appended to the tuple.
:raises: PbsSubmitError
"""
ij = InteractiveJob(job, cmd, self.hostname)
# start the interactive job submission thread and wait to pick up
# the actual job identifier
ij.start()
while ij.jobid is None:
continue
return ij.jobid
def submit(self, obj, script=None, extend=None, submit_dir=None):
"""
Submit a job or reservation. Returns a job identifier
or raises PbsSubmitError on error
:param obj: The Job or Reservation instance to submit
:param script: Path to a script to submit. Defaults to None,
in which case the executable /bin/sleep 100 is submitted
:type script: str or None
:param extend: Optional extension to the IFL call.
see pbs_ifl.h
:type extend: str or None
:param submit_dir: directory from which job is submitted.
Defaults to temporary directory
:type submit_dir: str or None
:raises: PbsSubmitError
"""
_interactive_job = False
as_script = False
rc = None
if isinstance(obj, Job):
if script is None and obj.script is not None:
script = obj.script
if ATTR_inter in obj.attributes:
_interactive_job = True
if ATTR_executable in obj.attributes:
del obj.attributes[ATTR_executable]
if ATTR_Arglist in obj.attributes:
del obj.attributes[ATTR_Arglist]
elif not isinstance(obj, Reservation):
m = self.logprefix + "unrecognized object type"
self.logger.error(m)
return None
if submit_dir is None:
submit_dir = tempfile.gettempdir()
cwd = os.getcwd()
os.chdir(submit_dir)
c = None
# 1- Submission using the command line tools
if self.get_op_mode() == PTL_CLI:
exclude_attrs = [] # list of attributes to not convert to CLI
if isinstance(obj, Job):
runcmd = [os.path.join(self.client_conf['PBS_EXEC'], 'bin',
'qsub')]
elif isinstance(obj, Reservation):
runcmd = [os.path.join(self.client_conf['PBS_EXEC'], 'bin',
'pbs_rsub')]
if ATTR_resv_start in obj.custom_attrs:
start = obj.custom_attrs[ATTR_resv_start]
obj.custom_attrs[ATTR_resv_start] = \
self.utils.convert_seconds_to_resvtime(start)
if ATTR_resv_end in obj.custom_attrs:
end = obj.custom_attrs[ATTR_resv_end]
obj.custom_attrs[ATTR_resv_end] = \
self.utils.convert_seconds_to_resvtime(end)
if ATTR_resv_timezone in obj.custom_attrs:
exclude_attrs += [ATTR_resv_timezone, ATTR_resv_standing]
# handling of impersonation differs widely across OS's,
# when setting PBS_TZID we standardize on running the cmd
# as a script instead of customizing for each OS flavor
_tz = obj.custom_attrs[ATTR_resv_timezone]
runcmd = ['PBS_TZID=' + _tz] + runcmd
as_script = True
if ATTR_resv_rrule in obj.custom_attrs:
_rrule = obj.custom_attrs[ATTR_resv_rrule]
if _rrule[0] not in ("'", '"'):
_rrule = "'" + _rrule + "'"
obj.custom_attrs[ATTR_resv_rrule] = _rrule
if not self._is_local:
if ATTR_queue not in obj.attributes:
runcmd += ['-q@' + self.hostname]
elif '@' not in obj.attributes[ATTR_queue]:
curq = obj.attributes[ATTR_queue]
runcmd += ['-q' + curq + '@' + self.hostname]
if obj.custom_attrs and (ATTR_queue in obj.custom_attrs):
del obj.custom_attrs[ATTR_queue]
_conf = self.default_client_pbs_conf
cmd = self.utils.convert_to_cli(obj.custom_attrs, IFL_SUBMIT,
self.hostname, dflt_conf=_conf,
exclude_attrs=exclude_attrs)
if cmd is None:
try:
os.chdir(cwd)
except OSError:
pass
return None
runcmd += cmd
if script:
runcmd += [script]
else:
if ATTR_executable in obj.attributes:
runcmd += ['--', obj.attributes[ATTR_executable]]
if ((ATTR_Arglist in obj.attributes) and
(obj.attributes[ATTR_Arglist] is not None)):
args = obj.attributes[ATTR_Arglist]
arglist = self.utils.convert_arglist(args)
if arglist is None:
try:
os.chdir(cwd)
except OSError:
pass
return None
runcmd += [arglist]
if obj.username != self.current_user:
runas = obj.username
else:
runas = None
if _interactive_job:
ijid = self.submit_interactive_job(obj, runcmd)
try:
os.chdir(cwd)
except OSError:
pass
return ijid
if not self.default_client_pbs_conf:
runcmd = [
'PBS_CONF_FILE=' + self.client_pbs_conf_file] + runcmd
as_script = True
ret = self.du.run_cmd(self.client, runcmd, runas=runas,
level=logging.INFOCLI, as_script=as_script,
logerr=False)
if ret['rc'] != 0:
objid = None
else:
objid = ret['out'][0]
if ret['err'] != ['']:
self.last_error = ret['err']
self.last_rc = rc = ret['rc']
# 2- Submission with impersonation over API
elif obj.username != self.current_user:
# submitting a job as another user requires setting the uid to that
# user; it's done in a separate process
obj.set_variable_list(obj.username, submit_dir)
obj.set_attributes()
if (obj.script is not None and not self._is_local):
# This copy assumes that the file system layout on the
# remote host is identical to the local host. When not
# the case, this code will need to be updated to copy
# to a known remote location and update the obj.script
self.du.run_copy(self.hostname, obj.script, obj.script)
os.remove(obj.script)
objid = self.pbs_api_as('submit', obj, user=obj.username,
extend=extend)
# 3- Submission as current user over API
else:
c = self._connect(self.hostname)
if isinstance(obj, Job):
if script:
if ATTR_o not in obj.attributes:
obj.attributes[ATTR_o] = (self.hostname + ':' +
obj.script + '.o')
if ATTR_e not in obj.attributes:
obj.attributes[ATTR_e] = (self.hostname + ':' +
obj.script + '.e')
sc = os.path.basename(script)
obj.unset_attributes([ATTR_executable, ATTR_Arglist])
if ATTR_N not in obj.custom_attrs:
obj.attributes[ATTR_N] = sc
if ATTR_queue in obj.attributes:
destination = obj.attributes[ATTR_queue]
# the queue attribute must be removed, otherwise it causes
# the submit to fail silently
del obj.attributes[ATTR_queue]
else:
destination = None
if (ATTR_o not in obj.attributes or
ATTR_e not in obj.attributes):
fn = self.utils.random_str(
length=4, prefix='PtlPbsJob')
tmp = self.du.get_tempdir(self.hostname)
fn = os.path.join(tmp, fn)
if ATTR_o not in obj.attributes:
obj.attributes[ATTR_o] = (self.hostname + ':' +
fn + '.o')
if ATTR_e not in obj.attributes:
obj.attributes[ATTR_e] = (self.hostname + ':' +
fn + '.e')
obj.attropl = self.utils.dict_to_attropl(obj.attributes)
objid = pbs_submit(c, obj.attropl, script, destination,
extend)
elif isinstance(obj, Reservation):
if ATTR_resv_duration in obj.attributes:
# reserve_duration is not a valid attribute, the API call
# will get rejected if it is used
wlt = ATTR_l + '.walltime'
obj.attributes[wlt] = obj.attributes[ATTR_resv_duration]
del obj.attributes[ATTR_resv_duration]
obj.attropl = self.utils.dict_to_attropl(obj.attributes)
objid = pbs_submit_resv(c, obj.attropl, extend)
prefix = 'submit to ' + self.shortname + ' as '
if isinstance(obj, Job):
self.logit(prefix + '%s: ' % obj.username, JOB, obj.custom_attrs,
objid)
if obj.script_body:
self.logger.log(logging.INFOCLI, 'job script ' + script +
'\n---\n' + obj.script_body + '\n---')
if objid is not None:
self.jobs[objid] = obj
elif isinstance(obj, Reservation):
# Reservations without -I option return as 'R123 UNCONFIRMED'
# so split to get the R123 only
self.logit(prefix + '%s: ' % obj.username, RESV, obj.attributes,
objid)
if objid is not None:
objid = objid.split()[0]
self.reservations[objid] = obj
if objid is not None:
obj.server[self.hostname] = objid
else:
try:
os.chdir(cwd)
except OSError:
pass
raise PbsSubmitError(rc=rc, rv=None, msg=self.geterrmsg(),
post=self._disconnect, conn=c)
if c:
self._disconnect(c)
try:
os.chdir(cwd)
except OSError:
pass
return objid
def deljob(self, id=None, extend=None, runas=None, wait=False,
logerr=True, attr_W=None):
"""
delete a single job or list of jobs specified by id
raises ``PbsDeljobError`` on error
:param id: The identifier(s) of the jobs to delete
:type id: str or list
:param extend: Optional parameters to pass along to PBS
:type extend: str or None
:param runas: run as user
:type runas: str or None
:param wait: Set to True to wait for job(s) to no longer
be reported by PBS. False by default
:type wait: bool
:param logerr: Whether to log errors. Defaults to True.
:type logerr: bool
:param attr_W: -W args to qdel (CLI mode only)
:type attr_W: str
:raises: PbsDeljobError
"""
prefix = 'delete job on ' + self.shortname
if runas is not None:
prefix += ' as ' + str(runas)
prefix += ': '
if id is not None:
if not isinstance(id, list):
id = id.split(',')
prefix += ', '.join(id)
self.logger.info(prefix)
c = None
if self.get_op_mode() == PTL_CLI:
pcmd = [os.path.join(self.client_conf['PBS_EXEC'], 'bin', 'qdel')]
if extend is not None:
pcmd += self.utils.convert_to_cli(extend, op=IFL_DELETE,
hostname=self.hostname)
if attr_W is not None:
pcmd += ['-W']
if attr_W != PTL_NOARG:
pcmd += [attr_W]
if id is not None:
pcmd += id
if not self.default_client_pbs_conf:
pcmd = ['PBS_CONF_FILE=' + self.client_pbs_conf_file] + pcmd
as_script = True
elif not self._is_local:
pcmd = ['PBS_SERVER=' + self.hostname] + pcmd
as_script = True
else:
as_script = False
ret = self.du.run_cmd(self.client, pcmd, runas=runas,
as_script=as_script, logerr=logerr,
level=logging.INFOCLI)
rc = ret['rc']
if ret['err'] != ['']:
self.last_error = ret['err']
self.last_rc = rc
elif runas is not None:
rc = self.pbs_api_as('deljob', id, user=runas, extend=extend)
else:
c = self._connect(self.hostname)
rc = 0
for ajob in id:
tmp_rc = pbs_deljob(c, ajob, extend)
if tmp_rc != 0:
rc = tmp_rc
if rc != 0:
raise PbsDeljobError(rc=rc, rv=False, msg=self.geterrmsg(),
post=self._disconnect, conn=c)
if self.jobs is not None:
for j in id:
if j in self.jobs:
if self.jobs[j].interactive_handle is not None:
self.jobs[j].interactive_handle.close()
del self.jobs[j]
if c:
self._disconnect(c)
if wait:
for oid in id:
self.expect(JOB, 'queue', id=oid, op=UNSET, runas=runas,
level=logging.DEBUG)
return rc
def delresv(self, id=None, extend=None, runas=None, wait=False,
logerr=True):
"""
delete a single job or list of jobs specified by id
raises ``PbsDeljobError`` on error
:param id: The identifier(s) of the jobs to delete
:type id: str or list
:param extend: Optional parameters to pass along to PBS
:type extend: str or None
:param runas: run as user
:type runas: str or None
:param wait: Set to True to wait for job(s) to no longer
be reported by PBS. False by default
:type wait: bool
:param logerr: Whether to log errors. Defaults to True.
:type logerr: bool
:raises: PbsDeljobError
"""
prefix = 'delete resv on ' + self.shortname
if runas is not None:
prefix += ' as ' + str(runas)
prefix += ': '
if id is not None:
if not isinstance(id, list):
id = id.split(',')
prefix += ', '.join(id)
self.logger.info(prefix)
c = None
if self.get_op_mode() == PTL_CLI:
pcmd = [os.path.join(self.client_conf['PBS_EXEC'], 'bin',
'pbs_rdel')]
if id is not None:
pcmd += id
if not self.default_client_pbs_conf:
pcmd = ['PBS_CONF_FILE=' + self.client_pbs_conf_file] + pcmd
as_script = True
elif not self._is_local:
pcmd = ['PBS_SERVER=' + self.hostname] + pcmd
as_script = True
else:
as_script = False
ret = self.du.run_cmd(self.client, pcmd, runas=runas,
as_script=as_script, logerr=logerr,
level=logging.INFOCLI)
rc = ret['rc']
if ret['err'] != ['']:
self.last_error = ret['err']
self.last_rc = rc
elif runas is not None:
rc = self.pbs_api_as('delresv', id, user=runas, extend=extend)
else:
c = self._connect(self.hostname)
rc = 0
for ajob in id:
tmp_rc = pbs_delresv(c, ajob, extend)
if tmp_rc != 0:
rc = tmp_rc
if rc != 0:
raise PbsDelresvError(rc=rc, rv=False, msg=self.geterrmsg(),
post=self._disconnect, conn=c)
if self.reservations is not None:
for j in id:
if j in self.reservations:
del self.reservations[j]
if c:
self._disconnect(c)
if wait:
for oid in id:
self.expect(RESV, 'queue', id=oid, op=UNSET, runas=runas,
level=logging.DEBUG)
return rc
def delete(self, id=None, extend=None, runas=None, wait=False,
logerr=True):
"""
delete a single job or list of jobs specified by id
raises ``PbsDeleteError`` on error
:param id: The identifier(s) of the jobs/resvs to delete
:type id: str or list
:param extend: Optional parameters to pass along to PBS
:type extend: str or none
:param runas: run as user
:type runas: str
:param wait: Set to True to wait for job(s)/resv(s) to
no longer be reported by PBS. False by default
:type wait: bool
:param logerr: Whether to log errors. Defaults to True.
:type logerr: bool
:raises: PbsDeleteError
"""
prefix = 'delete on ' + self.shortname
if runas is not None:
prefix += ' as ' + str(runas)
prefix += ': '
if id is not None:
if not isinstance(id, list):
id = id.split(',')
prefix += ','.join(id)
if extend is not None:
prefix += ' with ' + str(extend)
self.logger.info(prefix)
if not len(id) > 0:
return 0
obj_type = {}
for j in id:
if j[0] in ('R', 'S'):
obj_type[j] = RESV
try:
rc = self.delresv(j, extend, runas, logerr=logerr)
except PbsDelresvError, e:
rc = e.rc
msg = e.msg
rv = e.rv
else:
obj_type[j] = JOB
try:
rc = self.deljob(j, extend, runas, logerr=logerr)
except PbsDeljobError, e:
rc = e.rc
msg = e.msg
rv = e.rv
if rc != 0:
raise PbsDeleteError(rc=rc, rv=rv, msg=msg)
if wait:
for oid in id:
self.expect(obj_type[oid], 'queue', id=oid, op=UNSET,
runas=runas, level=logging.DEBUG)
return rc
def select(self, attrib=None, extend=None, runas=None, logerr=True):
"""
Select jobs that match an attributes list, or all jobs if no
attributes are given. Raises ``PbsSelectError`` on error
:param attrib: A string, list, or dictionary of attributes
:type attrib: str or list or dictionary
:param extend: the extended attributes to pass to select
:type extend: str or None
:param runas: run as user
:type runas: str or None
:param logerr: If True (default) logs run_cmd errors
:type logerr: bool
:returns: A list of job identifiers that match the
attributes specified
:raises: PbsSelectError
"""
prefix = "select on " + self.shortname
if runas is not None:
prefix += " as " + str(runas)
prefix += ": "
if attrib is None:
s = PTL_ALL
elif not isinstance(attrib, dict):
self.logger.error(prefix + "attributes must be a dictionary")
return
else:
s = str(attrib)
self.logger.info(prefix + s)
c = None
if self.get_op_mode() == PTL_CLI:
pcmd = [os.path.join(self.client_conf['PBS_EXEC'],
'bin', 'qselect')]
cmd = self.utils.convert_to_cli(attrib, op=IFL_SELECT,
hostname=self.hostname)
if extend is not None:
pcmd += ['-' + extend]
if not self._is_local and ((attrib is None) or
(ATTR_queue not in attrib)):
pcmd += ['-q', '@' + self.hostname]
pcmd += cmd
if not self.default_client_pbs_conf:
pcmd = ['PBS_CONF_FILE=' + self.client_pbs_conf_file] + pcmd
as_script = True
else:
as_script = False
ret = self.du.run_cmd(self.client, pcmd, runas=runas,
as_script=as_script, level=logging.INFOCLI,
logerr=logerr)
if ret['err'] != ['']:
self.last_error = ret['err']
self.last_rc = ret['rc']
if self.last_rc != 0:
raise PbsSelectError(rc=self.last_rc, rv=False,
msg=self.geterrmsg())
jobs = ret['out']
# the command returns an empty string when there are no jobs; since
# we expect valid ids, reset jobs to an empty array
if len(jobs) == 1 and jobs[0] == '':
jobs = []
elif runas is not None:
jobs = self.pbs_api_as('select', user=runas, data=attrib,
extend=extend)
else:
attropl = self.utils.convert_to_attropl(attrib, op=EQ)
c = self._connect(self.hostname)
jobs = pbs_selectjob(c, attropl, extend)
err = self.geterrmsg()
if err:
raise PbsSelectError(rc=-1, rv=False, msg=err,
post=self._disconnect, conn=c)
self._disconnect(c)
return jobs
def selstat(self, select_list, rattrib, runas=None, extend=None):
"""
stat and filter jobs attributes.
:param select_list: The filter criteria
:type select_list: List
:param rattrib: The attributes to query
:type rattrib: List
:param runas: run as user
:type runas: str or None
.. note:: No ``CLI`` counterpart for this call
"""
attrl = self.utils.convert_to_attrl(rattrib)
attropl = self.utils.convert_to_attropl(select_list)
c = self._connect(self.hostname)
bs = pbs_selstat(c, attropl, attrl, extend)
self._disconnect(c)
return bs
def manager(self, cmd, obj_type, attrib=None, id=None, extend=None,
expect=False, max_attempts=None, level=logging.INFO,
sudo=None, runas=None, logerr=True):
"""
issue a management command to the server, e.g. to set an
attribute
Returns the return code of ``qmgr/pbs_manager()`` on
success; if expect is set to True, the return value is
that of the call to expect. Raises ``PbsManagerError`` on
error
:param cmd: The command to issue,
``MGR_CMD_[SET,UNSET, LIST,...]`` see pbs_ifl.h
:type cmd: str
:param obj_type: The type of object to query, one of
the * objects
:param attrib: Attributes to operate on, can be a string, a
list, or a dictionary
:type attrib: str or list or dictionary
:param id: The name or list of names of the object(s) to act
upon.
:type id: str or list
:param extend: Optional extension to the IFL call. see
pbs_ifl.h
:type extend: str or None
:param expect: If set to True, query the server expecting
the value to be accurately reflected.
Defaults to False
:type expect: bool
:param max_attempts: Sets a maximum number of attempts to
call expect with.
:type max_attempts: int
:param level: logging level
:param sudo: If True, run the manager command as super user.
Defaults to None. Some attribute settings
should be run with sudo set to True, those are
acl_roots, job_sort_formula, hook operations,
no_sched_hook_event, in those cases, setting
sudo to False is only needed for testing
purposes
:type sudo: bool
:param runas: run as user
:type runas: str
:param logerr: If False, CLI commands do not log error,
i.e. silent mode
:type logerr: bool
:raises: PbsManagerError
When expect is ``False``, return the value, ``0/!0``
returned by pbs_manager
When expect is ``True``, return the value, ``True/False``,
returned by expect
"""
if isinstance(id, str):
oid = id.split(',')
else:
oid = id
self.logit('manager on ' + self.shortname +
[' as ' + str(runas), ''][runas is None] + ': ' +
PBS_CMD_MAP[cmd] + ' ', obj_type, attrib, oid, level=level)
c = None # connection handle
if (self.get_op_mode() == PTL_CLI or
sudo is not None or
obj_type in (HOOK, PBS_HOOK) or
(attrib is not None and ('job_sort_formula' in attrib or
'acl_roots' in attrib or
'no_sched_hook_event' in attrib))):
execcmd = [PBS_CMD_MAP[cmd], PBS_OBJ_MAP[obj_type]]
if oid is not None:
if cmd == MGR_CMD_DELETE and obj_type == NODE and oid[0] == "":
oid[0] = "@default"
execcmd += [",".join(oid)]
if attrib is not None and cmd != MGR_CMD_LIST:
if cmd == MGR_CMD_IMPORT:
execcmd += [attrib['content-type'],
attrib['content-encoding'],
attrib['input-file']]
else:
if isinstance(attrib, (dict, OrderedDict)):
kvpairs = []
for k, v in attrib.items():
if isinstance(v, tuple):
if v[0] == INCR:
op = '+='
elif v[0] == DECR:
op = '-='
else:
msg = 'Invalid operation: %s' % (v[0])
raise PbsManagerError(rc=1, rv=False,
msg=msg)
v = v[1]
else:
op = '='
# wrap comma-separated string values in double quotes
# if not already quoted:
if isinstance(v, str) and ',' in v and v[0] != '"':
v = '"' + v + '"'
kvpairs += [str(k) + op + str(v)]
if kvpairs:
execcmd += [",".join(kvpairs)]
del kvpairs
elif isinstance(attrib, list):
execcmd += [",".join(attrib)]
elif isinstance(attrib, str):
execcmd += [attrib]
if not self.default_pbs_conf or not self.default_client_pbs_conf:
as_script = True
else:
as_script = False
if not self._is_local or as_script:
execcmd = '\'' + " ".join(execcmd) + '\''
else:
execcmd = " ".join(execcmd)
# Hooks can only be queried as a privileged user on the host where
# the server is running; care must be taken to use the appropriate
# path to qmgr and appropriate escaping sequences
# VERSION INFO: no_sched_hook_event introduced in 11.3.120 only
if sudo is None:
if (obj_type in (HOOK, PBS_HOOK) or
(attrib is not None and
('job_sort_formula' in attrib or
'acl_roots' in attrib or
'no_sched_hook_event' in attrib))):
sudo = True
else:
sudo = False
pcmd = [os.path.join(self.pbs_conf['PBS_EXEC'], 'bin', 'qmgr'),
'-c', execcmd]
if as_script:
pcmd = ['PBS_CONF_FILE=' + self.client_pbs_conf_file] + pcmd
ret = self.du.run_cmd(self.hostname, pcmd, sudo=sudo, runas=runas,
level=logging.INFOCLI, as_script=as_script,
logerr=logerr)
rc = ret['rc']
# NOTE: works around the fact that qmgr overloads the return code:
# when the returned list is empty, an error flag is set even though
# there is no error. Handled here by checking that both out and err
# are empty, in which case the return code is reset to 0
if rc != 0 and (ret['out'] == [''] and ret['err'] == ['']):
rc = 0
if rc == 0:
if cmd == MGR_CMD_LIST:
bsl = self.utils.convert_to_dictlist(ret['out'], attrib,
mergelines=False)
self.update_attributes(obj_type, bsl)
else:
# Need to rework setting error, this is not thread safe
self.last_error = ret['err']
self.last_rc = ret['rc']
elif runas is not None:
_data = {'cmd': cmd, 'obj_type': obj_type, 'attrib': attrib,
'id': oid}
rc = self.pbs_api_as('manager', user=runas, data=_data,
extend=extend)
else:
a = self.utils.convert_to_attropl(attrib, cmd)
c = self._connect(self.hostname)
rc = 0
if obj_type == SERVER and oid is None:
oid = [self.hostname]
if oid is None:
# server will run strlen on id, it cannot be NULL
oid = ['']
if cmd == MGR_CMD_LIST:
if oid is None:
bsl = self.status(obj_type, attrib, oid, extend)
else:
bsl = None
for i in oid:
tmpbsl = self.status(obj_type, attrib, i, extend)
if tmpbsl is None:
rc = 1
else:
if bsl is None:
bsl = tmpbsl
else:
bsl += tmpbsl
else:
rc = 0
if oid is None:
rc = pbs_manager(c, cmd, obj_type, '', a, extend)
else:
for i in oid:
tmprc = pbs_manager(c, cmd, obj_type, i, a, extend)
if tmprc != 0:
rc = tmprc
break
if rc == 0:
rc = tmprc
if cmd == MGR_CMD_DELETE and oid is not None:
for i in oid:
if obj_type == MGR_OBJ_HOOK and i in self.hooks:
del self.hooks[i]
if obj_type in (NODE, VNODE) and i in self.nodes:
del self.nodes[i]
if obj_type == MGR_OBJ_QUEUE and i in self.queues:
del self.queues[i]
if obj_type == MGR_OBJ_RSC and i in self.resources:
del self.resources[i]
if rc != 0:
raise PbsManagerError(rv=False, rc=rc, msg=self.geterrmsg(),
post=self._disconnect, conn=c)
if c is not None:
self._disconnect(c)
if expect:
offset = None
if obj_type in (NODE, HOST):
obj_type = VNODE
if obj_type in (VNODE, QUEUE):
offset = 0.5
if cmd in PBS_CMD_TO_OP:
op = PBS_CMD_TO_OP[cmd]
else:
op = EQ
if oid is None:
return self.expect(obj_type, attrib, oid, op=op,
max_attempts=max_attempts, offset=offset)
for i in oid:
rc = self.expect(obj_type, attrib, i, op=op,
max_attempts=max_attempts, offset=offset)
if not rc:
break
return rc
def sigjob(self, jobid=None, signal=None, extend=None, runas=None,
logerr=True):
"""
Send a signal to a job. Raises ``PbsSignalError`` on error.
:param jobid: identifier of the job or list of jobs to send
the signal to
:type jobid: str or list
:param signal: The signal to send to the job, see pbs_ifl.h
:type signal: str or None
:param extend: extend options
:param runas: run as user
:type runas: str or None
:param logerr: If True (default) logs run_cmd errors
:type logerr: bool
:raises: PbsSignalError
"""
prefix = 'signal on ' + self.shortname
if runas is not None:
prefix += ' as ' + str(runas)
prefix += ': '
if jobid is not None:
if not isinstance(jobid, list):
jobid = jobid.split(',')
prefix += ', '.join(jobid)
if signal is not None:
prefix += ' with signal = ' + str(signal)
self.logger.info(prefix)
c = None
if self.get_op_mode() == PTL_CLI:
pcmd = [os.path.join(self.client_conf['PBS_EXEC'], 'bin', 'qsig')]
if signal is not None:
pcmd += ['-s']
if signal != PTL_NOARG:
pcmd += [str(signal)]
if jobid is not None:
pcmd += jobid
if not self.default_client_pbs_conf:
pcmd = ['PBS_CONF_FILE=' + self.client_pbs_conf_file] + pcmd
as_script = True
else:
as_script = False
ret = self.du.run_cmd(self.client, pcmd, runas=runas,
as_script=as_script, level=logging.INFOCLI,
logerr=logerr)
rc = ret['rc']
if ret['err'] != ['']:
self.last_error = ret['err']
self.last_rc = rc
elif runas is not None:
rc = self.pbs_api_as('sigjob', jobid, runas, data=signal)
else:
c = self._connect(self.hostname)
rc = 0
for ajob in jobid:
tmp_rc = pbs_sigjob(c, ajob, signal, extend)
if tmp_rc != 0:
rc = tmp_rc
if rc != 0:
raise PbsSignalError(rc=rc, rv=False, msg=self.geterrmsg(),
post=self._disconnect, conn=c)
if c:
self._disconnect(c)
return rc
def msgjob(self, jobid=None, to_file=None, msg=None, extend=None,
runas=None, logerr=True):
"""
Send a message to a job. Raises ``PbsMessageError`` on
error.
:param jobid: identifier of the job or list of jobs to
send the message to
:type jobid: str or List
:param msg: The message to send to the job
:type msg: str or None
:param to_file: one of ``MSG_ERR`` or ``MSG_OUT`` or
``MSG_ERR|MSG_OUT``
:type to_file: str or None
:param extend: extend options
:param runas: run as user
:type runas: str or None
:param logerr: If True (default) logs run_cmd errors
:type logerr: bool
:raises: PbsMessageError
"""
prefix = 'msgjob on ' + self.shortname
if runas is not None:
prefix += ' as ' + str(runas)
prefix += ': '
if jobid is not None:
if not isinstance(jobid, list):
jobid = jobid.split(',')
prefix += ', '.join(jobid)
if to_file is not None:
prefix += ' with to_file = '
if MSG_ERR == to_file:
prefix += 'MSG_ERR'
elif MSG_OUT == to_file:
prefix += 'MSG_OUT'
elif MSG_OUT | MSG_ERR == to_file:
prefix += 'MSG_ERR|MSG_OUT'
else:
prefix += str(to_file)
if msg is not None:
prefix += ' msg = %s' % (str(msg))
if extend is not None:
prefix += ' extend = %s' % (str(extend))
self.logger.info(prefix)
c = None
if self.get_op_mode() == PTL_CLI:
pcmd = [os.path.join(self.client_conf['PBS_EXEC'], 'bin', 'qmsg')]
if to_file is not None:
if MSG_ERR == to_file:
pcmd += ['-E']
elif MSG_OUT == to_file:
pcmd += ['-O']
elif MSG_OUT | MSG_ERR == to_file:
pcmd += ['-E', '-O']
else:
pcmd += ['-' + str(to_file)]
if msg is not None:
pcmd += [msg]
if jobid is not None:
pcmd += jobid
if not self.default_client_pbs_conf:
pcmd = ['PBS_CONF_FILE=' + self.client_pbs_conf_file] + pcmd
as_script = True
else:
as_script = False
ret = self.du.run_cmd(self.client, pcmd, runas=runas,
as_script=as_script, level=logging.INFOCLI,
logerr=logerr)
rc = ret['rc']
if ret['err'] != ['']:
self.last_error = ret['err']
self.last_rc = rc
elif runas is not None:
data = {'msg': msg, 'to_file': to_file}
rc = self.pbs_api_as('msgjob', jobid, runas, data=data,
extend=extend)
else:
c = self._connect(self.hostname)
if c < 0:
return c
for ajob in jobid:
tmp_rc = pbs_msgjob(c, ajob, to_file, msg, extend)
if tmp_rc != 0:
rc = tmp_rc
if rc != 0:
raise PbsMessageError(rc=rc, rv=False, msg=self.geterrmsg(),
post=self._disconnect, conn=c)
if c:
self._disconnect(c)
return rc
def alterjob(self, jobid=None, attrib=None, extend=None, runas=None,
logerr=True):
"""
Alter attributes associated to a job. Raises
``PbsAlterError`` on error.
:param jobid: identifier of the job or list of jobs to
operate on
:type jobid: str or list
:param attrib: A dictionary of attributes to set
:type attrib: dictionary
:param extend: extend options
:param runas: run as user
:type runas: str or None
:param logerr: If False, CLI commands do not log error,
i.e. silent mode
:type logerr: bool
:raises: PbsAlterError
"""
prefix = 'alter on ' + self.shortname
if runas is not None:
prefix += ' as ' + str(runas)
prefix += ': '
if jobid is not None:
if not isinstance(jobid, list):
jobid = jobid.split(',')
prefix += ', '.join(jobid)
if attrib is not None:
prefix += ' %s' % (str(attrib))
self.logger.info(prefix)
c = None
if self.get_op_mode() == PTL_CLI:
pcmd = [os.path.join(self.client_conf['PBS_EXEC'], 'bin',
'qalter')]
if attrib is not None:
_conf = self.default_client_pbs_conf
pcmd += self.utils.convert_to_cli(attrib, op=IFL_ALTER,
hostname=self.client,
dflt_conf=_conf)
if jobid is not None:
pcmd += jobid
if not self.default_client_pbs_conf:
pcmd = ['PBS_CONF_FILE=' + self.client_pbs_conf_file] + pcmd
as_script = True
else:
as_script = False
ret = self.du.run_cmd(self.client, pcmd, runas=runas,
as_script=as_script, level=logging.INFOCLI,
logerr=logerr)
rc = ret['rc']
if ret['err'] != ['']:
self.last_error = ret['err']
self.last_rc = rc
elif runas is not None:
rc = self.pbs_api_as('alterjob', jobid, runas, data=attrib)
else:
c = self._connect(self.hostname)
if c < 0:
return c
a = self.utils.convert_to_attrl(attrib)
rc = 0
for ajob in jobid:
tmp_rc = pbs_alterjob(c, ajob, a, extend)
if tmp_rc != 0:
rc = tmp_rc
if rc != 0:
raise PbsAlterError(rc=rc, rv=False, msg=self.geterrmsg(),
post=self._disconnect, conn=c)
if c:
self._disconnect(c)
return rc
def holdjob(self, jobid=None, holdtype=None, extend=None, runas=None,
logerr=True):
"""
Hold a job. Raises ``PbsHoldError`` on error.
:param jobid: identifier of the job or list of jobs to hold
:type jobid: str or list
:param holdtype: The type of hold to put on the job
:type holdtype: str or None
:param extend: extend options
:param runas: run as user
:type runas: str or None
:param logerr: If True (default) logs run_cmd errors
:type logerr: bool
:raises: PbsHoldError
"""
prefix = 'holdjob on ' + self.shortname
if runas is not None:
prefix += ' as ' + str(runas)
prefix += ': '
if jobid is not None:
if not isinstance(jobid, list):
jobid = jobid.split(',')
prefix += ', '.join(jobid)
if holdtype is not None:
prefix += ' with hold_list = %s' % (holdtype)
self.logger.info(prefix)
c = None
if self.get_op_mode() == PTL_CLI:
pcmd = [os.path.join(self.client_conf['PBS_EXEC'], 'bin', 'qhold')]
if holdtype is not None:
pcmd += ['-h']
if holdtype != PTL_NOARG:
pcmd += [holdtype]
if jobid is not None:
pcmd += jobid
if not self.default_client_pbs_conf:
pcmd = ['PBS_CONF_FILE=' + self.client_pbs_conf_file] + pcmd
as_script = True
else:
as_script = False
ret = self.du.run_cmd(self.client, pcmd, runas=runas,
logerr=logerr, as_script=as_script,
level=logging.INFOCLI)
rc = ret['rc']
if ret['err'] != ['']:
self.last_error = ret['err']
self.last_rc = rc
elif runas is not None:
rc = self.pbs_api_as('holdjob', jobid, runas, data=holdtype,
logerr=logerr)
else:
c = self._connect(self.hostname)
if c < 0:
return c
rc = 0
for ajob in jobid:
tmp_rc = pbs_holdjob(c, ajob, holdtype, extend)
if tmp_rc != 0:
rc = tmp_rc
if rc != 0:
raise PbsHoldError(rc=rc, rv=False, msg=self.geterrmsg(),
post=self._disconnect, conn=c)
if c:
self._disconnect(c)
return rc
def rlsjob(self, jobid, holdtype, extend=None, runas=None, logerr=True):
"""
Release a job. Raises ``PbsReleaseError`` on error.
:param jobid: job or list of jobs to release
:type jobid: str or list
:param holdtype: The type of hold to release on the job
:type holdtype: str
:param extend: extend options
:param runas: run as user
:type runas: str or None
:param logerr: If True (default) logs run_cmd errors
:type logerr: bool
:raises: PbsReleaseError
"""
prefix = 'release on ' + self.shortname
if runas is not None:
prefix += ' as ' + str(runas)
prefix += ': '
if jobid is not None:
if not isinstance(jobid, list):
jobid = jobid.split(',')
prefix += ', '.join(jobid)
if holdtype is not None:
prefix += ' with hold_list = %s' % (holdtype)
self.logger.info(prefix)
c = None
if self.get_op_mode() == PTL_CLI:
pcmd = [os.path.join(self.client_conf['PBS_EXEC'], 'bin', 'qrls')]
if holdtype is not None:
pcmd += ['-h']
if holdtype != PTL_NOARG:
pcmd += [holdtype]
if jobid is not None:
pcmd += jobid
if not self.default_client_pbs_conf:
pcmd = ['PBS_CONF_FILE=' + self.client_pbs_conf_file] + pcmd
as_script = True
else:
as_script = False
ret = self.du.run_cmd(self.client, pcmd, runas=runas,
as_script=as_script, level=logging.INFOCLI,
logerr=logerr)
rc = ret['rc']
if ret['err'] != ['']:
self.last_error = ret['err']
self.last_rc = rc
elif runas is not None:
rc = self.pbs_api_as('rlsjob', jobid, runas, data=holdtype)
else:
c = self._connect(self.hostname)
if c < 0:
return c
rc = 0
for ajob in jobid:
tmp_rc = pbs_rlsjob(c, ajob, holdtype, extend)
if tmp_rc != 0:
rc = tmp_rc
if rc != 0:
raise PbsHoldError(rc=rc, rv=False, msg=self.geterrmsg(),
post=self._disconnect, conn=c)
if c:
self._disconnect(c)
return rc
def rerunjob(self, jobid=None, extend=None, runas=None, logerr=True):
"""
Rerun a job. Raises ``PbsRerunError`` on error.
:param jobid: job or list of jobs to rerun
:type jobid: str or list
:param extend: extend options
:param runas: run as user
:type runas: str or None
:param logerr: If True (default) logs run_cmd errors
:type logerr: bool
:raises: PbsRerunError
"""
prefix = 'rerun on ' + self.shortname
if runas is not None:
prefix += ' as ' + str(runas)
prefix += ': '
if jobid is not None:
if not isinstance(jobid, list):
jobid = jobid.split(',')
prefix += ', '.join(jobid)
if extend is not None:
prefix += extend
self.logger.info(prefix)
c = None
if self.get_op_mode() == PTL_CLI:
pcmd = [os.path.join(self.client_conf['PBS_EXEC'], 'bin',
'qrerun')]
if extend:
pcmd += ['-W', extend]
if jobid is not None:
pcmd += jobid
if not self.default_client_pbs_conf:
pcmd = ['PBS_CONF_FILE=' + self.client_pbs_conf_file] + pcmd
as_script = True
else:
as_script = False
ret = self.du.run_cmd(self.client, pcmd, runas=runas,
as_script=as_script, level=logging.INFOCLI,
logerr=logerr)
rc = ret['rc']
if ret['err'] != ['']:
self.last_error = ret['err']
self.last_rc = rc
elif runas is not None:
rc = self.pbs_api_as('rerunjob', jobid, runas, extend=extend)
else:
c = self._connect(self.hostname)
if c < 0:
return c
rc = 0
for ajob in jobid:
tmp_rc = pbs_rerunjob(c, ajob, extend)
if tmp_rc != 0:
rc = tmp_rc
if rc != 0:
raise PbsRerunError(rc=rc, rv=False, msg=self.geterrmsg(),
post=self._disconnect, conn=c)
if c:
self._disconnect(c)
return rc
def orderjob(self, jobid1=None, jobid2=None, extend=None, runas=None,
logerr=True):
"""
reorder position of ``jobid1`` and ``jobid2``. Raises
``PbsOrderJob`` on error.
:param jobid1: first jobid
:type jobid1: str or None
:param jobid2: second jobid
:type jobid2: str or None
:param extend: extend options
:param runas: run as user
:type runas: str or None
:param logerr: If True (default) logs run_cmd errors
:type logerr: bool
:raises: PbsOrderJob
"""
prefix = 'orderjob on ' + self.shortname
if runas is not None:
prefix += ' as ' + str(runas)
prefix += ': '
prefix += str(jobid1) + ', ' + str(jobid2)
if extend is not None:
prefix += ' ' + str(extend)
self.logger.info(prefix)
c = None
if self.get_op_mode() == PTL_CLI:
pcmd = [os.path.join(self.client_conf['PBS_EXEC'], 'bin',
'qorder')]
if jobid1 is not None:
pcmd += [jobid1]
if jobid2 is not None:
pcmd += [jobid2]
if not self.default_client_pbs_conf:
pcmd = ['PBS_CONF_FILE=' + self.client_pbs_conf_file] + pcmd
as_script = True
else:
as_script = False
ret = self.du.run_cmd(self.client, pcmd, runas=runas,
as_script=as_script, level=logging.INFOCLI,
logerr=logerr)
rc = ret['rc']
if ret['err'] != ['']:
self.last_error = ret['err']
self.last_rc = rc
elif runas is not None:
rc = self.pbs_api_as('orderjob', jobid1, runas, data=jobid2,
extend=extend)
else:
c = self._connect(self.hostname)
if c < 0:
return c
rc = pbs_orderjob(c, jobid1, jobid2, extend)
if rc != 0:
raise PbsOrderError(rc=rc, rv=False, msg=self.geterrmsg(),
post=self._disconnect, conn=c)
if c:
self._disconnect(c)
return rc
def runjob(self, jobid=None, location=None, async=False, extend=None,
runas=None, logerr=False):
"""
Run a job on given nodes. Raises ``PbsRunError`` on error.
:param jobid: job or list of jobs to run
:type jobid: str or list
:param location: An execvnode on which to run the job
:type location: str or None
:param async: If True the call returns immediately,
assuming success.
:type async: bool
:param extend: extend options
:param runas: run as user
:type runas: str or None
:param logerr: If True logs run_cmd errors. Defaults to False
:type logerr: bool
:raises: PbsRunError
"""
if async:
prefix = 'Async run on ' + self.shortname
else:
prefix = 'run on ' + self.shortname
if runas is not None:
prefix += ' as ' + str(runas)
prefix += ': '
if jobid is not None:
if not isinstance(jobid, list):
jobid = jobid.split(',')
prefix += ', '.join(jobid)
if location is not None:
prefix += ' with location = %s' % (location)
self.logger.info(prefix)
if self.has_diag:
return 0
c = None
if self.get_op_mode() == PTL_CLI:
pcmd = [os.path.join(self.client_conf['PBS_EXEC'], 'bin', 'qrun')]
if async:
pcmd += ['-a']
if location is not None:
pcmd += ['-H']
if location != PTL_NOARG:
pcmd += [location]
if jobid:
pcmd += jobid
if not self.default_client_pbs_conf:
pcmd = ['PBS_CONF_FILE=' + self.client_pbs_conf_file] + pcmd
as_script = True
else:
as_script = False
ret = self.du.run_cmd(self.client, pcmd, runas=runas,
as_script=as_script, level=logging.INFOCLI,
logerr=logerr)
rc = ret['rc']
if ret['err'] != ['']:
self.last_error = ret['err']
self.last_rc = rc
elif runas is not None:
rc = self.pbs_api_as(
'runjob', jobid, runas, data=location, extend=extend)
else:
c = self._connect(self.hostname)
if c < 0:
return c
rc = 0
for ajob in jobid:
if async:
tmp_rc = pbs_asyrunjob(c, ajob, location, extend)
else:
tmp_rc = pbs_runjob(c, ajob, location, extend)
if tmp_rc != 0:
rc = tmp_rc
if rc != 0:
raise PbsRunError(rc=rc, rv=False, msg=self.geterrmsg(),
post=self._disconnect, conn=c)
if c:
self._disconnect(c)
return rc
def movejob(self, jobid=None, destination=None, extend=None, runas=None,
logerr=True):
"""
Move a job or list of job ids to a given destination queue.
Raises ``PbsMoveError`` on error.
:param jobid: A job or list of job ids to move
:type jobid: str or list
:param destination: The destination queue@server
:type destination: str or None
:param extend: extend options
:param runas: run as user
:type runas: str or None
:param logerr: If True (default) logs run_cmd errors
:type logerr: bool
:raises: PbsMoveError
"""
prefix = 'movejob on ' + self.shortname
if runas is not None:
prefix += ' as ' + str(runas)
prefix += ': '
if jobid is not None:
if not isinstance(jobid, list):
jobid = jobid.split(',')
prefix += ', '.join(jobid)
if destination is not None:
prefix += ' destination = %s' % (destination)
self.logger.info(prefix)
c = None
rc = 0
if self.get_op_mode() == PTL_CLI:
pcmd = [os.path.join(self.client_conf['PBS_EXEC'], 'bin', 'qmove')]
if destination is not None:
pcmd += [destination]
if jobid is not None:
pcmd += jobid
if not self.default_client_pbs_conf:
pcmd = ['PBS_CONF_FILE=' + self.client_pbs_conf_file] + pcmd
as_script = True
else:
as_script = False
ret = self.du.run_cmd(self.client, pcmd, runas=runas,
logerr=logerr, as_script=as_script,
level=logging.INFOCLI)
rc = ret['rc']
if ret['err'] != ['']:
self.last_error = ret['err']
self.last_rc = rc
elif runas is not None:
rc = self.pbs_api_as('movejob', jobid, runas, data=destination,
extend=extend)
else:
c = self._connect(self.hostname)
if c < 0:
return c
for ajob in jobid:
tmp_rc = pbs_movejob(c, ajob, destination, extend)
if tmp_rc != 0:
rc = tmp_rc
if rc != 0:
raise PbsMoveError(rc=rc, rv=False, msg=self.geterrmsg(),
post=self._disconnect, conn=c)
if c:
self._disconnect(c)
return rc
def qterm(self, manner=None, extend=None, server_name=None, runas=None,
logerr=True):
"""
Terminate the ``pbs_server`` daemon
:param manner: one of ``(SHUT_IMMEDIATE | SHUT_DELAY |
SHUT_QUICK)`` and can be combined with
SHUT_WHO_SCHED, SHUT_WHO_MOM, SHUT_WHO_SECDRY,
SHUT_WHO_IDLESECDRY, SHUT_WHO_SECDONLY.
:param extend: extend options
:param server_name: name of the pbs server
:type server_name: str or None
:param runas: run as user
:type runas: str or None
:param logerr: If True (default) logs run_cmd errors
:type logerr: bool
:raises: PbsQtermError
"""
prefix = 'terminate ' + self.shortname
if runas is not None:
prefix += ' as ' + str(runas)
prefix += ': with manner '
attrs = manner
if attrs is None:
prefix += "None "
elif isinstance(attrs, str):
prefix += attrs
else:
if ((attrs & SHUT_QUICK) == SHUT_QUICK):
prefix += "quick "
if ((attrs & SHUT_IMMEDIATE) == SHUT_IMMEDIATE):
prefix += "immediate "
if ((attrs & SHUT_DELAY) == SHUT_DELAY):
prefix += "delay "
if ((attrs & SHUT_WHO_SCHED) == SHUT_WHO_SCHED):
prefix += "schedular "
if ((attrs & SHUT_WHO_MOM) == SHUT_WHO_MOM):
prefix += "mom "
if ((attrs & SHUT_WHO_SECDRY) == SHUT_WHO_SECDRY):
prefix += "secondary server "
if ((attrs & SHUT_WHO_IDLESECDRY) == SHUT_WHO_IDLESECDRY):
prefix += "idle secondary "
if ((attrs & SHUT_WHO_SECDONLY) == SHUT_WHO_SECDONLY):
prefix += "shoutdown secondary only "
self.logger.info(prefix)
if self.has_diag:
return 0
c = None
rc = 0
if self.get_op_mode() == PTL_CLI:
pcmd = [os.path.join(self.client_conf['PBS_EXEC'], 'bin', 'qterm')]
_conf = self.default_client_pbs_conf
pcmd += self.utils.convert_to_cli(manner, op=IFL_TERMINATE,
hostname=self.hostname,
dflt_conf=_conf)
if server_name is not None:
pcmd += [server_name]
if not self.default_client_pbs_conf:
pcmd = ['PBS_CONF_FILE=' + self.client_pbs_conf_file] + pcmd
as_script = True
else:
as_script = False
ret = self.du.run_cmd(self.client, pcmd, runas=runas,
level=logging.INFOCLI, as_script=as_script)
rc = ret['rc']
if ret['err'] != ['']:
self.last_error = ret['err']
self.last_rc = rc
elif runas is not None:
attrs = {'manner': manner, 'server_name': server_name}
rc = self.pbs_api_as('terminate', None, runas, data=attrs,
extend=extend)
else:
if server_name is None:
server_name = self.hostname
c = self._connect(self.hostname)
rc = pbs_terminate(c, manner, extend)
if rc != 0:
raise PbsQtermError(rc=rc, rv=False, msg=self.geterrmsg(),
post=self._disconnect, conn=c, force=True)
if c:
self._disconnect(c, force=True)
return rc
teminate = qterm
def geterrmsg(self):
"""
Get the error message
"""
mode = self.get_op_mode()
if mode == PTL_CLI:
return self.last_error
elif self._conn is not None and self._conn >= 0:
m = pbs_geterrmsg(self._conn)
if m is not None:
m = m.split('\n')
return m
#
# End IFL Wrappers
#
def qdisable(self, queue=None, runas=None, logerr=True):
"""
Disable queue. ``CLI`` mode only
:param queue: The name of the queue or list of queue to
disable
:type queue: str or list
:param runas: Optional name of user to run command as
:type runas: str or None
:param logerr: Set to False to disable logging command
errors. Defaults to True.
:type logerr: bool
:raises: PbsQdisableError
"""
prefix = 'qdisable on ' + self.shortname
if runas is not None:
prefix += ' as ' + str(runas)
prefix += ': '
if queue is not None:
if not isinstance(queue, list):
queue = queue.split(',')
prefix += ', '.join(queue)
self.logger.info(prefix)
if self.get_op_mode() == PTL_CLI:
pcmd = [os.path.join(self.client_conf['PBS_EXEC'], 'bin',
'qdisable')]
if queue is not None:
pcmd += queue
if not self.default_client_pbs_conf:
pcmd = ['PBS_CONF_FILE=' + self.client_pbs_conf_file] + pcmd
as_script = True
else:
as_script = False
ret = self.du.run_cmd(self.client, pcmd, runas=runas,
as_script=as_script, level=logging.INFOCLI,
logerr=logerr)
if ret['err'] != ['']:
self.last_error = ret['err']
self.last_rc = ret['rc']
if self.last_rc != 0:
raise PbsQdisableError(rc=self.last_rc, rv=False,
msg=self.last_error)
else:
_msg = 'qdisable: currently not supported in API mode'
raise PbsQdisableError(rv=False, rc=1, msg=_msg)
def qenable(self, queue=None, runas=None, logerr=True):
"""
Enable queue. ``CLI`` mode only
:param queue: The name of the queue or list of queue to
enable
:type queue: str or list
:param runas: Optional name of user to run command as
:type runas: str or None
        :param logerr: Set to False to disable logging command
                       errors. Defaults to True.
:type logerr: bool
:raises: PbsQenableError
"""
prefix = 'qenable on ' + self.shortname
if runas is not None:
prefix += ' as ' + str(runas)
prefix += ': '
if queue is not None:
if not isinstance(queue, list):
queue = queue.split(',')
prefix += ', '.join(queue)
self.logger.info(prefix)
if self.get_op_mode() == PTL_CLI:
pcmd = [os.path.join(self.client_conf['PBS_EXEC'], 'bin',
'qenable')]
if queue is not None:
pcmd += queue
if not self.default_client_pbs_conf:
pcmd = ['PBS_CONF_FILE=' + self.client_pbs_conf_file] + pcmd
as_script = True
else:
as_script = False
ret = self.du.run_cmd(self.client, pcmd, runas=runas,
as_script=as_script, level=logging.INFOCLI,
logerr=logerr)
if ret['err'] != ['']:
self.last_error = ret['err']
self.last_rc = ret['rc']
if self.last_rc != 0:
raise PbsQenableError(rc=self.last_rc, rv=False,
msg=self.last_error)
else:
_msg = 'qenable: currently not supported in API mode'
raise PbsQenableError(rv=False, rc=1, msg=_msg)
def qstart(self, queue=None, runas=None, logerr=True):
"""
Start queue. ``CLI`` mode only
:param queue: The name of the queue or list of queue
to start
:type queue: str or list
:param runas: Optional name of user to run command as
:type runas: str or None
        :param logerr: Set to False to disable logging command
                       errors. Defaults to True.
:type logerr: bool
:raises: PbsQstartError
"""
prefix = 'qstart on ' + self.shortname
if runas is not None:
prefix += ' as ' + str(runas)
prefix += ': '
if queue is not None:
if not isinstance(queue, list):
queue = queue.split(',')
prefix += ', '.join(queue)
self.logger.info(prefix)
if self.get_op_mode() == PTL_CLI:
pcmd = [os.path.join(self.client_conf['PBS_EXEC'], 'bin',
'qstart')]
if queue is not None:
pcmd += queue
if not self.default_client_pbs_conf:
pcmd = ['PBS_CONF_FILE=' + self.client_pbs_conf_file] + pcmd
as_script = True
else:
as_script = False
ret = self.du.run_cmd(self.client, pcmd, runas=runas,
as_script=as_script, level=logging.INFOCLI,
logerr=logerr)
if ret['err'] != ['']:
self.last_error = ret['err']
self.last_rc = ret['rc']
if self.last_rc != 0:
raise PbsQstartError(rc=self.last_rc, rv=False,
msg=self.last_error)
else:
_msg = 'qstart: currently not supported in API mode'
raise PbsQstartError(rv=False, rc=1, msg=_msg)
def qstop(self, queue=None, runas=None, logerr=True):
"""
Stop queue. ``CLI`` mode only
:param queue: The name of the queue or list of queue to stop
:type queue: str or list
:param runas: Optional name of user to run command as
:type runas: str or None
        :param logerr: Set to False to disable logging command errors.
                       Defaults to True.
:type logerr: bool
:raises: PbsQstopError
"""
prefix = 'qstop on ' + self.shortname
if runas is not None:
prefix += ' as ' + str(runas)
prefix += ': '
if queue is not None:
if not isinstance(queue, list):
queue = queue.split(',')
prefix += ', '.join(queue)
self.logger.info(prefix)
if self.get_op_mode() == PTL_CLI:
pcmd = [os.path.join(self.client_conf['PBS_EXEC'], 'bin',
'qstop')]
if queue is not None:
pcmd += queue
if not self.default_client_pbs_conf:
pcmd = ['PBS_CONF_FILE=' + self.client_pbs_conf_file] + pcmd
as_script = True
else:
as_script = False
ret = self.du.run_cmd(self.client, pcmd, runas=runas,
as_script=as_script, level=logging.INFOCLI,
logerr=logerr)
if ret['err'] != ['']:
self.last_error = ret['err']
self.last_rc = ret['rc']
if self.last_rc != 0:
raise PbsQstopError(rc=self.last_rc, rv=False,
msg=self.last_error)
else:
_msg = 'qstop: currently not supported in API mode'
raise PbsQstopError(rv=False, rc=1, msg=_msg)
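    # Usage sketch for the CLI-only queue control helpers above
    # (hypothetical; assumes a Server instance named `server` and an
    # existing queue named `workq`):
    #
    #     server.qstop('workq')                  # stop scheduling from workq
    #     server.qdisable('workq')               # reject new submissions
    #     server.qenable(['workq', 'expressq'])  # re-enable one or more queues
    #     server.qstart('workq')                 # resume scheduling
    #
    # Each helper raises its corresponding Pbs*Error on failure or when the
    # operating mode is not PTL_CLI.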
def parse_resources(self):
"""
Parse server resources as defined in the resourcedef file
Populates instance variable self.resources
:returns: The resources as a dictionary
"""
if not self.has_diag:
self.manager(MGR_CMD_LIST, RSC)
return self.resources
def remove_resource(self, name):
"""
Remove an entry from resourcedef
:param name: The name of the resource to remove
:type name: str
:param restart: Whether to restart the server or not.
Applicable to update_mode 'file'
operations only.
:param update_mode: one of 'file' or 'auto' (the default).
If 'file', updates the resourcedef file
only and will not use the qmgr
operations on resources introduced in
12.3. If 'auto', will automatically
handle the update on resourcedef or
using qmgr based on the version of the
Server.
"""
self.parse_resources()
if not self.has_diag:
if name in self.resources:
self.manager(MGR_CMD_DELETE, RSC, id=name)
def add_resource(self, name, type=None, flag=None):
"""
Define a server resource
:param name: The name of the resource to add to the
resourcedef file
:type name: str
:param type: The type of the resource, one of string,
long, boolean, float
:param flag: The target of the resource, one of n, h, q,
or none
:type flag: str or None
        :param restart: Whether to restart the server after adding
                        a resource. Applicable to update_mode 'file'
                        operations only.
:param update_mode: one of 'file' or 'auto' (the default).
If 'file', updates the resourcedef file
only and will not use the qmgr
operations on resources introduced in
12.3. If 'auto', will automatically
handle the update on resourcedef or
using qmgr based on the version of the
Server.
:returns: True on success False on error
"""
rv = self.parse_resources()
if rv is None:
return False
resource_exists = False
if name in self.resources:
msg = [self.logprefix + "resource " + name]
if type:
msg += ["type: " + type]
if flag:
msg += ["flag: " + flag]
msg += [" already defined"]
self.logger.info(" ".join(msg))
(t, f) = (self.resources[name].type, self.resources[name].flag)
if type == t and flag == f:
return True
self.logger.info("resource: redefining resource " + name +
" type: " + str(type) + " and flag: " + str(flag))
del self.resources[name]
resource_exists = True
r = Resource(name, type, flag)
self.resources[name] = r
a = {}
if type:
a['type'] = type
if flag:
a['flag'] = flag
if resource_exists:
self.manager(MGR_CMD_SET, RSC, a, id=name)
else:
self.manager(MGR_CMD_CREATE, RSC, a, id=name)
return True
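    # Usage sketch (hypothetical; assumes a Server instance named `server`):
    #
    #     server.add_resource('foo_str', type='string')
    #     server.add_resource('foo_cnt', type='long', flag='nh')
    #     server.remove_resource('foo_str')
    #
    # The resource names and the 'nh' flag combination are illustrative
    # only; valid flags depend on the PBS Professional version in use.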
def write_resourcedef(self, resources=None, filename=None, restart=True):
"""
Write into resource def file
:param resources: PBS resources
:type resources: dictionary
:param filename: resourcedef file name
:type filename: str or None
"""
if resources is None:
resources = self.resources
if isinstance(resources, Resource):
resources = {resources.name: resources}
fn = self.du.mkstemp()[1]
f = open(fn, 'w+')
for r in resources.values():
f.write(r.attributes['id'])
if r.attributes['type'] is not None:
f.write(' type=' + r.attributes['type'])
if r.attributes['flag'] is not None:
f.write(' flag=' + r.attributes['flag'])
f.write('\n')
f.close()
if filename is None:
dest = os.path.join(self.pbs_conf['PBS_HOME'], 'server_priv',
'resourcedef')
else:
dest = filename
self.du.run_copy(self.hostname, fn, dest, mode=0644, sudo=True)
if filename is None:
self.du.chown(self.hostname, path=dest, uid=0, gid=0,
sudo=True)
os.remove(fn)
if restart:
return self.restart()
return True
def parse_resourcedef(self, file=None):
"""
Parse an arbitrary resource definition file passed as
input and return a dictionary of resources
:param file: resource definition file
:type file: str or None
:returns: Dictionary of resource
:raises: PbsResourceError
"""
if file is None:
file = os.path.join(self.pbs_conf['PBS_HOME'], 'server_priv',
'resourcedef')
ret = self.du.cat(self.hostname, file, logerr=False, sudo=True)
if ret['rc'] != 0 or len(ret['out']) == 0:
# Most probable error is that file does not exist, we'll let it
# be created
return {}
resources = {}
lines = ret['out']
try:
for l in lines:
l = l.strip()
if l == '' or l.startswith('#'):
continue
name = None
rtype = None
flag = None
res = l.split()
e0 = res[0]
if len(res) > 1:
e1 = res[1].split('=')
else:
e1 = None
if len(res) > 2:
e2 = res[2].split('=')
else:
e2 = None
if e1 is not None and e1[0] == 'type':
rtype = e1[1]
elif e2 is not None and e2[0] == 'type':
rtype = e2[1]
if e1 is not None and e1[0] == 'flag':
flag = e1[0]
elif e2 is not None and e2[0] == 'flag':
flag = e2[1]
name = e0
r = Resource(name, rtype, flag)
resources[name] = r
except:
raise PbsResourceError(rc=1, rv=False,
msg="error in parse_resources")
return resources
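    # Illustrative resourcedef content that parse_resourcedef() above would
    # accept (hypothetical file content, not shipped with PBS):
    #
    #     # site-defined custom resources
    #     foo_str type=string
    #     foo_cnt type=long flag=nh
    #
    # Each non-comment line yields a Resource object keyed by its name in
    # the returned dictionary.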
def pbs_api_as(self, cmd=None, obj=None, user=None, **kwargs):
"""
Generic handler to run an ``API`` call impersonating
        a given user. This method is only used for impersonation
over the ``API`` because ``CLI`` impersonation takes place
through the generic ``DshUtils`` run_cmd mechanism.
:param cmd: PBS command
:type cmd: str or None
:param user: PBS user or current user
:type user: str or None
:raises: eval
"""
fn = None
objid = None
_data = None
if user is None:
user = self.du.get_current_user()
else:
# user may be a PbsUser object, cast it to string for the remainder
# of the function
user = str(user)
if cmd == 'submit':
if obj is None:
return None
_data = copy.copy(obj)
# the following attributes cause problems 'pickling',
# since they are not needed we unset them
_data.attrl = None
_data.attropl = None
_data.logger = None
_data.utils = None
elif cmd in ('alterjob', 'holdjob', 'sigjob', 'msgjob', 'rlsjob',
'rerunjob', 'orderjob', 'runjob', 'movejob',
'select', 'delete', 'status', 'manager', 'terminate',
'deljob', 'delresv'):
objid = obj
if 'data' in kwargs:
_data = kwargs['data']
if _data is not None:
(fd, fn) = self.du.mkstemp()
tmpfile = open(fn, 'w+b')
cPickle.dump(_data, tmpfile)
tmpfile.close()
os.close(fd)
os.chmod(fn, 0755)
if self._is_local:
os.chdir(tempfile.gettempdir())
else:
self.du.run_copy(self.hostname, fn, fn, sudo=True)
if not self._is_local:
p_env = '"import os; print os.environ[\'PTL_EXEC\']"'
ret = self.du.run_cmd(self.hostname, ['python', '-c', p_env],
logerr=False)
if ret['out']:
runcmd = [os.path.join(ret['out'][0], 'pbs_as')]
else:
runcmd = ['pbs_as']
elif 'PTL_EXEC' in os.environ:
runcmd = [os.path.join(os.environ['PTL_EXEC'], 'pbs_as')]
else:
runcmd = ['pbs_as']
runcmd += ['-c', cmd, '-u', user]
if objid is not None:
runcmd += ['-o']
if isinstance(objid, list):
runcmd += [','.join(objid)]
else:
runcmd += [objid]
if fn is not None:
runcmd += ['-f', fn]
if 'hostname' in kwargs:
hostname = kwargs['hostname']
else:
hostname = self.hostname
runcmd += ['-s', hostname]
if 'extend' in kwargs and kwargs['extend'] is not None:
runcmd += ['-e', kwargs['extend']]
ret = self.du.run_cmd(self.hostname, runcmd, logerr=False, runas=user)
out = ret['out']
if ret['err']:
if cmd in CMD_ERROR_MAP:
m = CMD_ERROR_MAP[cmd]
if m in ret['err'][0]:
if fn is not None:
os.remove(fn)
if not self._is_local:
self.du.rm(self.hostname, fn)
raise eval(str(ret['err'][0]))
self.logger.debug('err: ' + str(ret['err']))
if fn is not None:
os.remove(fn)
if not self._is_local:
self.du.rm(self.hostname, fn)
if cmd == 'submit':
if out:
return out[0].strip()
else:
return None
elif cmd in ('alterjob', 'holdjob', 'sigjob', 'msgjob', 'rlsjob',
'rerunjob', 'orderjob', 'runjob', 'movejob', 'delete',
'terminate'):
if ret['out']:
return int(ret['out'][0])
else:
return 1
elif cmd in ('manager', 'select', 'status'):
return eval(out[0])
def expect(self, obj_type, attrib=None, id=None, op=EQ, attrop=PTL_OR,
attempt=0, max_attempts=None, interval=None, count=None,
extend=None, offset=0, runas=None, level=logging.INFO,
msg=None):
"""
expect an attribute to match a given value as per an
operation.
:param obj_type: The type of object to query, JOB, SERVER,
SCHEDULER, QUEUE NODE
:type obj_type: str
:param attrib: Attributes to query, can be a string, a list,
or a dict
:type attrib: str or list or dictionary
:param id: The id of the object to act upon
:param op: An operation to perform on the queried data,
e.g., EQ, SET, LT,..
        :param attrop: Operation on multiple attributes, either
                       PTL_AND or PTL_OR. When PTL_AND is used, only
                       batch objects having all matches are
                       returned, otherwise an OR is applied
:param attempt: The number of times this function has been
called
:type attempt: int
        :param max_attempts: The maximum number of attempts to
                             perform. C{param_max_attempts}: 5
:type max_attempts: int or None
        :param interval: The interval time between attempts.
                         C{param_interval}: 1s
:param count: If True, attrib will be accumulated using
function counter
:type count: bool
:param extend: passed to the stat call
:param offset: the time to wait before the initial check.
Defaults to 0.
:type offset: int
:param runas: query as a given user. Defaults to current
user
:type runas: str or None
:param msg: Message from last call of this function, this
message will be used while raising
PtlExpectError.
:type msg: str or None
:returns: True if attributes are as expected and False
otherwise
"""
if attempt == 0 and offset > 0:
self.logger.log(level, self.logprefix + 'expect offset set to ' +
str(offset))
time.sleep(offset)
if attrib is None:
attrib = {}
if ATTR_version in attrib and max_attempts is None:
max_attempts = 3
if max_attempts is None:
max_attempts = int(self.ptl_conf['expect_max_attempts'])
if interval is None:
interval = self.ptl_conf['expect_interval']
if attempt >= max_attempts:
_msg = "expected on " + self.logprefix + msg
raise PtlExpectError(rc=1, rv=False, msg=_msg)
if obj_type == SERVER and id is None:
id = self.hostname
if isinstance(attrib, str):
attrib = {attrib: ''}
elif isinstance(attrib, list):
d = {}
for l in attrib:
d[l] = ''
attrib = d
# Add check for substate=42 for jobstate=R, if not added explicitly.
if obj_type == JOB:
add_attribs = {'substate': False}
substate = False
for k, v in attrib.items():
if k == 'job_state' and ((isinstance(v, tuple) and
'R' in v[-1]) or v == 'R'):
add_attribs['substate'] = 42
elif k == 'job_state=R':
add_attribs['substate=42'] = v
elif 'substate' in k:
substate = True
if add_attribs['substate'] and not substate:
attrib['substate'] = add_attribs['substate']
attrop = PTL_AND
del add_attribs, substate
prefix = 'expect on ' + self.logprefix
msg = []
for k, v in attrib.items():
args = None
if isinstance(v, tuple):
operator = v[0]
if len(v) > 2:
args = v[2:]
val = v[1]
else:
operator = op
val = v
msg += [k, PTL_OP_TO_STR[operator].strip()]
if callable(val):
msg += ['callable(' + val.__name__ + ')']
if args is not None:
msg.extend(map(lambda x: str(x), args))
else:
msg += [str(val)]
msg += [PTL_ATTROP_TO_STR[attrop]]
# remove the last converted PTL_ATTROP_TO_STR
if len(msg) > 1:
msg = msg[:-1]
if len(attrib) == 0:
msg += [PTL_OP_TO_STR[op]]
msg += [PBS_OBJ_MAP[obj_type]]
if id is not None:
msg += [str(id)]
if attempt > 0:
msg += ['attempt:', str(attempt + 1)]
# Default count to True if the attribute contains an '=' in its name
# for example 'job_state=R' implies that a count of job_state is needed
if count is None and self.utils.operator_in_attribute(attrib):
count = True
if count:
newattr = self.utils.convert_attributes_by_op(attrib)
if len(newattr) == 0:
newattr = attrib
statlist = [self.counter(obj_type, newattr, id, extend, op=op,
attrop=attrop, level=logging.DEBUG,
runas=runas)]
else:
try:
statlist = self.status(obj_type, attrib, id=id,
level=logging.DEBUG, extend=extend,
runas=runas, logerr=False)
except PbsStatusError:
statlist = []
if (len(statlist) == 0 or statlist[0] is None or
len(statlist[0]) == 0):
if op == UNSET or list(set(attrib.values())) == [0]:
self.logger.log(level, prefix + " ".join(msg) + ' ... OK')
return True
else:
time.sleep(interval)
msg = " no data for " + " ".join(msg)
self.logger.log(level, prefix + msg)
return self.expect(obj_type, attrib, id, op, attrop,
attempt + 1, max_attempts, interval, count,
extend, level=level, msg=msg)
if attrib is None:
time.sleep(interval)
return self.expect(obj_type, attrib, id, op, attrop, attempt + 1,
max_attempts, interval, count, extend,
runas=runas, level=level, msg=" ".join(msg))
for k, v in attrib.items():
varargs = None
if isinstance(v, tuple):
op = v[0]
if len(v) > 2:
varargs = v[2:]
v = v[1]
for stat in statlist:
if k == ATTR_version and k in stat:
m = self.version_tag.match(stat[k])
if m:
stat[k] = m.group('version')
else:
time.sleep(interval)
return self.expect(obj_type, attrib, id, op, attrop,
attempt + 1, max_attempts, interval,
count, extend, runas=runas,
level=level, msg=" ".join(msg))
if k not in stat:
if op == UNSET:
continue
else:
# functions/methods are invoked and their return value
# used on expect
if callable(v):
if varargs is not None:
rv = v(stat[k], *varargs)
else:
rv = v(stat[k])
if isinstance(rv, bool):
if op == NOT:
if not rv:
continue
if rv:
continue
else:
v = rv
stat[k] = self.utils.decode_value(stat[k])
v = self.utils.decode_value(v)
if k == ATTR_version:
stat[k] = LooseVersion(str(stat[k]))
v = LooseVersion(str(v))
if op == EQ and stat[k] == v:
continue
elif op == SET and count and stat[k] == v:
continue
elif op == SET and count in (False, None):
continue
elif op == NE and stat[k] != v:
continue
elif op == LT:
if stat[k] < v:
continue
elif op == GT:
if stat[k] > v:
continue
elif op == LE:
if stat[k] <= v:
continue
elif op == GE:
if stat[k] >= v:
continue
elif op == MATCH_RE:
if re.search(str(v), str(stat[k])):
continue
elif op == MATCH:
if str(stat[k]).find(str(v)) != -1:
continue
if k in stat:
msg += [' got: ' + str(k) + ' = ' + str(stat[k])]
self.logger.info(prefix + " ".join(msg))
time.sleep(interval)
# run custom actions defined for this object type
if self.actions:
for act_obj in self.actions.get_actions_by_type(obj_type):
if act_obj.enabled:
act_obj.action(self, obj_type, attrib, id, op,
attrop)
return self.expect(obj_type, attrib, id, op, attrop,
attempt + 1, max_attempts, interval, count,
extend, level=level, msg=" ".join(msg))
self.logger.log(level, prefix + " ".join(msg) + ' ... OK')
return True
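    # Usage sketch (hypothetical; assumes a Server instance named `server`
    # and a previously submitted job whose id is in `jid`):
    #
    #     server.expect(JOB, {'job_state': 'R'}, id=jid)
    #     server.expect(JOB, {'job_state=R': 2}, count=True)
    #     server.expect(JOB, {'comment': (MATCH_RE, 'Not Running')}, id=jid)
    #
    # expect() re-stats the object and retries until the condition holds or
    # max_attempts is exhausted, at which point PtlExpectError is raised.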
def is_history_enabled(self):
"""
Short-hand method to return the value of job_history_enable
"""
a = ATTR_JobHistoryEnable
attrs = self.status(SERVER, level=logging.DEBUG)[0]
if ((a in attrs.keys()) and attrs[a] == 'True'):
return True
return False
def cleanup_jobs(self, extend=None, runas=None):
"""
Helper function to delete all jobs.
By default this method will determine whether
job_history_enable is on and will cleanup all history
jobs. Specifying an extend parameter could override
this behavior.
        :param runas: Optional name of the user to clean the jobs as
:type runas: str or None
"""
delete_xt = 'force'
select_xt = None
if self.is_history_enabled():
delete_xt += 'deletehist'
select_xt = 'x'
job_ids = self.select(extend=select_xt)
if len(job_ids) > 0:
try:
self.deljob(id=job_ids, extend=delete_xt, runas=runas,
wait=True)
except:
pass
rv = self.expect(JOB, {'job_state': 0}, count=True, op=SET)
if not rv:
return self.cleanup_jobs(extend=extend, runas=runas)
return rv
def cleanup_reservations(self, extend=None, runas=None):
"""
Helper function to delete all reservations
"""
reservations = self.status(RESV, level=logging.DEBUG)
while reservations is not None and len(reservations) != 0:
resvs = [r['id'] for r in reservations]
if len(resvs) > 0:
try:
self.delresv(resvs, logerr=False, runas=runas)
except:
pass
reservations = self.status(RESV, level=logging.DEBUG)
def cleanup_jobs_and_reservations(self, extend='forcedeletehist'):
"""
Helper function to delete all jobs and reservations
:param extend: Optional extend parameter that is passed
                       to delete. It defaults to 'forcedeletehist',
                       which is used in qdel and pbs_deljob() to
                       force delete all jobs, including history jobs
        :type extend: str
"""
rv = self.cleanup_jobs(extend)
self.cleanup_reservations()
return rv
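    # Usage sketch (hypothetical): a test will typically start from a clean
    # state by removing all jobs and reservations first:
    #
    #     server.cleanup_jobs_and_reservations()
    #
    # When job history is enabled, cleanup_jobs() adds the 'deletehist'
    # extend option so history jobs are purged as well.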
def update_attributes(self, obj_type, bs):
"""
Populate objects from batch status data
"""
if bs is None:
return
for binfo in bs:
if 'id' not in binfo:
continue
id = binfo['id']
obj = None
if obj_type == JOB:
if ATTR_owner in binfo:
user = binfo[ATTR_owner].split('@')[0]
else:
user = None
if id in self.jobs:
self.jobs[id].attributes.update(binfo)
if self.jobs[id].username != user:
self.jobs[id].username = user
else:
self.jobs[id] = Job(user, binfo)
obj = self.jobs[id]
elif obj_type in (VNODE, NODE):
if id in self.nodes:
self.nodes[id].attributes.update(binfo)
else:
self.nodes[id] = MoM(id, binfo, diagmap={NODE: None},
server=self)
obj = self.nodes[id]
elif obj_type == SERVER:
self.attributes.update(binfo)
obj = self
elif obj_type == QUEUE:
if id in self.queues:
self.queues[id].attributes.update(binfo)
else:
self.queues[id] = Queue(id, binfo, server=self)
obj = self.queues[id]
elif obj_type == RESV:
if id in self.reservations:
self.reservations[id].attributes.update(binfo)
else:
self.reservations[id] = Reservation(id, binfo)
obj = self.reservations[id]
elif obj_type == HOOK:
if id in self.hooks:
self.hooks[id].attributes.update(binfo)
else:
self.hooks[id] = Hook(id, binfo, server=self)
obj = self.hooks[id]
elif obj_type == SCHED:
if self.scheduler:
self.scheduler.attributes.update(binfo)
else:
if SCHED in self.diagmap:
diag = self.diag
diagmap = self.diagmap
else:
diag = None
diagmap = None
self.scheduler = Scheduler(server=self, diag=diag,
diagmap=diagmap)
self.scheduler.attributes.update(binfo)
obj = self.scheduler
elif obj_type == RSC:
if id in self.resources:
self.resources[id].attributes.update(binfo)
else:
rtype = None
rflag = None
if 'type' in binfo:
rtype = binfo['type']
if 'flag' in binfo:
rflag = binfo['flag']
self.resources[id] = Resource(id, rtype, rflag)
if obj is not None:
self.utils.update_attributes_list(obj)
obj.__dict__.update(binfo)
def counter(self, obj_type=None, attrib=None, id=None, extend=None,
op=None, attrop=None, bslist=None, level=logging.INFO,
idonly=True, grandtotal=False, db_access=None, runas=None,
resolve_indirectness=False):
"""
Accumulate properties set on an object. For example, to
count number of free nodes:
``server.counter(VNODE,{'state':'free'})``
:param obj_type: The type of object to query, one of the
* objects
:param attrib: Attributes to query, can be a string, a
list, a dictionary
:type attrib: str or list or dictionary
:param id: The id of the object to act upon
:param extend: The extended parameter to pass to the stat
call
:param op: The operation used to match attrib to what is
queried. SET or None
:type op: str or None
:param attrop: Operation on multiple attributes, either
PTL_AND, PTL_OR
:param bslist: Optional, use a batch status dict list
instead of an obj_type
:param idonly: if true, return the name/id of the matching
objects
:type idonly: bool
:param db_access: credentials to access db, either a path
to file or dictionary
:type db_access: str or dictionary
:param runas: run as user
:type runas: str or None
"""
self.logit('counter: ', obj_type, attrib, id, level=level)
return self._filter(obj_type, attrib, id, extend, op, attrop, bslist,
PTL_COUNTER, idonly, grandtotal, db_access,
runas=runas,
resolve_indirectness=resolve_indirectness)
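    # Usage sketch (hypothetical):
    #
    #     nfree = server.counter(VNODE, {'state': 'free'})
    #     # e.g. {'state=free': 4}
    #
    # With grandtotal=True the matched attribute values are summed rather
    # than counted, which can be used to total consumable resources.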
def filter(self, obj_type=None, attrib=None, id=None, extend=None, op=None,
attrop=None, bslist=None, idonly=True, grandtotal=False,
db_access=None, runas=None, resolve_indirectness=False):
"""
Filter objects by properties. For example, to filter all
free nodes:``server.filter(VNODE,{'state':'free'})``
For each attribute queried, if idonly is True, a list of
matching object names is returned; if idonly is False, then
the value of each attribute queried is returned.
This is unlike Python's built-in 'filter' that returns a
subset of objects matching from a pool of objects. The
Python filtering mechanism remains very useful in some
situations and should be used programmatically to achieve
desired filtering goals that can not be met easily with
PTL's filter method.
:param obj_type: The type of object to query, one of the
* objects
:param attrib: Attributes to query, can be a string, a
list, a dictionary
:type attrib: str or list or dictionary
:param id: The id of the object to act upon
:param extend: The extended parameter to pass to the stat
call
:param op: The operation used to match attrib to what is
queried. SET or None
:type op: str or None
:param bslist: Optional, use a batch status dict list
instead of an obj_type
:type bslist: List or None
:param idonly: if true, return the name/id of the matching
objects
:type idonly: bool
:param db_access: credentials to access db, either path to
file or dictionary
:type db_access: str or dictionary
:param runas: run as user
:type runas: str or None
"""
self.logit('filter: ', obj_type, attrib, id)
return self._filter(obj_type, attrib, id, extend, op, attrop, bslist,
PTL_FILTER, idonly, db_access, runas=runas,
resolve_indirectness=resolve_indirectness)
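    # Usage sketch (hypothetical):
    #
    #     free = server.filter(VNODE, {'state': 'free'})
    #     # e.g. {'state=free': ['vn[0]', 'vn[1]']} when idonly is True
    #
    # With idonly=False the full batch-status dictionaries of the matching
    # objects are returned instead of just their ids.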
def _filter(self, obj_type=None, attrib=None, id=None, extend=None,
op=None, attrop=None, bslist=None, mode=PTL_COUNTER,
idonly=True, grandtotal=False, db_access=None, runas=None,
resolve_indirectness=False):
if bslist is None:
try:
_a = resolve_indirectness
tmp_bsl = self.status(obj_type, attrib, id,
level=logging.DEBUG, extend=extend,
db_access=db_access, runas=runas,
resolve_indirectness=_a)
del _a
except PbsStatusError:
return None
bslist = self.utils.filter_batch_status(tmp_bsl, attrib)
del tmp_bsl
if bslist is None:
return None
if isinstance(attrib, str):
attrib = attrib.split(',')
total = {}
for bs in bslist:
if isinstance(attrib, list):
# when filtering on multiple values, ensure that they are
# all present on the object, otherwise skip
if attrop == PTL_AND:
match = True
for k in attrib:
if k not in bs:
match = False
if not match:
continue
for a in attrib:
if a in bs:
if op == SET:
k = a
else:
# Since this is a list of attributes, no operator
# was provided so we settle on "equal"
k = a + '=' + str(bs[a])
if mode == PTL_COUNTER:
amt = 1
if grandtotal:
amt = self.utils.decode_value(bs[a])
if not isinstance(amt, (int, float)):
amt = 1
if a in total:
total[a] += amt
else:
total[a] = amt
else:
if k in total:
total[k] += amt
else:
total[k] = amt
elif mode == PTL_FILTER:
if k in total:
if idonly:
total[k].append(bs['id'])
else:
total[k].append(bs)
else:
if idonly:
total[k] = [bs['id']]
else:
total[k] = [bs]
else:
self.logger.error("Unhandled mode " + str(mode))
return None
elif isinstance(attrib, dict):
tmptotal = {} # The running count that will be used for total
# when filtering on multiple values, ensure that they are
# all present on the object, otherwise skip
match = True
for k, v in attrib.items():
if k not in bs:
match = False
if attrop == PTL_AND:
break
else:
continue
amt = self.utils.decode_value(bs[k])
if isinstance(v, tuple):
op = v[0]
val = self.utils.decode_value(v[1])
elif op == SET:
val = None
pass
else:
op = EQ
val = self.utils.decode_value(v)
if ((op == LT and amt < val) or
(op == LE and amt <= val) or
(op == EQ and amt == val) or
(op == GE and amt >= val) or
(op == GT and amt > val) or
(op == NE and amt != val) or
(op == MATCH and str(amt).find(str(val)) != -1) or
(op == MATCH_RE and
re.search(str(val), str(amt))) or
(op == SET)):
# There is a match, proceed to track the attribute
self._filter_helper(bs, k, val, amt, op, mode,
tmptotal, idonly, grandtotal)
elif attrop == PTL_AND:
match = False
if mode == PTL_COUNTER:
# requesting specific key/value pairs should result
# in 0 available elements
tmptotal[str(k) + PTL_OP_TO_STR[op] + str(val)] = 0
break
elif mode == PTL_COUNTER:
tmptotal[str(k) + PTL_OP_TO_STR[op] + str(val)] = 0
if attrop != PTL_AND or (attrop == PTL_AND and match):
for k, v in tmptotal.items():
if k not in total:
total[k] = v
else:
total[k] += v
return total
def _filter_helper(self, bs, k, v, amt, op, mode, total, idonly,
grandtotal):
# default operation to '='
if op is None or op not in PTL_OP_TO_STR:
op = '='
op_str = PTL_OP_TO_STR[op]
if op == SET:
            # override PTL_OP_TO_STR for SET operations
op_str = ''
v = ''
ky = k + op_str + str(v)
if mode == PTL_COUNTER:
incr = 1
if grandtotal:
if not isinstance(amt, (int, float)):
incr = 1
else:
incr = amt
if ky in total:
total[ky] += incr
else:
total[ky] = incr
elif mode == PTL_FILTER:
if ky in total:
if idonly:
total[ky].append(bs['id'])
else:
total[ky].append(bs)
else:
if idonly:
total[ky] = [bs['id']]
else:
total[ky] = [bs]
def logit(self, msg, obj_type, attrib, id, level=logging.INFO):
"""
Generic logging routine for ``IFL`` commands
:param msg: The message to log
:type msg: str
:param obj_type: object type, i.e *
:param attrib: attributes to log
:param id: name of object to log
:type id: str or list
:param level: log level, defaults to ``INFO``
"""
s = []
if self.logger is not None:
if obj_type is None:
obj_type = MGR_OBJ_NONE
s = [msg + PBS_OBJ_MAP[obj_type]]
if id:
if isinstance(id, list):
s += [' ' + ",".join(id)]
else:
s += [' ' + str(id)]
if attrib:
s += [' ' + str(attrib)]
self.logger.log(level, "".join(s))
def equivalence_classes(self, obj_type=None, attrib={}, bslist=None,
op=RESOURCES_AVAILABLE, show_zero_resources=True,
db_access=None, resolve_indirectness=False):
"""
:param obj_type: PBS Object to query, one of *
:param attrib: attributes to build equivalence classes
out of.
:type attrib: dictionary
:param bslist: Optional, list of dictionary representation
of a batch status
:type bslist: List
:param op: set to RESOURCES_AVAILABLE uses the dynamic
amount of resources available, i.e., available -
assigned, otherwise uses static amount of
resources available
        :param db_access: set to either file containing credentials
to DB access or dictionary containing
``{'dbname':...,'user':...,'port':...}``
:type db_access: str or dictionary
"""
if attrib is None:
attrib = {}
if len(attrib) == 0 and obj_type is not None:
if obj_type in (VNODE, NODE):
attrib = ['resources_available.ncpus',
'resources_available.mem', 'state']
elif obj_type == JOB:
attrib = ['Resource_List.select',
'queue', 'array_indices_submitted']
elif obj_type == RESV:
attrib = ['Resource_List.select']
else:
return {}
if bslist is None and obj_type is not None:
# To get the resources_assigned we must stat the entire object so
# bypass the specific attributes that would filter out assigned
if op == RESOURCES_AVAILABLE:
bslist = self.status(obj_type, None, level=logging.DEBUG,
db_access=db_access,
resolve_indirectness=resolve_indirectness)
else:
bslist = self.status(obj_type, attrib, level=logging.DEBUG,
db_access=db_access,
resolve_indirectness=resolve_indirectness)
if bslist is None or len(bslist) == 0:
return {}
# automatically convert an objectlist into a batch status dict list
# for ease of use.
if not isinstance(bslist[0], dict):
bslist = self.utils.objlist_to_dictlist(bslist)
if isinstance(attrib, str):
attrib = attrib.split(',')
self.logger.debug("building equivalence class")
equiv = {}
for bs in bslist:
cls = ()
skip_cls = False
# attrs will be part of the EquivClass object
attrs = {}
# Filter the batch attributes by the attribs requested
for a in attrib:
if a in bs:
amt = self.utils.decode_value(bs[a])
if a.startswith('resources_available.'):
val = a.replace('resources_available.', '')
if (op == RESOURCES_AVAILABLE and
'resources_assigned.' + val in bs):
amt = (int(amt) - int(self.utils.decode_value(
bs['resources_assigned.' + val])))
                            # A negative amt here is not a bug: the
                            # computation is subtractive and only reflects
                            # what is available now for a given duration.
                            # Resources are added back when jobs or
                            # reservations end, so when the amount goes
                            # negative we simply clamp it to 0.
if amt < 0:
amt = 0
# TODO: not a failproof way to catch a memory type
# but PbsTypeSize should return the right value if
# it fails to parse it as a valid memory value
if a.endswith('mem'):
try:
amt = PbsTypeSize().encode(amt)
except:
# we guessed the type incorrectly
pass
else:
val = a
if amt == 0 and not show_zero_resources:
skip_cls = True
break
# Build the key of the equivalence class
cls += (val + '=' + str(amt),)
attrs[val] = amt
# Now that we are done with this object, add it to an equiv class
if len(cls) > 0 and not skip_cls:
if cls in equiv:
equiv[cls].add_entity(bs['id'])
else:
equiv[cls] = EquivClass(cls, attrs, [bs['id']])
return equiv.values()
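    # Usage sketch (hypothetical): group vnodes by available ncpus, mem and
    # state, then display the resulting classes:
    #
    #     eq = server.equivalence_classes(VNODE)
    #     server.show_equivalence_classes(eq)
    #
    # Each entry is an EquivClass keyed by a tuple such as
    # ('ncpus=8', 'mem=4gb', 'state=free').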
def show_equivalence_classes(self, eq=None, obj_type=None, attrib={},
bslist=None, op=RESOURCES_AVAILABLE,
show_zero_resources=True, db_access=None,
resolve_indirectness=False):
"""
helper function to show the equivalence classes
:param eq: equivalence classes as compute by
equivalence_classes see equivalence_classes
for remaining parameters description
        :param db_access: set to either file containing credentials
to DB access or dictionary containing
``{'dbname':...,'user':...,'port':...}``
:type db_access: str or dictionary
"""
if eq is None:
equiv = self.equivalence_classes(obj_type, attrib, bslist, op,
show_zero_resources, db_access,
resolve_indirectness)
else:
equiv = eq
equiv = sorted(equiv, key=lambda e: len(e.entities))
for e in equiv:
# e.show()
print str(e)
def whats_available(self, attrib=None, jobs=None, resvs=None, nodes=None):
"""
Returns what's available as a list of node equivalence
classes listed by availability over time.
:param attrib: attributes to consider
:type attrib: List
:param jobs: jobs to consider, if None, jobs are queried
locally
:param resvs: reservations to consider, if None, they are
queried locally
:param nodes: nodes to consider, if None, they are queried
locally
"""
if attrib is None:
attrib = ['resources_available.ncpus',
'resources_available.mem', 'state']
if resvs is None:
self.status(RESV)
resvs = self.reservations
if jobs is None:
self.status(JOB)
jobs = self.jobs
if nodes is None:
self.status(NODE)
nodes = self.nodes
nodes_id = nodes.keys()
avail_nodes_by_time = {}
def alloc_resource(self, node, resources):
# helper function. Must work on a scratch copy of nodes otherwise
# resources_available will get corrupted
for rsc, value in resources.items():
if isinstance(value, int) or value.isdigit():
avail = node.attributes['resources_available.' + rsc]
nvalue = int(avail) - int(value)
node.attributes['resources_available.' + rsc] = nvalue
# Account for reservations
for resv in resvs.values():
resvnodes = resv.execvnode('resv_nodes')
if resvnodes:
starttime = self.utils.convert_stime_to_seconds(
resv.attributes['reserve_start'])
for node in resvnodes:
for n, resc in node.items():
tm = int(starttime) - int(self.ctime)
if tm < 0 or n not in nodes_id:
continue
if tm not in avail_nodes_by_time:
avail_nodes_by_time[tm] = []
if nodes[n].attributes['sharing'] in ('default_excl',
'force_excl'):
avail_nodes_by_time[tm].append(nodes[n])
try:
nodes_id.remove(n)
except:
pass
else:
ncopy = copy.copy(nodes[n])
ncopy.attributes = copy.deepcopy(
nodes[n].attributes)
avail_nodes_by_time[tm].append(ncopy)
self.alloc_resource(nodes[n], resc)
# go on to look at the calendar of scheduled jobs to run and set
# the node availability according to when the job is estimated to
# start on the node
for job in self.jobs.values():
if (job.attributes['job_state'] != 'R' and
'estimated.exec_vnode' in job.attributes):
estimatednodes = job.execvnode('estimated.exec_vnode')
if estimatednodes:
st = job.attributes['estimated.start_time']
# Tweak for nas format of estimated time that has
# num seconds from epoch followed by datetime
if st.split()[0].isdigit():
starttime = st.split()[0]
else:
starttime = self.utils.convert_stime_to_seconds(st)
for node in estimatednodes:
for n, resc in node.items():
tm = int(starttime) - int(self.ctime)
if (tm < 0 or n not in nodes_id or
nodes[n].state != 'free'):
continue
if tm not in avail_nodes_by_time:
avail_nodes_by_time[tm] = []
if (nodes[n].attributes['sharing'] in
('default_excl', 'force_excl')):
avail_nodes_by_time[tm].append(nodes[n])
try:
nodes_id.remove(n)
except:
pass
else:
ncopy = copy.copy(nodes[n])
ncopy.attributes = copy.deepcopy(
nodes[n].attributes)
avail_nodes_by_time[tm].append(ncopy)
self.alloc_resource(nodes[n], resc)
# remaining nodes are free "forever"
for node in nodes_id:
if self.nodes[node].state == 'free':
if 'infinity' not in avail_nodes_by_time:
avail_nodes_by_time['infinity'] = [nodes[node]]
else:
avail_nodes_by_time['infinity'].append(nodes[node])
        # if there is a dedicated time, move the availability time up to
        # that time as necessary
if self.scheduler:
scheduler = self.scheduler
else:
scheduler = Scheduler(server=self)
scheduler.parse_dedicated_time()
if scheduler.dedicated_time:
dedtime = scheduler.dedicated_time[0]['from'] - int(self.ctime)
if dedtime <= int(time.time()):
dedtime = None
else:
dedtime = None
# finally, build the equivalence classes off of the nodes availability
# over time
self.logger.debug("Building equivalence classes")
whazzup = {}
if 'state' in attrib:
attrib.remove('state')
for tm, nds in avail_nodes_by_time.items():
equiv = self.equivalence_classes(VNODE, attrib, bslist=nds,
show_zero_resources=False)
if dedtime and (tm > dedtime or tm == 'infinity'):
tm = dedtime
if tm != 'infinity':
tm = str(datetime.timedelta(seconds=int(tm)))
whazzup[tm] = equiv
return whazzup
def show_whats_available(self, wa=None, attrib=None, jobs=None,
resvs=None, nodes=None):
"""
helper function to show availability as computed by
whats_available
:param wa: a dictionary of available attributes. see
                   whats_available for a description of the
                   remaining parameters
:type wa: Dictionary
"""
if wa is None:
wa = self.whats_available(attrib, jobs, resvs, nodes)
if len(wa) > 0:
print "%24s\t%s" % ("Duration of availability", "Resources")
print "-------------------------\t----------"
swa = sorted(wa.items(), key=lambda x: x[0])
for (k, eq_classes) in swa:
for eq_cl in eq_classes:
print "%24s\t%s" % (str(k), str(eq_cl))
def utilization(self, resources=None, nodes=None, jobs=None, entity={}):
"""
Return utilization of consumable resources on a set of
nodes
:param nodes: A list of dictionary of nodes on which to
                      compute utilization. Defaults to nodes
resulting from a stat call to the current
server.
:type nodes: List
:param resources: comma-separated list of resources to
compute utilization on. The name of the
resource is for example, ncpus or mem
:type resources: List
:param entity: An optional dictionary of entities to
compute utilization of,
                       ``e.g. {'user': u1, 'group': g1, 'project': p1}``
:type entity: Dictionary
        The utilization is returned as a dictionary mapping each resource
        to an ``[assigned, available]`` pair.
Non-consumable resources are silently ignored.
"""
if nodes is None:
nodes = self.status(NODE)
if jobs is None:
jobs = self.status(JOB)
if resources is None:
rescs = ['ncpus', 'mem']
else:
rescs = resources
utilization = {}
resavail = {}
resassigned = {}
usednodes = 0
totnodes = 0
nodes_set = set()
for res in rescs:
resavail[res] = 0
resassigned[res] = 0
# If an entity is specified utilization must be collected from the
# Jobs usage, otherwise we can get the information directly from
# the nodes.
if len(entity) > 0 and jobs is not None:
for job in jobs:
if 'job_state' in job and job['job_state'] != 'R':
continue
entity_match = True
for k, v in entity.items():
if k not in job or job[k] != v:
entity_match = False
break
if entity_match:
for res in rescs:
r = 'Resource_List.' + res
if r in job:
tmpr = int(self.utils.decode_value(job[r]))
resassigned[res] += tmpr
if 'exec_host' in job:
hosts = ResourceResv.get_hosts(job['exec_host'])
nodes_set |= set(hosts)
for node in nodes:
# skip nodes in non-schedulable state
nstate = node['state']
if ('down' in nstate or 'unavailable' in nstate or
'unknown' in nstate or 'Stale' in nstate):
continue
totnodes += 1
            # If an entity utilization was requested, all used nodes were
            # already filtered into the nodes_set specific to that entity,
            # so we simply add them up. If no entity was requested, it is
            # enough for the node to have a jobs attribute to count it
            # towards the total of used nodes
if len(entity) > 0:
if node['id'] in nodes_set:
usednodes += 1
elif 'jobs' in node:
usednodes += 1
for res in rescs:
avail = 'resources_available.' + res
if avail in node:
val = self.utils.decode_value(node[avail])
if isinstance(val, int):
resavail[res] += val
                        # When matching on an entity, assigned resources
                        # are already accounted for by the job usage
if len(entity) == 0:
assigned = 'resources_assigned.' + res
if assigned in node:
val = self.utils.decode_value(node[assigned])
if isinstance(val, int):
resassigned[res] += val
for res in rescs:
if res in resavail:
if res in resassigned:
if resavail[res] > 0:
utilization[res] = [resassigned[res], resavail[res]]
# Only report nodes utilization if no specific resources were requested
if resources is None:
utilization['nodes'] = [usednodes, totnodes]
return utilization
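    # Usage sketch (hypothetical):
    #
    #     u = server.utilization(resources=['ncpus'])
    #     # e.g. {'ncpus': [8, 64]} meaning 8 of 64 ncpus are assigned
    #     u = server.utilization(entity={'euser': 'pbsuser1'})
    #
    # 'pbsuser1' is an assumed test user; values come back as
    # [assigned, available] pairs rather than precomputed percentages.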
def create_vnodes(self, name=None, attrib=None, num=1, mom=None,
additive=False, sharednode=True, restart=True,
delall=True, natvnode=None, usenatvnode=False,
attrfunc=None, fname=None, vnodes_per_host=1,
createnode=True, expect=True):
"""
helper function to create vnodes.
:param name: prefix name of the vnode(s) to create
:type name: str or None
:param attrib: attributes to assign to each node
:param num: the number of vnodes to create. Defaults to 1
:type num: int
:param mom: the MoM object on which the vnode definition is
to be inserted
:param additive: If True, vnodes are added to the existing
                         vnode defs. Defaults to False.
:type additive: bool
:param sharednode: If True, all vnodes will share the same
                           host. Defaults to True.
:type sharednode: bool
:param restart: If True the MoM will be restarted.
:type restart: bool
:param delall: If True delete all server nodes prior to
inserting vnodes
:type delall: bool
        :param natvnode: name of the natural vnode, i.e. the node
                         name in qmgr -c "create node <name>"
:type natvnode: str or None
:param usenatvnode: count the natural vnode as an
allocatable node.
:type usenatvnode: bool
:param attrfunc: an attribute=value function generator,
see create_vnode_def
:param fname: optional name of the vnode def file
:type fname: str or None
:param vnodes_per_host: number of vnodes per host
:type vnodes_per_host: int
:param createnode: whether to create the node via manage or
not. Defaults to True
:type createnode: bool
:param expect: whether to expect attributes to be set or
not. Defaults to True
:type expect: bool
:returns: True on success and False otherwise
"""
if mom is None or name is None or attrib is None:
self.logger.error("name, attributes, and mom object are required")
return False
if delall:
try:
rv = self.manager(MGR_CMD_DELETE, NODE, None, "")
if rv != 0:
return False
except PbsManagerError:
pass
if natvnode is None:
natvnode = mom.shortname
vdef = mom.create_vnode_def(name, attrib, num, sharednode,
usenatvnode=usenatvnode, attrfunc=attrfunc,
vnodes_per_host=vnodes_per_host)
mom.insert_vnode_def(vdef, fname=fname, additive=additive,
restart=restart)
if createnode:
try:
statm = self.status(NODE, id=natvnode)
except:
statm = []
if len(statm) >= 1:
_m = 'Mom %s already exists, not creating' % (natvnode)
self.logger.info(_m)
else:
if mom.pbs_conf and 'PBS_MOM_SERVICE_PORT' in mom.pbs_conf:
m_attr = {'port': mom.pbs_conf['PBS_MOM_SERVICE_PORT']}
else:
m_attr = None
self.manager(MGR_CMD_CREATE, NODE, m_attr, natvnode)
attrs = {}
# only expect if vnodes were added rather than the nat vnode modified
if expect and num > 0:
for k, v in attrib.items():
attrs[str(k) + '=' + str(self.utils.decode_value(v))] = num
attrs['state=free'] = num
rv = self.expect(VNODE, attrs, attrop=PTL_AND)
else:
rv = True
return rv
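    # Usage sketch (hypothetical; `mom` is a MoM object managed by the test
    # harness):
    #
    #     a = {'resources_available.ncpus': 4, 'resources_available.mem': '2gb'}
    #     server.create_vnodes('vn', a, num=8, mom=mom, sharednode=True)
    #
    # This writes a vnode definition on the MoM host, restarts the MoM and,
    # when expect is True, waits until all 8 vnodes report state=free.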
def create_moms(self, name=None, attrib=None, num=1, delall=True,
createnode=True, conf_prefix='pbs.conf_m',
home_prefix='pbs_m', momhosts=None, init_port=15011,
step_port=2):
"""
        Create MoM configurations and optionally add them to the
server. Unique ``pbs.conf`` files are defined and created
on each hosts on which MoMs are to be created.
:param name: Optional prefix name of the nodes to create.
Defaults to the name of the MoM host.
:type name: str or None
:param attrib: Optional node attributes to assign to the
MoM.
:param num: Number of MoMs to create
:type num: int
:param delall: Whether to delete all nodes on the server.
Defaults to True.
:type delall: bool
:param createnode: Whether to create the nodes and add them
                           to the server. Defaults to True.
:type createnode: bool
        :param conf_prefix: The prefix of the PBS conf file. Defaults
                            to pbs.conf_m
:type conf_prefix: str
:param home_prefix: The prefix of the PBS_HOME directory.
Defaults to pbs_m
:type home_prefix: str
:param momhosts: A list of hosts on which to deploy num
MoMs.
:type momhosts: List
:param init_port: The initial port number to start assigning
                          ``PBS_MOM_SERVICE_PORT`` to.
                          Defaults to 15011.
:type init_port: int
:param step_port: The increments at which ports are
allocated. Defaults to 2.
:type step_port: int
        .. note:: Since PBS requires that
                  PBS_MANAGER_SERVICE_PORT = PBS_MOM_SERVICE_PORT+1,
                  the step number must be greater than or equal to 2.
"""
if not self.isUp():
logging.error("An up and running PBS server on " + self.hostname +
" is required")
return False
if delall:
try:
rc = self.manager(MGR_CMD_DELETE, NODE, None, "")
except PbsManagerError, e:
rc = e.rc
if rc:
if len(self.status(NODE)) > 0:
self.logger.error("create_moms: Error deleting all nodes")
return False
pi = PBSInitServices()
if momhosts is None:
momhosts = [self.hostname]
if attrib is None:
attrib = {}
error = False
for hostname in momhosts:
_pconf = self.du.parse_pbs_config(hostname)
if 'PBS_HOME' in _pconf:
_hp = _pconf['PBS_HOME']
if _hp.endswith('/'):
_hp = _hp[:-1]
_hp = os.path.dirname(_hp)
else:
_hp = '/var/spool'
_np_conf = _pconf
_np_conf['PBS_START_SERVER'] = '0'
_np_conf['PBS_START_SCHED'] = '0'
_np_conf['PBS_START_MOM'] = '1'
for i in xrange(0, num * step_port, step_port):
_np = os.path.join(_hp, home_prefix + str(i))
_n_pbsconf = os.path.join('/etc', conf_prefix + str(i))
_np_conf['PBS_HOME'] = _np
port = init_port + i
_np_conf['PBS_MOM_SERVICE_PORT'] = str(port)
_np_conf['PBS_MANAGER_SERVICE_PORT'] = str(port + 1)
self.du.set_pbs_config(hostname, fout=_n_pbsconf,
confs=_np_conf)
pi.initd(hostname, conf_file=_n_pbsconf, op='start')
m = MoM(hostname, pbsconf_file=_n_pbsconf)
if m.isUp():
m.stop()
if hostname != self.hostname:
m.add_config({'$clienthost': self.hostname})
try:
m.start()
except PbsServiceError:
# The service failed to start
self.logger.error("Service failed to start using port " +
str(port) + "...skipping")
self.du.rm(hostname, _n_pbsconf)
continue
if createnode:
attrib['Mom'] = hostname
attrib['port'] = port
if name is None:
name = hostname.split('.')[0]
_n = name + '-' + str(i)
rc = self.manager(MGR_CMD_CREATE, NODE, attrib, id=_n)
if rc != 0:
self.logger.error("error creating node " + _n)
error = True
if error:
return False
return True
def create_hook(self, name, attrs):
"""
Helper function to create a hook by name.
:param name: The name of the hook to create
:type name: str
:param attrs: The attributes to create the hook with.
        :type attrs: dictionary
        :returns: False if the hook already exists, True otherwise
        :raises: PbsManagerError
"""
hooks = self.status(HOOK)
if ((hooks is None or len(hooks) == 0) or
(name not in map(lambda x: x['id'], hooks))):
self.manager(MGR_CMD_CREATE, HOOK, None, name)
else:
self.logger.error('hook named ' + name + ' exists')
return False
self.manager(MGR_CMD_SET, HOOK, attrs, id=name, expect=True)
return True
def import_hook(self, name, body):
"""
Helper function to import hook body into hook by name.
The hook must have been created prior to calling this
function.
:param name: The name of the hook to import body to
:type name: str
:param body: The body of the hook as a string.
:type body: str
:returns: True on success.
:raises: PbsManagerError
"""
(fd, fn) = self.du.mkstemp()
os.write(fd, body)
os.close(fd)
if not self._is_local:
tmpdir = self.du.get_tempdir(self.hostname)
rfile = os.path.join(tmpdir, os.path.basename(fn))
self.du.run_copy(self.hostname, fn, rfile)
else:
rfile = fn
a = {'content-type': 'application/x-python',
'content-encoding': 'default',
'input-file': rfile}
self.manager(MGR_CMD_IMPORT, HOOK, a, name)
os.remove(rfile)
if not self._is_local:
self.du.rm(self.hostname, rfile)
self.logger.info('server ' + self.shortname +
': imported hook body\n---\n' + body + '---')
return True
def create_import_hook(self, name, attrs=None, body=None, overwrite=True):
"""
Helper function to create a hook, import content into it,
set the event and enable it.
:param name: The name of the hook to create
:type name: str
:param attrs: The attributes to create the hook with.
Event and Enabled are mandatory. No defaults.
        :type attrs: dictionary
:param body: The hook body as a string
:type body: str
:param overwrite: If True, if a hook of the same name
already exists, bypass its creation.
Defaults to True
:returns: True on success and False otherwise
"""
if 'event' not in attrs:
self.logger.error('attrs must specify at least an event and key')
return False
hook_exists = False
hooks = self.status(HOOK)
for h in hooks:
if h['id'] == name:
hook_exists = True
if not hook_exists or not overwrite:
rv = self.create_hook(name, attrs)
if not rv:
return False
else:
if attrs is None:
attrs = {'enabled': 'true'}
rc = self.manager(MGR_CMD_SET, HOOK, attrs, id=name)
if rc != 0:
return False
# In 12.0 A MoM hook must be enabled and the event set prior to
# importing, otherwise the MoM does not get the hook content
return self.import_hook(name, body)
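    # Usage sketch (hypothetical hook name, attributes and body):
    #
    #     hook_body = ("import pbs\n"
    #                  "pbs.logmsg(pbs.LOG_DEBUG, 'hello from hook')\n")
    #     attrs = {'event': 'queuejob', 'enabled': 'true'}
    #     server.create_import_hook('h1', attrs, hook_body)
    #
    # The event attribute is mandatory; the body is imported only after the
    # hook exists and its attributes have been set.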
def evaluate_formula(self, jobid=None, formula=None, full=True,
include_running_jobs=False, exclude_subjobs=True):
"""
Evaluate the job sort formula
:param jobid: If set, evaluate the formula for the given
jobid, if not set,formula is evaluated for
all jobs in state Q
:type jobid: str or None
:param formula: If set use the given formula. If not set,
the server's formula, if any, is used
:param full: If True, returns a dictionary of job
identifiers as keys and the evaluated formula
as values. Returns None if no formula is used.
Each job id formula is returned as a tuple
(s,e) where s is the formula expression
associated to the job and e is the evaluated
numeric value of that expression, for example,
if job_sort_formula is ncpus + mem
a job requesting 2 cpus and 100kb of memory
would return ('2 + 100', 102). If False, if
a jobid is specified, return the integer
value of the evaluated formula.
:type full: bool
:param include_running_jobs: If True, reports formula
value of running jobs.
Defaults to False.
:type include_running_jobs: bool
:param exclude_subjobs: If True, only report formula of
parent job array
:type exclude_subjobs: bool
"""
_f_builtins = ['queue_priority', 'job_priority', 'eligible_time',
'fair_share_perc']
if formula is None:
d = self.status(SERVER, 'job_sort_formula')
if len(d) > 0 and 'job_sort_formula' in d[0]:
formula = d[0]['job_sort_formula']
else:
return None
template_formula = self.utils._make_template_formula(formula)
# to split up the formula into keywords, first convert all possible
# operators into spaces and split the string.
# TODO: The list of operators may need to be expanded
T = string.maketrans('()%+*/-', ' ' * 7)
fres = string.translate(formula, T).split()
if jobid:
d = self.status(JOB, id=jobid, extend='t')
else:
d = self.status(JOB, extend='t')
ret = {}
for job in d:
if not include_running_jobs and job['job_state'] != 'Q':
continue
f_value = {}
# initialize the formula values to 0
for res in fres:
f_value[res] = 0
if 'queue_priority' in fres:
queue = self.status(JOB, 'queue', id=job['id'])[0]['queue']
d = self.status(QUEUE, 'Priority', id=queue)
if d and 'Priority' in d[0]:
qprio = int(d[0]['Priority'])
f_value['queue_priority'] = qprio
else:
continue
if 'job_priority' in fres:
if 'Priority' in job:
jprio = int(job['Priority'])
f_value['job_priority'] = jprio
else:
continue
if 'eligible_time' in fres:
if 'eligible_time' in job:
f_value['eligible_time'] = self.utils.convert_duration(
job['eligible_time'])
if 'fair_share_perc' in fres:
if self.scheduler is None:
self.scheduler = Scheduler(server=self)
if 'fairshare_entity' in self.scheduler.sched_config:
entity = self.scheduler.sched_config['fairshare_entity']
else:
self.logger.error(self.logprefix +
' no fairshare entity in sched config')
continue
if entity not in job:
self.logger.error(self.logprefix +
' job does not have property ' + entity)
continue
try:
fs_info = self.scheduler.query_fairshare(name=job[entity])
if fs_info is not None and 'TREEROOT' in fs_info.perc:
f_value['fair_share_perc'] = \
(fs_info.perc['TREEROOT'] / 100)
except PbsFairshareError:
f_value['fair_share_perc'] = 0
for job_res, val in job.items():
val = self.utils.decode_value(val)
if job_res.startswith('Resource_List.'):
job_res = job_res.replace('Resource_List.', '')
if job_res in fres and job_res not in _f_builtins:
f_value[job_res] = val
tf = string.Template(template_formula)
tfstr = tf.safe_substitute(f_value)
if (jobid is not None or not exclude_subjobs or
(exclude_subjobs and not self.utils.is_subjob(job['id']))):
ret[job['id']] = (tfstr, eval(tfstr))
if not full and jobid is not None and jobid in ret:
return ret[job['id']][1]
return ret
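    # Usage sketch (hypothetical; assumes the server's job_sort_formula was
    # set beforehand, e.g. via self.manager(MGR_CMD_SET, SERVER, ...)):
    #
    #     ret = server.evaluate_formula()
    #     # e.g. {'123.server': ('2 + 100', 102)} for a formula of
    #     # 'ncpus + mem' and a job requesting 2 cpus and 100kb of memory
    #
    # With full=False and a specific jobid, only the evaluated numeric
    # value is returned.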
def _parse_limits(self, container=None, dictlist=None, id=None,
db_access=None):
"""
Helper function to parse limits syntax on a given
container.
:param container: The PBS object to query, one of ``QUEUE``
                          or ``SERVER``. Metascheduling node group
                          limits are not yet query-able
:type container: str or None
:param dictlist: A list of dictionaries off of a batch
status
        :type dictlist: List
:param id: Optional id of the object to query
        :param db_access: set to either file containing credentials
to DB access or dictionary containing
``{'dbname':...,'user':...,'port':...}``
:type db_access: str or dictionary
"""
if container is None:
self.logger.error('parse_limits expect container to be set')
return {}
if dictlist is None:
d = self.status(container, db_access=db_access)
else:
d = dictlist
if not d:
return {}
limits = {}
for obj in d:
# filter the id here instead of during the stat call so that
# we can call a full stat once rather than one stat per object
if id is not None and obj['id'] != id:
continue
for k, v in obj.items():
if k.startswith('max_run'):
v = v.split(',')
for rval in v:
rval = rval.strip("'")
l = self.utils.parse_fgc_limit(k + '=' + rval)
if l is None:
self.logger.error("Couldn't parse limit: " +
k + str(rval))
continue
(lim_type, resource, etype, ename, value) = l
if (etype, ename) not in self.entities:
entity = Entity(etype, ename)
self.entities[(etype, ename)] = entity
else:
entity = self.entities[(etype, ename)]
lim = Limit(lim_type, resource, entity, value,
container, obj['id'])
if container in limits:
limits[container].append(lim)
else:
limits[container] = [lim]
entity.set_limit(lim)
return limits
def parse_server_limits(self, server=None, db_access=None):
"""
Parse all server limits
:param server: list of dictionary of server data
:type server: List
        :param db_access: set to either file containing credentials
to DB access or dictionary containing
``{'dbname':...,'user':...,'port':...}``
:type db_access: str or dictionary
"""
return self._parse_limits(SERVER, server, db_access=db_access)
def parse_queue_limits(self, queues=None, id=None, db_access=None):
"""
Parse queue limits
:param queues: list of dictionary of queue data
:type queues: List
:param id: The id of the queue to parse limit for. If None,
all queue limits are parsed
        :param db_access: set to either file containing credentials
to DB access or dictionary containing
``{'dbname':...,'user':...,'port':...}``
:type db_access: str or dictionary
"""
return self._parse_limits(QUEUE, queues, id=id, db_access=db_access)
def parse_all_limits(self, server=None, queues=None, db_access=None):
"""
Parse all server and queue limits
:param server: list of dictionary of server data
:type server: List
:param queues: list of dictionary of queue data
:type queues: List
        :param db_access: set to either file containing credentials
to DB access or dictionary containing
``{'dbname':...,'user':...,'port':...}``
:type db_access: str or dictionary
"""
if hasattr(self, 'limits'):
del self.limits
slim = self.parse_server_limits(server, db_access=db_access)
qlim = self.parse_queue_limits(queues, id=None, db_access=db_access)
self.limits = dict(slim.items() + qlim.items())
del slim
del qlim
return self.limits
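    # Usage sketch (hypothetical; assumes a max_run limit was set first):
    #
    #     server.manager(MGR_CMD_SET, SERVER, {'max_run': '[u:PBS_GENERIC=4]'})
    #     limits = server.parse_all_limits()
    #     # maps the container (SERVER or QUEUE) to a list of Limit objects
    #
    # limits_info() below builds on this to report per-entity usage against
    # each parsed limit.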
def limits_info(self, etype=None, ename=None, server=None, queues=None,
jobs=None, db_access=None, over=False):
"""
Collect limit information for each entity on which a
``server/queue`` limit is applied.
:param etype: entity type, one of u, g, p, o
:type etype: str or None
:param ename: entity name
:type ename: str or None
:param server: optional list of dictionary representation
of server object
:type server: List
:param queues: optional list of dictionary representation
of queues object
:type queues: List
:param jobs: optional list of dictionary representation of
jobs object
:type jobs: List
        :param db_access: set to either file containing credentials
to DB access or dictionary containing
``{'dbname':...,'user':...,'port':...}``
:type db_access: str or dictionary
:param over: If True, show only entities that are over their
limit. Default is False.
:type over: bool
:returns: A list of dictionary similar to that returned by
a converted batch_status object, i.e., can be
displayed using the Utils.show method
"""
def create_linfo(lim, entity_type, id, used):
"""
Create limit information
:param lim: Limit to apply
:param entity_type: Type of entity
"""
tmp = {}
tmp['id'] = entity_type + ':' + id
c = [PBS_OBJ_MAP[lim.container]]
if lim.container_id:
c += [':', lim.container_id]
tmp['container'] = "".join(c)
s = [str(lim.limit_type)]
if lim.resource:
s += ['.', lim.resource]
tmp['limit_type'] = "".join(s)
tmp['usage/limit'] = "".join([str(used), '/', str(lim.value)])
tmp['remainder'] = int(lim.value) - int(used)
return tmp
def calc_usage(jobs, attr, name=None, resource=None):
"""
Calculate the usage for the entity
:param attr: Job attribute
:param name: Entity name
:type name: str or None
:param resource: PBS resource
:type resource: str or None
:returns: The usage
"""
usage = {}
# initialize usage of the named entity
if name is not None and name not in ('PBS_GENERIC', 'PBS_ALL'):
usage[name] = 0
for j in jobs:
entity = j[attr]
if entity not in usage:
if resource:
usage[entity] = int(
self.utils.decode_value(
j['Resource_List.' + resource]))
else:
usage[entity] = 1
else:
if resource:
usage[entity] += int(
self.utils.decode_value(
j['Resource_List.' + resource]))
else:
usage[entity] += 1
return usage
self.parse_all_limits(server, queues, db_access)
entities_p = self.entities.values()
linfo = []
cache = {}
if jobs is None:
jobs = self.status(JOB)
for entity in sorted(entities_p, key=lambda e: e.name):
for lim in entity.limits:
_t = entity.type
# skip non-matching entity types. We can't skip the entity
# name due to proper handling of the PBS_GENERIC limits
# we also can't skip overall limits
if (_t != 'o') and (etype is not None and etype != _t):
continue
_n = entity.name
a = {}
if lim.container == QUEUE and lim.container_id is not None:
a['queue'] = (EQ, lim.container_id)
if lim.resource:
resource = 'Resource_List.' + lim.resource
a[resource] = (GT, 0)
a['job_state'] = (EQ, 'R')
a['substate'] = (EQ, 42)
if etype == 'u' and ename is not None:
a['euser'] = (EQ, ename)
else:
a['euser'] = (SET, '')
if etype == 'g' and ename is not None:
a['egroup'] = (EQ, ename)
else:
a['egroup'] = (SET, '')
if etype == 'p' and ename is not None:
a['project'] = (EQ, ename)
else:
a['project'] = (SET, '')
# optimization: cache filtered results
d = None
for v in cache.keys():
if cmp(a, eval(v)) == 0:
d = cache[v]
break
if d is None:
d = self.filter(JOB, a, bslist=jobs, attrop=PTL_AND,
idonly=False, db_access=db_access)
cache[str(a)] = d
if not d or 'job_state=R' not in d:
# in the absence of jobs, display limits defined with usage
# of 0
if ename is not None:
_u = {ename: 0}
else:
_u = {_n: 0}
else:
if _t in ('u', 'o'):
_u = calc_usage(
d['job_state=R'], 'euser', _n, lim.resource)
# an overall limit applies across all running jobs
if _t == 'o':
all_used = sum(_u.values())
for k in _u.keys():
_u[k] = all_used
elif _t == 'g':
_u = calc_usage(
d['job_state=R'], 'egroup', _n, lim.resource)
elif _t == 'p':
_u = calc_usage(
d['job_state=R'], 'project', _n, lim.resource)
for k, used in _u.items():
if not over or (int(used) > int(lim.value)):
if ename is not None and k != ename:
continue
if _n in ('PBS_GENERIC', 'PBS_ALL'):
if k not in ('PBS_GENERIC', 'PBS_ALL'):
k += '/' + _n
elif _n != k:
continue
tmp_linfo = create_linfo(lim, _t, k, used)
linfo.append(tmp_linfo)
del a
del cache
return linfo
def __insert_jobs_in_db(self, jobs, hostname=None):
"""
An experimental interface that converts jobs from file
into entries in the PBS database that can be recovered
upon server restart if all other ``objects``, ``queues``,
``resources``, etc... are already defined.
The interface to PBS used in this method is incomplete
and will most likely cause serious issues. Use only for
development purposes
"""
if not jobs:
return []
if hostname is None:
hostname = socket.gethostname()
# a very crude, and not quite maintainable, way to get the flag value
# of an attribute. This is one of the reasons why this conversion
# of jobs is highly experimental
flag_map = {'ctime': 9, 'qtime': 9, 'hop_count': 9, 'queue_rank': 9,
'queue_type': 9, 'etime': 9, 'job_kill_delay': 9,
'run_version': 9, 'job_state': 9, 'exec_host': 9,
'exec_host2': 9, 'exec_vnode': 9, 'mtime': 9, 'stime': 9,
'substate': 9, 'hashname': 9, 'comment': 9, 'run_count': 9,
'schedselect': 13}
state_map = {'Q': 1, 'H': 2, 'W': 3, 'R': 4, 'E': 5, 'X': 6, 'B': 7}
job_attr_stmt = ("INSERT INTO pbs.job_attr (ji_jobid, attr_name, "
"attr_resource, attr_value, attr_flags)")
job_stmt = ("INSERT INTO pbs.job (ji_jobid, ji_sv_name, ji_state, "
"ji_substate,ji_svrflags, ji_numattr,"
" ji_ordering, ji_priority, ji_stime, ji_endtbdry, "
"ji_queue, ji_destin, ji_un_type, ji_momaddr, "
"ji_momport, ji_exitstat, ji_quetime, ji_rteretry, "
"ji_fromsock, ji_fromaddr, ji_4jid, ji_4ash, "
"ji_credtype, ji_qrank, ji_savetm, ji_creattm)")
all_stmts = []
for job in jobs:
keys = []
values = []
flags = []
for k, v in job.items():
if k in ('id', 'Mail_Points', 'Mail_Users'):
continue
keys.append(k)
if not v.isdigit():
values.append("'" + v + "'")
else:
values.append(v)
if k in flag_map:
flags.append(flag_map[k])
elif k.startswith('Resource_List'):
flags.append(15)
else:
flags.append(11)
jobid = job['id'].split('.')[0] + '.' + hostname
for i in range(len(keys)):
stmt = job_attr_stmt
stmt += " VALUES('" + jobid + "', "
if '.' in keys[i]:
k, v = keys[i].split('.')
stmt += "'" + k + "', '" + v + "'" + ", "
else:
stmt += "'" + keys[i] + "', ''" + ", "
stmt += values[i] + "," + str(flags[i])
stmt += ");"
self.logger.debug(stmt)
all_stmts.append(stmt)
js = job['job_state']
svrflags = 1
state = 1
if js in state_map:
state = state_map[js]
if state == 4:
# Other states svrflags aren't handled and will
# cause issues, another reason this is highly experimental
svrflags = 12289
tm = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
stmt = job_stmt
stmt += " VALUES('" + jobid + "', 1, "
stmt += str(state) + ", " + job['substate']
stmt += ", " + str(svrflags)
stmt += ", 0, 0, 0"
if 'stime' in job:
self.logger.debug(job['stime'])
st = time.strptime(job['stime'], "%a %b %d %H:%M:%S %Y")
stmt += ", " + str(time.mktime(st))
else:
stmt += ", 0"
stmt += ", 0"
stmt += ", '" + job['queue'] + "'"
if 'exec_host2' in job:
stmt += ", " + job['exec_host2']
else:
stmt += ", ''"
stmt += ", 0, 0, 0, 0, 0, 0, 0, 0, '', '', 0, 0"
stmt += ", '" + tm + "', '" + tm + "');"
self.logger.debug(stmt)
all_stmts.append(stmt)
return all_stmts
def clusterize(self, conf_file=None, hosts=None, import_jobs=False,
db_creds_file=None):
"""
Mimic a ``pbs_diag`` snapshot onto a set of hosts running
a PBS ``server``, ``scheduler``, and ``MoM``.
This method clones the following information from the diag:
``Server attributes``
``Server resourcedef``
``Hooks``
``Scheduler configuration``
``Scheduler resource_group``
``Scheduler holiday file``
``Per Queue attributes``
Nodes are copied as a vnode definition file inserted into
each host's MoM instance.
Currently no support for cloning the server 'sched' object,
nor to copy nodes to multi-mom instances.
Jobs are copied over only if import_jobs is True, see below
for details
:param asdiag: Path to the pbs_diag snapshot to use
:type asdiag: str
:param conf_file: Configuration file for the MoM instance
:param hosts: List of hosts on which to clone the diag
snapshot
:type hosts: List
:param import_jobs: [Experimental] if True, jobs from the
pbs_diag are imported into the host's
database. There are several caveats to
this option:
The scripts are not imported
The users and groups are not created on
the local system. There are no actual
processes created on the MoM for each
job so operations on the job such as
signals or delete will fail (delete -W
force will still work)
:type import_jobs: bool
:param db_creds_file: Path to file containing credentials
to access the DB
:type db_creds_file: str or None
"""
if not self.has_diag:
return
if hosts is None:
return
for h in hosts:
svr = Server(h)
sched = Scheduler(server=svr, diag=self.diag, diagmap=self.diagmap)
try:
svr.manager(MGR_CMD_DELETE, NODE, None, id="")
except:
pass
svr.revert_to_defaults(delqueues=True, delhooks=True)
local = svr.pbs_conf['PBS_HOME']
diag_rdef = os.path.join(self.diag, 'server_priv', 'resourcedef')
diag_sc = os.path.join(self.diag, 'sched_priv', 'sched_config')
diag_rg = os.path.join(self.diag, 'sched_priv', 'resource_group')
diag_hldy = os.path.join(self.diag, 'sched_priv', 'holidays')
nodes = os.path.join(self.diag, 'pbsnodes_va.out')
diag_hooks = os.path.join(self.diag, 'qmgr_ph.out')
diag_ps = os.path.join(self.diag, 'qmgr_ps.out')
local_rdef = os.path.join(local, 'server_priv', 'resourcedef')
local_sc = os.path.join(local, 'sched_priv', 'sched_config')
local_rg = os.path.join(local, 'sched_priv', 'resource_group')
local_hldy = os.path.join(local, 'sched_priv', 'holidays')
_fcopy = [(diag_rdef, local_rdef), (diag_sc, local_sc),
(diag_rg, local_rg), (diag_hldy, local_hldy)]
# Restart since resourcedef may have changed
svr.restart()
if os.path.isfile(diag_ps):
tmp_ps = open(diag_ps)
cmd = [os.path.join(svr.pbs_conf['PBS_EXEC'], 'bin', 'qmgr')]
self.du.run_cmd(h, cmd, stdin=tmp_ps, sudo=True, logerr=False)
tmp_ps.close()
# Unset any site-sensitive attributes
for a in ['pbs_license_info', 'manager', 'operators',
'mail_from', 'acl_roots', 'acl_hosts']:
try:
svr.manager(MGR_CMD_UNSET, SERVER, a, sudo=True)
except:
pass
for (d, l) in _fcopy:
if os.path.isfile(d):
self.logger.info('copying ' + d + ' to ' + l)
self.du.run_copy(h, src=d, dest=l, sudo=True)
diag_sched = self.status(SCHED)
for ds in diag_sched:
for k, v in ds.items():
if k != 'id':
try:
svr.manager(MGR_CMD_SET, SCHED, {k: v},
logerr=False)
except PbsManagerError:
self.logger.warning(
'Skipping sched attribute ' + k)
sched.signal('-HUP')
if os.path.isfile(nodes):
f = open(nodes)
lines = f.readlines()
f.close()
dl = self.utils.convert_to_dictlist(lines)
vdef = self.utils.dictlist_to_vnodedef(dl)
if vdef:
try:
svr.manager(MGR_CMD_DELETE, NODE, None, "")
except:
pass
MoM(h, pbsconf_file=conf_file).insert_vnode_def(vdef)
svr.restart()
svr.manager(MGR_CMD_CREATE, NODE, id=svr.shortname)
# check if any node is associated to a queue.
# This is needed because the queues 'hasnodes' attribute
# does not get set through vnode def update and must be set
# via qmgr. It only needs to be set once, not for each node
qtoset = {}
for n in dl:
if 'queue' in n and n['queue'] not in qtoset:
qtoset[n['queue']] = n['id']
# before setting queue on nodes make sure that the vnode
# def is all set
svr.expect(NODE, {'state=free': (GE, len(dl))}, interval=3)
for k, v in qtoset.items():
svr.manager(MGR_CMD_SET, NODE, {'queue': k}, id=v)
# populate hooks
if os.path.isfile(diag_hooks):
tmp_hook = open(diag_hooks)
cmd = [os.path.join(svr.pbs_conf['PBS_EXEC'], 'bin', 'qmgr')]
self.du.run_cmd(h, cmd, stdin=tmp_hook, sudo=True)
tmp_hook.close()
# import jobs
if import_jobs:
jobs = self.status(JOB)
sql_stmt = self.__insert_jobs_in_db(jobs, h)
print "\n".join(sql_stmt)
if db_creds_file is not None:
pass
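# Illustrative sketch (added commentary): a typical, hypothetical invocation
# on a Server built from a pbs_diag snapshot might look like:
#     svr = Server('hostA', diag='/path/to/pbs_diag')
#     svr.clusterize(hosts=['hostB', 'hostC'], import_jobs=False)
# Host names and the diag path are placeholders, not values from this module.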
class EquivClass(PBSObject):
"""
Equivalence class holds information on a collection of entities
grouped according to a set of attributes
:param attributes: Dictionary of attributes
:type attributes: Dictionary
:param entities: List of entities
:type entities: List
"""
def __init__(self, name, attributes=None, entities=None):
    self.name = name
    # avoid shared mutable default arguments across instances
    self.attributes = attributes if attributes is not None else {}
    self.entities = entities if entities is not None else []
    self.logger = logging.getLogger(__name__)
def add_entity(self, entity):
"""
Add an entity to the equivalence class
:param entity: Entity to add
:type entity: str
"""
if entity not in self.entities:
self.entities.append(entity)
def __str__(self):
s = [str(len(self.entities)), ":", ":".join(self.name)]
return "".join(s)
def show(self, showobj=False):
"""
Show the entities
:param showobj: If true then show the entities
:type showobj: bool
"""
s = " && ".join(self.name) + ': '
if showobj:
s += str(self.entities)
else:
s += str(len(self.entities))
print s
return s
class Resource(PBSObject):
"""
PBS resource referenced by name, type and flag
:param name: Resource name
:type name: str or None
:param type: Type of resource
"""
def __init__(self, name=None, type=None, flag=None):
PBSObject.__init__(self, name)
self.set_name(name)
self.set_type(type)
self.set_flag(flag)
def set_name(self, name):
"""
Set the resource name
"""
self.name = name
self.attributes['id'] = name
def set_type(self, type):
"""
Set the resource type
"""
self.type = type
self.attributes['type'] = type
def set_flag(self, flag):
"""
Set the flag
"""
self.flag = flag
self.attributes['flag'] = flag
def __str__(self):
s = [self.attributes['id']]
if 'type' in self.attributes:
s.append('type=' + self.attributes['type'])
if 'flag' in self.attributes:
s.append('flag=' + self.attributes['flag'])
return " ".join(s)
class Holidays():
"""
Descriptive class for the holidays file.
"""
def __init__(self):
self.year = {'id': "YEAR", 'value': None, 'valid': False}
self.weekday = {'id': "weekday", 'p': None, 'np': None, 'valid': None,
'position': None}
self.monday = {'id': "monday", 'p': None, 'np': None, 'valid': None,
'position': None}
self.tuesday = {'id': "tuesday", 'p': None, 'np': None, 'valid': None,
'position': None}
self.wednesday = {'id': "wednesday", 'p': None, 'np': None,
'valid': None, 'position': None}
self.thursday = {'id': "thursday", 'p': None, 'np': None,
'valid': None, 'position': None}
self.friday = {'id': "friday", 'p': None, 'np': None, 'valid': None,
'position': None}
self.saturday = {'id': "saturday", 'p': None, 'np': None,
'valid': None, 'position': None}
self.sunday = {'id': "sunday", 'p': None, 'np': None, 'valid': None,
'position': None}
self.days_set = [] # list of set days
self._days_map = {'weekday': self.weekday, 'monday': self.monday,
'tuesday': self.tuesday, 'wednesday': self.wednesday,
'thursday': self.thursday, 'friday': self.friday,
'saturday': self.saturday, 'sunday': self.sunday}
self.holidays = [] # list of calendar holidays
def __str__(self):
"""
Return the content to write to holidays file as a string
"""
content = []
if self.year['valid']:
content.append(self.year['id'] + "\t" +
self.year['value'])
for i in range(0, len(self.days_set)):
content.append(self.days_set[i]['id'] + "\t" +
self.days_set[i]['p'] + "\t" +
self.days_set[i]['np'])
# Add calendar holidays
for day in self.holidays:
content.append(day)
return "\n".join(content)
class Scheduler(PBSService):
"""
Container of Scheduler related properties
:param hostname: The hostname on which the scheduler instance
is operating
:type hostname: str or None
:param server: A PBS server instance to which this scheduler
is associated
:param pbsconf_file: path to a PBS configuration file
:type pbsconf_file: str or None
:param diagmap: A dictionary of PBS objects (node,server,etc)
to mapped files from PBS diag directory
:type diagmap: Dictionary
:param diag: path to PBS diag directory (this overrides
diagmap)
:type diag: str or None
:param db_access: set to either file containing credentials
to DB access or dictionary containing
``{'dbname':...,'user':...,'port':...}``
:type db_access: str or dictionary
"""
# A vanilla scheduler configuration. This set may change based on
# updates to PBS
sched_dflt_config = {
"backfill": "true ALL",
"backfill_prime": "false ALL",
"help_starving_jobs": "true ALL",
"max_starve": "24:00:00",
"strict_ordering": "false ALL",
"provision_policy": "\"aggressive_provision\"",
"preempt_order": "\"SCR\"",
"fairshare_entity": "euser",
"dedicated_prefix": "ded",
"primetime_prefix": "p_",
"nonprimetime_prefix": "np_",
"preempt_queue_prio": "150",
"preempt_prio": "\"express_queue, normal_jobs\"",
"load_balancing": "false ALL",
"prime_exempt_anytime_queues": "false",
"round_robin": "False all",
"fairshare_usage_res": "cput",
"smp_cluster_dist": "pack",
"fair_share": "false ALL",
"preempt_sort": "min_time_since_start",
"node_sort_key": "\"sort_priority HIGH\" ALL",
"sort_queues": "true ALL",
"by_queue": "True ALL",
"preemptive_sched": "true ALL",
"resources": "\"ncpus, mem, arch, host, vnode, aoe\"",
"log_filter": "3328 ",
}
sched_config_options = ["node_group_key",
"dont_preempt_starving",
"fairshare_enforce_no_shares",
"strict_ordering",
"resource_unset_infinite",
"sync_time",
"unknown_shares",
"log_filter",
"dedicated_prefix",
"load_balancing",
"help_starving_jobs",
"max_starve",
"sort_queues",
"backfill",
"primetime_prefix",
"nonprimetime_prefix",
"backfill_prime",
"prime_exempt_anytime_queues",
"prime_spill",
"prime_exempt_anytime_queues",
"prime_spill",
"resources",
"mom_resources",
"smp_cluster_dist",
"preempt_queue_prio",
"preempt_suspend",
"preempt_checkpoint",
"preempt_requeue",
"preemptive_sched",
"dont_preempt_starving",
"node_group_key",
"dont_preempt_starving",
"fairshare_enforce_no_shares",
"strict_ordering",
"resource_unset_infinite",
"provision_policy",
"resv_confirm_ignore",
"allow_aoe_calendar",
"max_job_check",
"preempt_attempts",
"update_comments",
"sort_by",
"key",
"preempt_starving",
"preempt_fairshare",
"load_balancing_rr",
"assign_ssinodes",
"cpus_per_ssinode",
"mem_per_ssinode",
"strict_fifo",
"mem_per_ssinode",
"strict_fifo"
]
fs_re = '(?P<name>[\S]+)[\s]*:[\s]*Grp:[\s]*(?P<Grp>[-]*[0-9]*)' + \
'[\s]*cgrp:[\s]*(?P<cgrp>[-]*[0-9]*)[\s]*' + \
'Shares:[\s]*(?P<Shares>[-]*[0-9]*)[\s]*Usage:[\s]*' + \
'(?P<Usage>[0-9]+)[\s]*Perc:[\s]*(?P<Perc>.*)%'
fs_tag = re.compile(fs_re)
def __init__(self, hostname=None, server=None, pbsconf_file=None,
diagmap={}, diag=None, db_access=None):
self.sched_config_file = None
self.dflt_holidays_file = None
self.holidays_file = None
self.sched_config = {}
self._sched_config_comments = {}
self._config_order = []
self.dedicated_time_file = None
self.dedicated_time = None
self.dedicated_time_as_str = None
self.fairshare_tree = None
self.resource_group = None
self.server = None
self.server_dyn_res = None
self.deletable_files = ['usage']
self.logger = logging.getLogger(__name__)
if server is not None:
self.server = server
if diag is None and self.server.diag is not None:
diag = self.server.diag
if (len(diagmap) == 0) and (len(self.server.diagmap) != 0):
diagmap = self.server.diagmap
else:
self.server = Server(hostname, pbsconf_file=pbsconf_file,
db_access=db_access, diag=diag,
diagmap=diagmap)
if hostname is None:
hostname = self.server.hostname
self.server.scheduler = self
PBSService.__init__(self, hostname, pbsconf_file=pbsconf_file,
diag=diag, diagmap=diagmap)
_m = ['scheduler ', self.shortname]
if pbsconf_file is not None:
_m += ['@', pbsconf_file]
_m += [': ']
self.logprefix = "".join(_m)
self.pi = PBSInitServices(hostname=self.hostname,
conf=self.pbs_conf_file)
self.pbs_conf = self.server.pbs_conf
self.sched_config_file = os.path.join(self.pbs_conf['PBS_HOME'],
'sched_priv', 'sched_config')
self.dflt_sched_config_file = os.path.join(self.pbs_conf['PBS_EXEC'],
'etc', 'pbs_sched_config')
self.parse_sched_config(self.sched_config_file)
self.dflt_holidays_file = os.path.join(self.pbs_conf['PBS_EXEC'],
'etc', 'pbs_holidays')
self.holidays_file = os.path.join(self.pbs_conf['PBS_HOME'],
'sched_priv', 'holidays')
self.dflt_resource_group_file = os.path.join(self.pbs_conf['PBS_EXEC'],
'etc',
'pbs_resource_group')
self.resource_group_file = os.path.join(self.pbs_conf['PBS_HOME'],
'sched_priv', 'resource_group')
self.fairshare_tree = self.query_fairshare()
rg = self.parse_resource_group(self.hostname, self.resource_group_file)
self.resource_group = rg
try:
attrs = self.server.status(SCHED, level=logging.DEBUG,
db_access=db_access)
if attrs is not None and len(attrs) > 0:
self.attributes = attrs[0]
except (PbsManagerError, PbsStatusError), e:
self.logger.error('Error querying scheduler %s' % e.msg)
self.version = None
self.holidays_obj = Holidays()
self.holidays_parse_file(level=logging.DEBUG)
def isUp(self):
"""
Check for PBS scheduler up
"""
return super(Scheduler, self)._isUp(self)
def signal(self, sig):
"""
Send a signal to PBS scheduler
"""
self.logger.info('scheduler ' + self.shortname + ': sent signal ' +
sig)
return super(Scheduler, self)._signal(sig, inst=self)
def get_pid(self):
"""
Get the PBS scheduler pid
"""
return super(Scheduler, self)._get_pid(inst=self)
def all_instance_pids(self):
"""
Get the all pids for the instance
"""
return super(Scheduler, self)._all_instance_pids(inst=self)
def start(self, args=None, launcher=None):
"""
Start the scheduler
:param args: Arguments required to start the scheduler
:type args: str
:param launcher: Optional utility to invoke the launch of the service
:type launcher: str or list
"""
if args is not None or launcher is not None:
return super(Scheduler, self)._start(inst=self, args=args,
launcher=launcher)
else:
try:
rv = self.pi.start_sched()
except PbsInitServicesError as e:
raise PbsServiceError(rc=e.rc, rv=e.rv, msg=e.msg)
return rv
def stop(self, sig=None):
"""
Stop the PBS scheduler
:param sig: Signal to stop the PBS scheduler
:type sig: str
"""
if sig is not None:
self.logger.info(self.logprefix + 'stopping Scheduler on host ' +
self.hostname)
return super(Scheduler, self)._stop(sig, inst=self)
else:
try:
self.pi.stop_sched()
except PbsInitServicesError as e:
raise PbsServiceError(rc=e.rc, rv=e.rv, msg=e.msg)
return True
def restart(self):
"""
Restart the PBS scheduler
"""
if self.isUp():
if not self.stop():
return False
return self.start()
def log_match(self, msg=None, id=None, n=50, tail=True, allmatch=False,
regexp=False, day=None, max_attempts=1, interval=1,
starttime=None, endtime=None, level=logging.INFO):
"""
Match the scheduler logs
"""
return self._log_match(self, msg, id, n, tail, allmatch, regexp, day,
max_attempts, interval, starttime, endtime,
level=level)
def pbs_version(self):
"""
Get the version of the scheduler instance
"""
if self.version:
return self.version
version = self.log_match('pbs_version', tail=False)
if version:
version = version[1].strip().split('=')[1]
else:
version = "unknown"
self.version = LooseVersion(version)
return self.version
def parse_sched_config(self, schd_cnfg=None):
"""
Parse a scheduling configuration file into a dictionary.
Special handling of identical keys ``(e.g., node_sort_key)``
is done by appending a delimiter, '%', between each value
of the key. When printed back to file, each delimited entry
gets written on a line of its own. For example, the python
dictionary entry:
``{'node_sort_key':
["ncpus HIGH unusued" prime", "node_priority HIH"
non-prime"]}``
will get written as:
``node_sort_key: "ncpus HIGH unusued" prime``
``node_sort_key: "node_priority HIGH" non-prime``
Returns sched_config dictionary that gets reinitialized
every time this method is called.
"""
# sched_config is initialized
if self.sched_config:
del(self.sched_config)
self.sched_config = {}
self._sched_config_comments = {}
self._config_order = []
if schd_cnfg is None:
if self.sched_config_file is not None:
schd_cnfg = self.sched_config_file
else:
self.logger.error('no scheduler configuration file to parse')
return False
try:
conf_opts = self.du.cat(self.hostname, schd_cnfg,
sudo=(not self.has_diag),
level=logging.DEBUG2)['out']
except:
self.logger.error('error parsing scheduler configuration')
return False
_comment = []
conf_re = re.compile(
'[#]?[\s]*(?P<conf_id>[\w]+):[\s]*(?P<conf_val>.*)')
for line in conf_opts:
m = conf_re.match(line)
if m:
key = m.group('conf_id')
val = m.group('conf_val')
# line is a comment, it could be a commented out scheduling
# option, or the description of an option. It could also be
# that part of the description is an example setting of the
# option.
# We must keep track of commented out options in order to
# rewrite the configuration in the same order as it was defined
if line.startswith('#'):
if key in self.sched_config_options:
_comment += [line]
if key in self._sched_config_comments:
self._sched_config_comments[key] += _comment
_comment = []
else:
self._sched_config_comments[key] = _comment
_comment = []
if key not in self._config_order:
self._config_order.append(key)
else:
_comment += [line]
continue
if key not in self._sched_config_comments:
self._sched_config_comments[key] = _comment
else:
self._sched_config_comments[key] += _comment
if key not in self._config_order:
self._config_order.append(key)
_comment = []
if key in self.sched_config:
if isinstance(self.sched_config[key], list):
if isinstance(val, list):
self.sched_config[key].extend(val)
else:
self.sched_config[key].append(val)
else:
if isinstance(val, list):
self.sched_config[key] = [self.sched_config[key]]
self.sched_config[key].extend(val)
else:
self.sched_config[key] = [self.sched_config[key],
val]
else:
self.sched_config[key] = val
else:
_comment += [line]
self._sched_config_comments['PTL_SCHED_CONFIG_TAIL'] = _comment
return True
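# Illustrative sketch (added commentary): as the parser above shows, repeated
# keys accumulate into a Python list, so a sched_config with two
# node_sort_key lines would roughly yield (``sched`` is an assumed
# Scheduler instance):
#     sched.parse_sched_config()
#     sched.sched_config['node_sort_key']
#     # -> ['"ncpus HIGH unused" prime', '"node_priority HIGH" non-prime']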
def check_defaults(self, config):
"""
Check the values in argument config against default values
"""
if len(config.keys()) == 0:
return
for k, v in self.sched_dflt_config.items():
if k in config:
s1 = v
s1 = s1.replace(" ", "")
s1 = s1.replace("\t", "").strip()
s2 = config[k]
s2 = s2.replace(" ", "")
s2 = s2.replace("\t", "").strip()
if s1 != s2:
self.logger.debug(k + ' non-default: ' + v +
' != ' + config[k])
def apply_config(self, config=None, validate=True, path=None):
"""
Apply the configuration specified by config
:param config: Configurations to set. Default: self.
sched_config
:param validate: If True (the default) validate that
settings did not yield an error.
Validation is done by parsing the
scheduler log which, in some cases may
be slow and therefore undesirable.
:type validate: bool
:param path: Optional path to file to which configuration
is written. If None, the configuration is
written to PBS_HOME/sched_priv/sched_config
:type path: str
:returns: True on success and False otherwise. Success
means that upon applying the new configuration
the scheduler did not emit an
"Error reading line" in its log file.
"""
if config is None:
config = self.sched_config
if len(config) == 0:
return True
reconfig_time = int(time.time())
try:
(fd, fn) = self.du.mkstemp()
for k in self._config_order:
if k in config:
if k in self._sched_config_comments:
os.write(fd, "\n".join(self._sched_config_comments[k]))
os.write(fd, "\n")
v = config[k]
if isinstance(v, list):
for val in v:
os.write(fd, k + ": " + str(val) + "\n")
else:
os.write(fd, k + ": " + str(v) + "\n")
elif k in self._sched_config_comments:
os.write(fd, "\n".join(self._sched_config_comments[k]))
os.write(fd, "\n")
for k, v in self.sched_config.items():
if k not in self._config_order:
os.write(fd, k + ": " + str(v).strip() + "\n")
if 'PTL_SCHED_CONFIG_TAIL' in self._sched_config_comments:
os.write(fd, "\n".join(
self._sched_config_comments['PTL_SCHED_CONFIG_TAIL']))
os.write(fd, "\n")
os.close(fd)
if path is None:
sp = os.path.join(self.pbs_conf['PBS_HOME'], "sched_priv",
"sched_config")
if self.du.is_localhost(self.hostname):
self.du.run_copy(self.hostname, sp, sp + '.bak', sudo=True)
else:
cmd = ['mv', sp, sp + '.bak']
self.du.run_cmd(self.hostname, cmd, sudo=True)
else:
sp = path
self.du.run_copy(self.hostname, fn, sp, mode=0644, sudo=True)
os.remove(fn)
self.du.chown(self.hostname, path=sp, uid=0, gid=0, sudo=True)
self.logger.debug(self.logprefix + "updated configuration")
except:
m = self.logprefix + 'error in apply_config '
self.logger.error(m + str(traceback.print_exc()))
raise PbsSchedConfigError(rc=1, rv=False, msg=m)
if validate:
self.signal('-HUP')
m = self.log_match("Error reading line", n=10,
starttime=reconfig_time)
if m is None:
# no match, successful config
return True
raise PbsSchedConfigError(rc=1, rv=False, msg=str(m))
return True
def set_sched_config(self, confs={}, apply=True, validate=True):
"""
set a ``sched_config`` property
:param confs: dictionary of key value sched_config entries
:type confs: Dictionary
:param apply: if True (the default), apply configuration.
:type apply: bool
:param validate: if True (the default), validate the
configuration settings.
:type validate: bool
"""
self.logger.info(self.logprefix + "config " + str(confs))
self.sched_config = dict(self.sched_config.items() + confs.items())
if apply:
try:
self.apply_config(validate=validate)
except PbsSchedConfigError:
for k in confs:
del self.sched_config[k]
self.apply_config(validate=validate)
return True
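# Illustrative sketch (added commentary): typical usage with an assumed
# Scheduler instance ``sched``:
#     sched.set_sched_config({'backfill': 'true ALL', 'log_filter': '2048'})
# The keys mirror entries of sched_dflt_config above; validation can be
# skipped with validate=False when log parsing is too slow.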
def add_server_dyn_res(self, custom_resource, script_body=None, file=None,
apply=True, validate=True):
"""
Add a server dynamic resource script or file to the scheduler
configuration
:param custom_resource: The name of the custom resource to
define
:type custom_resource: str
:param script_body: The body of the server dynamic resource
:param file: Alternatively to passing the script body, use
the file instead
:type file: str or None
:param apply: if True (the default), apply configuration.
:type apply: bool
:param validate: if True (the default), validate the
configuration settings.
:type validate: bool
"""
if file is not None:
f = open(file)
script_body = f.readlines()
f.close()
else:
(fd, file) = self.du.mkstemp(prefix='PtlPbsSchedConfig')
f = open(file, "w")
f.write(script_body)
f.close()
os.close(fd)
self.server_dyn_res = file
self.logger.info(self.logprefix + "adding server dyn res " + file)
self.logger.info("-" * 30)
self.logger.info(script_body)
self.logger.info("-" * 30)
self.du.chmod(self.hostname, path=file, mode=0755)
a = {'server_dyn_res': '"' + custom_resource + ' !' + file + '"'}
self.set_sched_config(a, apply=apply, validate=validate)
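# Illustrative sketch (added commentary): with an assumed Scheduler instance
# ``sched``, a hypothetical dynamic resource script can be registered as:
#     sched.add_server_dyn_res('foo', script_body='#!/bin/sh\necho 10')
# which results in a sched_config entry similar to:
#     server_dyn_res: "foo !/tmp/PtlPbsSchedConfig<suffix>"
# (the temporary file path is generated by du.mkstemp above).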
def unset_sched_config(self, name, apply=True):
"""
Delete a ``sched_config`` entry
:param name: the entry to delete from sched_config
:type name: str
:param apply: if True, apply configuration. Defaults to True
:type apply: bool
"""
self.parse_sched_config()
if name not in self.sched_config:
return True
self.logger.info(self.logprefix + "unsetting config " + name)
del self.sched_config[name]
if apply:
return self.apply_config()
def set_dedicated_time_file(self, file):
"""
Set the path to a dedicated time file
"""
self.logger.info(self.logprefix + " setting dedicated time file to " +
str(file))
self.dedicated_time_file = file
def revert_to_defaults(self):
"""
Revert scheduler configuration to defaults.
:returns: True on success, False otherwise
"""
self.logger.info(self.logprefix +
"reverting configuration to defaults")
self.server.manager(MGR_CMD_LIST, SCHED)
ignore_attrs = ['id', 'pbs_version', 'sched_host']
unsetattrs = []
for k in self.attributes.keys():
if k not in ignore_attrs:
unsetattrs.append(k)
if len(unsetattrs) > 0:
self.server.manager(MGR_CMD_UNSET, SCHED, unsetattrs)
self.clear_dedicated_time(hup=False)
if self.du.cmp(self.hostname, self.dflt_resource_group_file,
self.resource_group_file) != 0:
self.du.run_copy(self.hostname, self.dflt_resource_group_file,
self.resource_group_file, mode=0644, sudo=True)
if self.server_dyn_res is not None:
self.du.rm(self.hostname, self.server_dyn_res, force=True,
sudo=True)
self.server_dyn_res = None
rc = self.holidays_revert_to_default()
if self.du.cmp(self.hostname, self.dflt_sched_config_file,
self.sched_config_file) != 0:
self.du.run_copy(self.hostname, self.dflt_sched_config_file,
self.sched_config_file, mode=0644, sudo=True)
self.signal('-HUP')
for f in self.deletable_files:
fn = os.path.join(self.pbs_conf['PBS_HOME'], 'sched_priv', f)
if fn is not None:
self.du.rm(self.hostname, fn, sudo=True, force=True)
self.parse_sched_config()
self.fairshare_tree = None
self.resource_group = None
return self.isUp()
def save_configuration(self, outfile, mode='a'):
"""
Save scheduler configuration
:param outfile: Path to a file to which configuration
is saved
:type outfile: str
:param mode: mode to use to access outfile. Defaults to
append, 'a'.
:type mode: str
:returns: True on success and False otherwise
"""
conf = {}
sconf = {MGR_OBJ_SCHED: conf}
sched_priv = os.path.join(self.pbs_conf['PBS_HOME'], 'sched_priv')
sc = os.path.join(sched_priv, 'sched_config')
self._save_config_file(conf, sc)
rg = os.path.join(sched_priv, 'resource_group')
self._save_config_file(conf, rg)
dt = os.path.join(sched_priv, 'dedicated_time')
self._save_config_file(conf, dt)
hd = os.path.join(sched_priv, 'holidays')
self._save_config_file(conf, hd)
try:
f = open(outfile, mode)
cPickle.dump(sconf, f)
f.close()
except:
self.logger.error('error saving configuration ' + outfile)
return False
return True
def load_configuration(self, infile):
"""
load configuration from saved file infile
"""
self._load_configuration(infile, MGR_OBJ_SCHED)
def get_resources(self, exclude=[]):
"""
returns a list of allocatable resources.
:param exclude: if set, excludes the named resources, if
they exist, from the resulting list
:type exclude: List
"""
if 'resources' not in self.sched_config:
return None
resources = self.sched_config['resources']
resources = resources.replace('"', '')
resources = resources.replace(' ', '')
res = resources.split(',')
if len(exclude) > 0:
for e in exclude:
if e in res:
res.remove(e)
return res
def add_resource(self, name, apply=True):
"""
Add a resource to ``sched_config``.
:param name: the resource name to add
:type name: str
:param apply: if True, apply configuration. Defaults to True
:type apply: bool
:returns: True on success and False otherwise.
Return True if the resource is already defined.
"""
# if the sched_config has not been read in yet, parse it
if not self.sched_config:
self.parse_sched_config()
if 'resources' in self.sched_config:
resources = self.sched_config['resources']
resources = resources.replace('"', '')
splitres = [r.strip() for r in resources.split(",")]
if name in splitres:
return True
resources = '"' + resources + ', ' + name + '"'
else:
resources = '"' + name + '"'
return self.set_sched_config({'resources': resources}, apply=apply)
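# Illustrative sketch (added commentary): with an assumed Scheduler instance
# ``sched``, adding a hypothetical resource to the scheduler's resources line:
#     sched.add_resource('ngpus')
#     'ngpus' in sched.get_resources()   # -> True once the config is applied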
def remove_resource(self, name, apply=True):
"""
Remove a resource to ``sched_config``.
:param name: the resource name to remove
:type name: str
:param apply: if True, apply configuration. Defaults to True
:type apply: bool
:returns: True on success and False otherwise
"""
# if the sched_config has not been read in yet, parse it
if not self.sched_config:
self.parse_sched_config()
if 'resources' in self.sched_config:
resources = self.sched_config['resources']
resources = resources.replace('"', '')
splitres = [r.strip() for r in resources.split(",")]
if name not in splitres:
return True
newres = []
for r in splitres:
if r != name:
newres.append(r)
resources = '"' + ",".join(newres) + '"'
return self.set_sched_config({'resources': resources}, apply=apply)
def holidays_revert_to_default(self, level=logging.INFO):
"""
Revert holidays file to default
"""
self.logger.log(level, self.logprefix +
"reverting holidays file to default")
rc = None
# Copy over the holidays file from PBS_EXEC if it exists
if self.du.cmp(self.hostname, self.dflt_holidays_file,
self.holidays_file) != 0:
ret = self.du.run_copy(self.hostname, self.dflt_holidays_file,
self.holidays_file, mode=0644, sudo=True,
logerr=True)
rc = ret['rc']
# Update the internal data structures for the updated file
self.holidays_parse_file(level=level)
else:
rc = 1
return rc
def holidays_parse_file(self, path=None, obj=None, level=logging.INFO):
"""
Parse the existing holidays file
:param path: optional path to the holidays file to parse
:type path: str or None
:param obj: optional holidays object to be used instead
of internal
:returns: The content of holidays file as a list of lines
"""
self.logger.log(level, self.logprefix + "Parsing holidays file")
if obj is None:
obj = self.holidays_obj
days_map = obj._days_map
days_set = obj.days_set
if path is None:
path = self.holidays_file
lines = self.du.cat(self.hostname, path, sudo=True)['out']
content = [] # valid content to return
self.holidays_delete_entry(
'a', apply=False, obj=obj, level=logging.DEBUG)
for line in lines:
entry = str(line).split()
if len(entry) == 0:
continue
tag = entry[0].lower()
if tag == "year": # initialize year
content.append("\t".join(entry))
obj.year['valid'] = True
if len(entry) > 1:
obj.year['value'] = entry[1]
elif tag in days_map.keys(): # initialize a day
content.append("\t".join(entry))
day = days_map[tag]
day['valid'] = True
days_set.append(day)
day['position'] = len(days_set) - 1
if len(entry) > 1:
day['p'] = entry[1]
if len(entry) > 2:
day['np'] = entry[2]
elif tag.isdigit(): # initialize a holiday
content.append("\t".join(entry))
obj.holidays.append(tag)
else:
pass
return content
def holidays_set_day(self, day_id, prime="", nonprime="", apply=True,
obj=None, level=logging.INFO):
"""
Set prime time values for a day
:param day_id: the day to be set (string)
:type day_id: str
:param prime: the prime time value
:param nonprime: the non-prime time value
:param apply: to reflect the changes to file
:type apply: bool
:param obj: optional holidays object to be used instead
of internal
:returns: The position ``(0-7)`` of the set day
"""
self.logger.log(level, self.logprefix +
"setting holidays file entry for %s",
day_id)
if obj is None:
obj = self.holidays_obj
day = obj._days_map[str(day_id).lower()]
days_set = obj.days_set
if day['valid'] is None: # Fresh entry
days_set.append(day)
day['position'] = len(days_set) - 1
elif day['valid'] is False: # Previously invalidated entry
days_set.insert(day['position'], day)
else:
pass
day['valid'] = True
day['p'] = str(prime)
day['np'] = str(nonprime)
self.logger.debug("holidays_set_day(): changed day struct: " +
str(day))
if apply:
self.holidays_write_file(obj=obj, level=logging.DEBUG)
return day['position']
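# Illustrative sketch (added commentary): with an assumed Scheduler instance
# ``sched``, prime/non-prime start times can be set per day, e.g.:
#     sched.holidays_set_day('weekday', prime='0600', nonprime='1730')
#     sched.holidays_set_day('saturday', prime='none', nonprime='all')
# The time values are placeholders in the usual holidays file notation.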
def holidays_get_day(self, day_id, obj=None, level=logging.INFO):
"""
:param obj: optional holidays object to be used instead
of internal
:param day_id: either a day's name or "all"
:type day_id: str
:returns: A copy of info about a day/all set days
"""
self.logger.log(level, self.logprefix +
"getting holidays file entry for " +
day_id)
if obj is None:
obj = self.holidays_obj
days_set = obj.days_set
days_map = obj._days_map
if day_id == "all":
return days_set[:]
else:
return days_map[day_id].copy()
def holidays_reposition_day(self, day_id, new_pos, apply=True, obj=None,
level=logging.INFO):
"""
Change position of a day ``(0-7)`` as it appears in the
holidays file
:param day_id: name of the day
:type day_id: str
:param new_pos: new position
:param apply: to reflect the changes to file
:type apply: bool
:param obj: optional holidays object to be used instead
of internal
:returns: The new position of the day
"""
self.logger.log(level, self.logprefix +
"repositioning holidays file entry for " +
day_id + " to position " + str(new_pos))
if obj is None:
obj = self.holidays_obj
days_map = obj._days_map
days_set = obj.days_set
day = days_map[str(day_id).lower()]
if new_pos == day['position']:
return
# We also want to update order of invalid days, so add them to
# days_set temporarily
invalid_days = []
for name in days_map:
if days_map[name]['valid'] is False:
invalid_days.append(days_map[name])
days_set += invalid_days
# Sort the old list
days_set.sort(key=itemgetter('position'))
# Change position of 'day_id'
day['position'] = new_pos
days_set.remove(day)
days_set.insert(new_pos, day)
# Update the 'position' field
for i in range(0, len(days_set)):
days_set[i]['position'] = i
# Remove invalid days from days_set
len_days_set = len(days_set)
days_set = [days_set[i] for i in range(0, len_days_set)
if days_set[i] not in invalid_days]
self.logger.debug("holidays_reposition_day(): List of days after " +
" re-positioning " + str(day_id) + " is:\n" +
str(days_set))
if apply:
self.holidays_write_file(obj=obj, level=logging.DEBUG)
return new_pos
def holidays_unset_day(self, day_id, apply=True, obj=None,
level=logging.INFO):
"""
Unset prime time values for a day
:param day_id: day to unset (string)
:type day_id: str
:param apply: to reflect the changes to file
:param obj: optional holidays object to be used instead
of internal
.. note:: we do not unset the 'valid' field here so the entry
will still be displayed but without any values
"""
self.logger.log(level, self.logprefix +
"unsetting holidays file entry for " + day_id)
if obj is None:
obj = self.holidays_obj
day = obj._days_map[str(day_id).lower()]
day['p'] = ""
day['np'] = ""
if apply:
self.holidays_write_file(obj=obj, level=logging.DEBUG)
def holidays_invalidate_day(self, day_id, apply=True, obj=None,
level=logging.INFO):
"""
Remove a day's entry from the holidays file
:param day_id: the day to remove (string)
:type day_id: str
:param apply: to reflect the changes to file
:type apply: bool
:param obj: optional holidays object to be used instead
of internal
"""
self.logger.log(level, self.logprefix +
"invalidating holidays file entry for " +
day_id)
if obj is None:
obj = self.holidays_obj
days_map = obj._days_map
days_set = obj.days_set
day = days_map[str(day_id).lower()]
day['valid'] = False
days_set.remove(day)
if apply:
self.holidays_write_file(obj=obj, level=logging.DEBUG)
def holidays_validate_day(self, day_id, apply=True, obj=None,
level=logging.INFO):
"""
Make valid a previously set day's entry
:param day_id: the day to validate (string)
:type day_id: str
:param apply: to reflect the changes to file
:type apply: bool
:param obj: optional holidays object to be used instead
of internal
.. note:: The day will retain its previous position in
the file
"""
self.logger.log(level, self.logprefix +
"validating holidays file entry for " +
day_id)
if obj is None:
obj = self.holidays_obj
days_map = obj._days_map
days_set = obj.days_set
day = days_map[str(day_id).lower()]
if day in days_set: # do not insert a pre-existing day
self.logger.debug("holidays_validate_day(): " +
day_id + " is already valid!")
return
day['valid'] = True
days_set.insert(day['position'], day)
if apply:
self.holidays_write_file(obj=obj, level=logging.DEBUG)
def holidays_delete_entry(self, entry_type, idx=None, apply=True,
obj=None, level=logging.INFO):
"""
Delete ``one/all`` entries from holidays file
:param entry_type: 'y':year, 'd':day, 'h':holiday or 'a': all
:type entry_type: str
:param idx: either a day of week (monday, tuesday etc.)
or Julian date of a holiday
:type idx: str or None
:param apply: to reflect the changes to file
:type apply: bool
:param obj: optional holidays object to be used instead of
internal
:returns: False if entry_type is invalid, otherwise True
.. note:: The day cannot be validated and will lose its
position in the file
"""
self.logger.log(level, self.logprefix +
"Deleting entries from holidays file")
if obj is None:
obj = self.holidays_obj
days_map = obj._days_map
days_set = obj.days_set
holiday_list = obj.holidays
year = obj.year
if entry_type not in ['a', 'y', 'd', 'h']:
return False
if entry_type == 'y' or entry_type == 'a':
self.logger.debug(self.logprefix +
"deleting year entry from holidays file")
# Delete year entry
year['value'] = None
year['valid'] = False
if entry_type == 'd' or entry_type == 'a':
# Delete one/all day entries
num_days_to_delete = 1
if entry_type == 'a':
self.logger.debug(self.logprefix +
"deleting all days from holidays file")
num_days_to_delete = len(days_set)
for i in range(0, num_days_to_delete):
if (entry_type == 'd'):
self.logger.debug(self.logprefix +
"deleting " + str(idx) +
" entry from holidays file")
day = days_map[str(idx).lower()]
else:
day = days_set[0]
day['p'] = None
day['np'] = None
day['valid'] = None
day['position'] = None
days_set.remove(day)
if entry_type == 'd':
# Correct 'position' field of every day
for i in range(0, len(days_set)):
days_set[i]['position'] = i
if entry_type == 'h' or entry_type == 'a':
# Delete one/all calendar holiday entries
if entry_type == 'a':
self.logger.debug(self.logprefix +
"deleting all holidays from holidays file")
del holiday_list[:]
else:
self.logger.debug(self.logprefix +
"deleting holiday on " + str(idx) +
" from holidays file")
holiday_list.remove(str(idx))
if apply:
self.holidays_write_file(obj=obj, level=logging.DEBUG)
return True
def holidays_set_year(self, new_year="", apply=True, obj=None,
level=logging.INFO):
"""
Set the year value
:param new_year: year value to set
:type new_year: str
:param apply: to reflect the changes to file
:type apply: bool
:param obj: optional holidays object to be used instead
of internal
"""
self.logger.log(level, self.logprefix +
"setting holidays file year entry to " +
str(new_year))
if obj is None:
obj = self.holidays_obj
year = obj.year
year['value'] = str(new_year)
year['valid'] = True
if apply:
self.holidays_write_file(obj=obj, level=logging.DEBUG)
def holidays_unset_year(self, apply=True, obj=None, level=logging.INFO):
"""
Unset the year value
:param apply: to reflect the changes to file
:type apply: bool
:param obj: optional holidays object to be used instead
of internal
"""
self.logger.log(level, self.logprefix +
"unsetting holidays file year entry")
if obj is None:
obj = self.holidays_obj
obj.year['value'] = ""
if apply:
self.holidays_write_file(obj=obj, level=logging.DEBUG)
def holidays_get_year(self, obj=None, level=logging.INFO):
"""
:param obj: optional holidays object to be used instead
of internal
:returns: The year entry of holidays file
"""
self.logger.log(level, self.logprefix +
"getting holidays file year entry")
if obj is None:
obj = self.holidays_obj
year = obj.year
return year.copy()
def holidays_add_holiday(self, date=None, apply=True, obj=None,
level=logging.INFO):
"""
Add a calendar holiday to the holidays file
:param date: Date value for the holiday
:param apply: to reflect the changes to file
:type apply: bool
:param obj: optional holidays object to be used instead
of internal
"""
self.logger.log(level, self.logprefix +
"adding holiday " + str(date) +
" to holidays file")
if obj is None:
obj = self.holidays_obj
holiday_list = obj.holidays
if date is not None:
holiday_list.append(str(date))
else:
pass
self.logger.debug("holidays list after adding one: " +
str(holiday_list))
if apply:
self.holidays_write_file(obj=obj, level=logging.DEBUG)
def holidays_get_holidays(self, obj=None, level=logging.INFO):
"""
:param obj: optional holidays object to be used instead
of internal
:returns: The list of holidays in holidays file
"""
self.logger.log(level, self.logprefix +
"retrieving list of holidays")
if obj is None:
obj = self.holidays_obj
holiday_list = obj.holidays
return holiday_list[:]
def _holidays_process_content(self, content, obj=None):
"""
Process a user provided list of holidays file content
:param obj: optional holidays object to be used instead
of internal
"""
self.logger.debug("_holidays_process_content(): " +
"Processing user provided holidays content:\n" +
str(content))
if obj is None:
obj = self.holidays_obj
days_map = obj._days_map
year = obj.year
holiday_list = obj.holidays
days_set = obj.days_set
self.holidays_delete_entry(
'a', apply=False, obj=obj, level=logging.DEBUG)
if content is None:
self.logger.debug("Holidays file was wiped out")
return
for line in content:
entry = line.split()
if len(entry) == 0:
continue
tag = entry[0].lower()
if tag == "year": # initialize self.year
year['valid'] = True
if len(entry) > 1:
year['value'] = entry[1]
elif tag in days_map.keys(): # initialize self.<day>
day = days_map[tag]
day['valid'] = True
days_set.append(day)
day['position'] = len(days_set) - 1
if len(entry) > 1:
day['p'] = entry[1]
if len(entry) > 2:
day['np'] = entry[2]
elif tag.isdigit(): # initialize self.holiday
holiday_list.append(tag)
else:
pass
def holidays_write_file(self, content=None, out_path=None,
hup=True, obj=None, level=logging.INFO):
"""
Write to the holidays file with content ``given/generated``
:param hup: SIGHUP the scheduler after writing the holidays
file
:type hup: bool
:param obj: optional holidays object to be used instead of
internal
"""
self.logger.log(level, self.logprefix +
"Writing to the holidays file")
if obj is None:
obj = self.holidays_obj
if out_path is None:
out_path = self.holidays_file
if content is not None:
self._holidays_process_content(content, obj)
else:
content = str(obj)
self.logger.debug("content being written:\n" + str(content))
(fd, fn) = self.du.mkstemp(self.hostname, body=content)
ret = self.du.run_copy(self.hostname, fn, out_path, mode=0644,
sudo=True)
self.du.rm(self.hostname, fn)
self.du.chown(self.hostname, out_path, uid=0, gid=0,
sudo=True)
if ret['rc'] != 0:
raise PbsSchedConfigError(rc=ret['rc'], rv=ret['out'],
msg=('error applying holidays file' +
ret['err']))
if hup:
rv = self.signal('-HUP')
if not rv:
raise PbsSchedConfigError(rc=1, rv=False,
msg='error applying holidays file')
self.du.chown(self.hostname, path=out_path, uid=0,
gid=0, sudo=True)
return True
def parse_dedicated_time(self, file=None):
"""
Parse the dedicated_time file and populate dedicated times
as both a string dedicated_time array of dictionaries defined
as ``[{'from': datetime, 'to': datetime}, ...]`` as well as a
dedicated_time_as_str array with a string representation of
each entry
:param file: optional file to parse. Defaults to the one under
``PBS_HOME/sched_priv``
:type file: str or None
:returns: The dedicated_time list of dictionaries or None on
error. Return an empty array if dedicated time file
is empty.
"""
self.dedicated_time_as_str = []
self.dedicated_time = []
if file:
dt_file = file
elif self.dedicated_time_file:
dt_file = self.dedicated_time_file
else:
dt_file = os.path.join(self.pbs_conf['PBS_HOME'], 'sched_priv',
'dedicated_time')
try:
lines = self.du.cat(self.hostname, dt_file, sudo=True)['out']
if lines is None:
return []
for line in lines:
if not line.startswith('#') and len(line) > 0:
self.dedicated_time_as_str.append(line)
(dtime_from, dtime_to) = self.utils.convert_dedtime(line)
self.dedicated_time.append({'from': dtime_from,
'to': dtime_to})
except:
self.logger.error('error in parse_dedicated_time')
return None
return self.dedicated_time
def clear_dedicated_time(self, hup=True):
"""
Clear the dedicated time file
"""
self.parse_dedicated_time()
if ((len(self.dedicated_time) == 0) and
(len(self.dedicated_time_as_str) == 0)):
return True
if self.dedicated_time:
for d in self.dedicated_time:
del d
if self.dedicated_time_as_str:
for d in self.dedicated_time_as_str:
del d
self.dedicated_time = []
self.dedicated_time_as_str = []
dt = "# FORMAT: MM/DD/YYYY HH:MM MM/DD/YYYY HH:MM"
return self.add_dedicated_time(dt, hup=hup)
def add_dedicated_time(self, as_str=None, start=None, end=None, hup=True):
"""
Append a dedicated time entry. The function can be called
in one of two ways, either by passing in start and end as
time values, or by passing as_str, a string that gets
appended to the dedicated time entries and formatted as
follows. Note that no check on the validity of the format is
made; the function parses the datetime out of the string and
will fail if it cannot be converted.
``MM/DD/YYYY HH:MM MM/DD/YYYY HH:MM``
:returns: True on success and False otherwise
"""
if self.dedicated_time is None:
self.parse_dedicated_time()
if start is not None and end is not None:
dtime_from = time.strftime("%m/%d/%Y %H:%M", time.localtime(start))
dtime_to = time.strftime("%m/%d/%Y %H:%M", time.localtime(end))
dedtime = dtime_from + " " + dtime_to
elif as_str is not None:
(dtime_from, dtime_to) = self.utils.convert_dedtime(as_str)
dedtime = as_str
else:
self.logger.warning("no dedicated from/to specified")
return True
for d in self.dedicated_time_as_str:
if dedtime == d:
if dtime_from is None or dtime_to is None:
self.logger.info(self.logprefix +
"dedicated time already defined")
else:
self.logger.info(self.logprefix +
"dedicated time from " + dtime_from +
" to " + dtime_to + " already defined")
return True
if dtime_from is not None and dtime_to is not None:
self.logger.info(self.logprefix +
"adding dedicated time " + dedtime)
self.dedicated_time_as_str.append(dedtime)
if dtime_from is not None and dtime_to is not None:
self.dedicated_time.append({'from': dtime_from, 'to': dtime_to})
try:
(fd, fn) = self.du.mkstemp()
for l in self.dedicated_time_as_str:
os.write(fd, l + '\n')
os.close(fd)
ddfile = os.path.join(self.pbs_conf['PBS_HOME'], 'sched_priv',
'dedicated_time')
self.du.run_copy(self.hostname, fn, ddfile, mode=0644, uid=0,
gid=0, sudo=True)
os.remove(fn)
except:
raise PbsSchedConfigError(rc=1, rv=False,
msg='error adding dedicated time')
if hup:
ret = self.signal('-HUP')
if ret['rc'] != 0:
raise PbsSchedConfigError(rc=1, rv=False,
msg='error adding dedicated time')
return True
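# Illustrative sketch (added commentary): with an assumed Scheduler instance
# ``sched``, a dedicated time window can be added either from epoch values:
#     now = int(time.time())
#     sched.add_dedicated_time(start=now + 3600, end=now + 7200)
# or from a preformatted string in MM/DD/YYYY HH:MM MM/DD/YYYY HH:MM form:
#     sched.add_dedicated_time('04/01/2016 14:00 04/01/2016 18:00')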
def terminate(self):
self.signal('-KILL')
def valgrind(self):
"""
run scheduler instance through valgrind
"""
if self.isUp():
self.terminate()
rv = CliUtils().check_bin('valgrind')
if not rv:
self.logger.error(self.logprefix + 'valgrind not available')
return None
cmd = ['valgrind']
cmd += ["--log-file=" + os.path.join(tempfile.gettempdir(),
'schd.vlgrd')]
cmd += [os.path.join(self.pbs_conf['PBS_EXEC'], 'sbin', 'pbs_sched')]
return self.du.run_cmd(self.hostname, cmd, sudo=True)
def alloc_to_execvnode(self, chunks):
"""
convert a resource allocation to an execvnode string representation
"""
execvnode = []
for chunk in chunks:
execvnode += ["(" + chunk.vnode]
for res, val in chunk.resources.items():
execvnode += [":" + str(res) + "=" + str(val)]
for vchk in chunk.vchunk:
execvnode += ["+" + vchk.vnode]
for res, val in vchk.resources.items():
execvnode += [":" + str(res) + "=" + str(val)]
execvnode += [")+"]
if len(execvnode) != 0:
ev = execvnode[len(execvnode) - 1]
ev = ev[:-1]
execvnode[len(execvnode) - 1] = ev
return "".join(execvnode)
def cycles(self, start=None, end=None, firstN=None, lastN=None):
"""
Analyze scheduler log and return cycle information
:param start: Optional setting of the start time to consider
:param end: Optional setting of the end time to consider
:param firstN: Optional setting to consider the given first
N cycles
:param lastN: Optional setting to consider only the given
last N cycles
"""
try:
from ptl.utils.pbs_logutils import PBSSchedulerLog
except:
self.logger.error('error loading ptl.utils.pbs_logutils')
return None
sl = PBSSchedulerLog()
sl.analyze(self.logfile, start, end, self.hostname)
cycles = sl.cycles
if not cycles or cycles is None:
return []
if lastN is not None:
return cycles[-lastN:]
elif firstN is not None:
return cycles[:firstN]
return cycles
def query_fairshare(self, name=None, id=None):
"""
Parse fairshare data using ``pbsfs`` and populates
fairshare_tree. If name or id is specified, return the data
associated to that entity. Otherwise return the entire fairshare
tree
"""
if self.has_diag:
return None
tree = FairshareTree()
cmd = [os.path.join(self.pbs_conf['PBS_EXEC'], 'sbin', 'pbsfs')]
ret = self.du.run_cmd(self.hostname, cmd, sudo=True, logerr=False)
if ret['rc'] != 0:
raise PbsFairshareError(rc=ret['rc'], rv=None,
msg=str(ret['err']))
pbsfs = ret['out']
for p in pbsfs:
m = self.fs_tag.match(p)
if m:
usage = int(m.group('Usage'))
perc = float(m.group('Perc'))
nm = m.group('name')
cgrp = int(m.group('cgrp'))
pid = int(m.group('Grp'))
nd = tree.get_node(id=pid)
if nd:
pname = nd.parent_name
else:
pname = None
# if an entity has a negative cgroup it should belong
# to the unknown resource, we work around the fact that
# PBS Pro (up to 13.0) sets this cgroup id to -1 by
# reassigning it to 0
# TODO: cleanup once PBS code is updated
if cgrp < 0:
cgrp = 0
node = FairshareNode(name=nm,
id=cgrp,
parent_id=pid,
parent_name=pname,
nshares=int(m.group('Shares')),
usage=usage,
perc={'TREEROOT': perc})
if perc:
node.prio['TREEROOT'] = float(usage) / perc
if nm == name or id == cgrp:
return node
tree.add_node(node, apply=False)
# now that all nodes are known, update parent and child
# relationship of the tree
tree.update()
for node in tree.nodes.values():
pnode = node._parent
while pnode is not None and pnode.id != 0:
if pnode.perc['TREEROOT']:
node.perc[pnode.name] = \
(node.perc['TREEROOT'] * 100 / pnode.perc[
'TREEROOT'])
if pnode.name in node.perc and node.perc[pnode.name]:
node.prio[pnode.name] = (
node.usage / node.perc[pnode.name])
pnode = pnode._parent
if name:
n = tree.get_node(name)
if n is None:
raise PbsFairshareError(rc=1, rv=None,
msg='Unknown entity ' + name)
return n
if id:
n = tree.get_node(id=id)
if n is None:
    raise PbsFairshareError(rc=1, rv=None,
                            msg='Unknown entity ' + str(id))
return n
return tree
def set_fairshare_usage(self, name=None, usage=None):
"""
Set the fairshare usage associated to a given entity.
:param name: The entity to set the fairshare usage of
:type name: str or None
:param usage: The usage value to set
"""
if self.has_diag:
return True
if name is None:
self.logger.error(self.logprefix + ' an entity name required')
return False
if usage is None:
self.logger.error(self.logprefix + ' a usage is required')
return False
self.stop()
pbsfs = os.path.join(self.pbs_conf['PBS_EXEC'], 'sbin', 'pbsfs')
cmd = [pbsfs, '-s', name, str(usage)]
ret = self.du.run_cmd(self.hostname, cmd, sudo=True)
self.start()
if ret['rc'] == 0:
return True
return False
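# Illustrative sketch (added commentary): with an assumed Scheduler instance
# ``sched``, usage can be seeded and then read back through pbsfs, e.g.:
#     sched.set_fairshare_usage('pbsuser1', 1000)
#     node = sched.query_fairshare(name='pbsuser1')
# 'pbsuser1' is a placeholder entity name.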
def decay_fairshare_tree(self):
"""
Decay the fairshare tree through pbsfs
"""
if self.has_diag:
return True
self.stop()
pbsfs = os.path.join(self.pbs_conf['PBS_EXEC'], 'sbin', 'pbsfs')
cmd = [pbsfs, '-d']
ret = self.du.run_cmd(self.hostname, cmd, sudo=True)
self.start()
if ret['rc'] == 0:
return True
return False
def cmp_fairshare_entities(self, name1=None, name2=None):
"""
Compare two fairshare entities. Wrapper of ``pbsfs -c e1 e2``
:param name1: name of first entity to compare
:type name1: str or None
:param name2: name of second entity to compare
:type name1: str or None
:returns: the name of the entity of higher priority or None on error
"""
if self.has_diag:
return None
if name1 is None or name2 is None:
            self.logger.error(self.logprefix + 'two fairshare entity names ' +
                              'required')
return None
pbsfs = os.path.join(self.pbs_conf['PBS_EXEC'], 'sbin', 'pbsfs')
cmd = [pbsfs, '-c', name1, name2]
ret = self.du.run_cmd(self.hostname, cmd, sudo=True)
if ret['rc'] == 0:
return ret['out'][0]
return None
def parse_resource_group(self, hostname=None, resource_group=None):
"""
Parse the Scheduler's ``resource_group`` file
:param hostname: The name of the host from which to parse
resource_group
:type hostname: str or None
:param resource_group: The path to a resource_group file
:type resource_group: str or None
:returns: A fairshare tree
"""
if hostname is None:
hostname = self.hostname
if resource_group is None:
resource_group = self.resource_group_file
        # if has_diag is True, access to sched_priv may not need su privilege
ret = self.du.cat(hostname, resource_group, sudo=(not self.has_diag))
if ret['rc'] != 0:
self.logger.error(hostname + ' error reading ' + resource_group)
tree = FairshareTree(hostname, resource_group)
root = FairshareNode('root', -1, parent_id=0, nshares=100)
tree.add_node(root, apply=False)
lines = ret['out']
for line in lines:
line = line.strip()
if not line.startswith("#") and len(line) > 0:
# could have 5th column but we only need the first 4
(name, id, parent, nshares) = line.split()[:4]
node = FairshareNode(name, id, parent_name=parent,
nshares=nshares)
tree.add_node(node, apply=False)
tree.update()
return tree
def add_to_resource_group(self, name, id, parent, nshares):
"""
Add an entry to the resource group file
:param name: The name of the entity to add
:type name: str
:param id: The numeric identifier of the entity to add
:type id: int
:param parent: The name of the parent group
:type parent: str
:param nshares: The number of shares associated to the entity
:type nshares: int
"""
if self.resource_group is None:
self.resource_group = self.parse_resource_group(
self.hostname, self.resource_group_file)
if not self.resource_group:
self.resource_group = FairshareTree(
self.hostname, self.resource_group_file)
return self.resource_group.create_node(name, id, parent_name=parent,
nshares=nshares)
def job_formula(self, jobid=None, starttime=None, max_attempts=5):
"""
Extract formula value out of scheduler log
:param jobid: Optional, the job identifier for which to get
the formula.
:type jobid: str or int
:param starttime: The time at which to start parsing the
scheduler log
:param max_attempts: The number of attempts to search for
formula in the logs
:type max_attempts: int
:returns: If jobid is specified, return the formula value
associated to that job if no jobid is specified,
returns a dictionary mapping job ids to formula
"""
if jobid is None:
jobid = "(?P<jobid>.*)"
_alljobs = True
else:
if isinstance(jobid, int):
jobid = str(jobid)
_alljobs = False
formula_pat = (".*Job;" + jobid +
".*;Formula Evaluation = (?P<fval>.*)")
rv = self.log_match(formula_pat, regexp=True, starttime=starttime,
                            n='ALL', allmatch=True, max_attempts=max_attempts)
ret = {}
if rv:
for _, l in rv:
m = re.match(formula_pat, l)
if m:
if _alljobs:
jobid = m.group('jobid')
ret[jobid] = float(m.group('fval').strip())
if not _alljobs:
if jobid in ret:
return ret[jobid]
else:
return
return ret
class FairshareTree(object):
"""
Object representation of the Scheduler's resource_group
file and pbsfs data
:param hostname: Hostname of the machine
:type hostname: str
"""
du = DshUtils()
def __init__(self, hostname=None, resource_group=None):
self.logger = logging.getLogger(__name__)
self.hostname = hostname
self.resource_group = resource_group
self.nodes = {}
self.root = None
self._next_id = -1
def update_resource_group(self):
if self.resource_group:
(fd, fn) = self.du.mkstemp()
os.write(fd, self.__str__())
os.close(fd)
ret = self.du.run_copy(self.hostname, fn, self.resource_group,
mode=0644, sudo=True)
self.du.chown(self.hostname, self.resource_group, uid=0,
gid=0, sudo=True)
os.remove(fn)
if ret['rc'] != 0:
raise PbsFairshareError(rc=1, rv=False,
msg='error updating resource group')
return True
def update(self):
for node in self.nodes.values():
if node._parent is None:
pnode = self.get_node(id=node.parent_id)
if pnode:
node._parent = pnode
if node not in pnode._child:
pnode._child.append(node)
def _add_node(self, node):
if node.name == 'TREEROOT' or node.name == 'root':
self.root = node
self.nodes[node.name] = node
if node.parent_name in self.nodes:
self.nodes[node.parent_name]._child.append(node)
node._parent = self.nodes[node.parent_name]
def add_node(self, node, apply=True):
"""
add node to the fairshare tree
"""
self._add_node(node)
if apply:
return self.update_resource_group()
return True
def create_node(self, name, id, parent_name, nshares):
"""
Add an entry to the ``resource_group`` file
:param name: The name of the entity to add
:type name: str
        :param id: The unique numeric identifier of the entity
:type id: int
:param parent: The name of the parent/group of the entity
:type parent: str
:param nshares: The number of shares assigned to this entity
:type nshares: int
:returns: True on success, False otherwise
"""
if name in self.nodes:
self.logger.warning('fairshare: node ' + name + ' already defined')
return True
self.logger.info('creating tree node: ' + name)
node = FairshareNode(name, id, parent_name=parent_name,
nshares=nshares)
self._add_node(node)
return self.update_resource_group()
def get_node(self, name=None, id=None):
"""
Return a node of the fairshare tree identified by either
name or id.
:param name: The name of the entity to query
:type name: str or None
:param id: The id of the entity to query
:returns: The fairshare information of the entity when
found, if not, returns None
.. note:: The name takes precedence over the id.
"""
for node in self.nodes.values():
if name is not None and node.name == name:
return node
if id is not None and node.id == id:
return node
return None
def __batch_status__(self):
"""
Convert fairshare tree object to a batch status format
"""
dat = []
for node in self.nodes.values():
if node.name == 'root':
continue
einfo = {}
einfo['cgroup'] = node.id
einfo['id'] = node.name
einfo['group'] = node.parent_id
einfo['nshares'] = node.nshares
if len(node.prio) > 0:
p = []
for k, v in node.prio.items():
p += ["%s:%d" % (k, int(v))]
einfo['penalty'] = ", ".join(p)
einfo['usage'] = node.usage
if node.perc:
p = []
for k, v in node.perc.items():
p += ["%s:%.3f" % (k, float(v))]
einfo['shares_perc'] = ", ".join(p)
ppnode = self.get_node(id=node.parent_id)
if ppnode:
ppname = ppnode.name
ppid = ppnode.id
else:
ppnode = self.get_node(name=node.parent_name)
if ppnode:
ppname = ppnode.name
ppid = ppnode.id
else:
ppname = ''
ppid = None
einfo['parent'] = "%s (%s) " % (str(ppid), ppname)
dat.append(einfo)
return dat
def get_next_id(self):
self._next_id -= 1
return self._next_id
def __repr__(self):
return self.__str__()
def _dfs(self, node, dat):
if node.name != 'root':
s = []
if node.name is not None:
s += [node.name]
if node.id is not None:
s += [str(node.id)]
if node.parent_name is not None:
s += [node.parent_name]
if node.nshares is not None:
s += [str(node.nshares)]
if node.usage is not None:
s += [str(node.usage)]
dat.append("\t".join(s))
for n in node._child:
self._dfs(n, dat)
def __str__(self):
dat = []
if self.root:
self._dfs(self.root, dat)
if len(dat) > 0:
dat += ['\n']
return "\n".join(dat)
class FairshareNode(object):
"""
Object representation of the fairshare data as queryable through
the command ``pbsfs``.
:param nshares: Number of shares
:type nshares: int or None
:param usage: Fairshare usage
:param perc: Percentage the entity has of the tree
"""
def __init__(self, name=None, id=None, parent_name=None, parent_id=None,
nshares=None, usage='unknown', perc=None):
self.name = name
self.id = id
self.parent_name = parent_name
self.parent_id = parent_id
self.nshares = nshares
self.usage = usage
self.perc = perc
self.prio = {}
self._parent = None
self._child = []
def __str__(self):
ret = []
if self.name is not None:
ret.append(self.name)
if self.id is not None:
ret.append(str(self.id))
if self.parent_name is not None:
ret.append(str(self.parent_name))
if self.nshares is not None:
ret.append(str(self.nshares))
if self.usage is not None:
ret.append(str(self.usage))
if self.perc is not None:
ret.append(str(self.perc))
return "\t".join(ret)
class MoM(PBSService):
"""
Container for MoM properties.
Provides various MoM operations, such as creation, insertion,
deletion of vnodes.
:param name: The hostname of the server. Defaults to calling
pbs_default()
:type name: str or None
:param attrs: Dictionary of attributes to set, these will
override defaults.
:type attrs: Dictionary
:param pbsconf_file: path to config file to parse for
``PBS_HOME``, ``PBS_EXEC``, etc
:type pbsconf_file: str or None
:param diagmap: A dictionary of PBS objects ``(node,server,etc)``
to mapped files from PBS diag directory
:type diagmap: Dictionary
    :param diag: path to PBS diag directory (This overrides
                 diagmap)
:type diag: str or None
:param server: A PBS server instance to which this mom is associated
    :param db_access: set to either file containing credentials to DB
access or dictionary containing
{'dbname':...,'user':...,'port':...}
:type db_access: str or dictionary
"""
dflt_attributes = {}
conf_to_cmd_map = {'PBS_MOM_SERVICE_PORT': '-M',
'PBS_MANAGER_SERVICE_PORT': '-R',
'PBS_HOME': '-d'}
def __init__(self, name=None, attrs={}, pbsconf_file=None, diagmap={},
diag=None, server=None, db_access=None):
self.logger = logging.getLogger(__name__)
if server is not None:
self.server = server
if diag is None and self.server.diag is not None:
diag = self.server.diag
if (len(diagmap) == 0) and (len(self.server.diagmap) != 0):
diagmap = self.server.diagmap
else:
self.server = Server(name, pbsconf_file=pbsconf_file,
db_access=db_access, diag=diag,
diagmap=diagmap)
PBSService.__init__(self, name, attrs, self.dflt_attributes,
pbsconf_file, diag=diag, diagmap=diagmap)
_m = ['mom ', self.shortname]
if pbsconf_file is not None:
_m += ['@', pbsconf_file]
_m += [': ']
self.logprefix = "".join(_m)
self.pi = PBSInitServices(hostname=self.hostname,
conf=self.pbs_conf_file)
self.configd = os.path.join(self.pbs_conf['PBS_HOME'], 'mom_priv',
'config.d')
self.config = {}
self.dflt_config = {'$clienthost': self.server.hostname}
self.version = None
self._is_cpuset_mom = None
def isUp(self):
"""
Check for PBS mom up
"""
return super(MoM, self)._isUp(self)
def signal(self, sig):
"""
Send signal to PBS mom
"""
self.logger.info(self.logprefix + 'sent signal ' + sig)
return super(MoM, self)._signal(sig, inst=self)
def get_pid(self):
"""
Get the PBS mom pid
"""
return super(MoM, self)._get_pid(inst=self)
def all_instance_pids(self):
"""
        Get all pids of an instance
"""
return super(MoM, self)._all_instance_pids(inst=self)
def start(self, args=None, launcher=None):
"""
Start the PBS mom
:param args: Arguments to start the mom
:type args: str or None
:param launcher: Optional utility to invoke the launch of the service
:type launcher: str or list or None
"""
if args is not None or launcher is not None:
return super(MoM, self)._start(inst=self, args=args,
cmd_map=self.conf_to_cmd_map,
launcher=launcher)
else:
try:
rv = self.pi.start_mom()
except PbsInitServicesError as e:
raise PbsServiceError(rc=e.rc, rv=e.rv, msg=e.msg)
return rv
def stop(self, sig=None):
"""
Stop the PBS mom
:param sig: Signal to stop the PBS mom
:type sig: str
"""
if sig is not None:
self.logger.info(self.logprefix + 'stopping MoM on host ' +
self.hostname)
return super(MoM, self)._stop(sig, inst=self)
else:
try:
self.pi.stop_mom()
except PbsInitServicesError as e:
raise PbsServiceError(rc=e.rc, rv=e.rv, msg=e.msg)
return True
def restart(self):
"""
Restart the PBS mom
"""
if self.isUp():
if not self.stop():
return False
return self.start()
def log_match(self, msg=None, id=None, n=50, tail=True, allmatch=False,
regexp=False, day=None, max_attempts=1, interval=1,
starttime=None, endtime=None):
"""
Match the PBS mom logs
"""
return self._log_match(self, msg, id, n, tail, allmatch, regexp, day,
max_attempts, interval, starttime, endtime)
def pbs_version(self):
"""
Get the PBS version
"""
if self.version:
return self.version
exe = os.path.join(self.pbs_conf['PBS_EXEC'], 'sbin', 'pbs_mom')
version = self.du.run_cmd(self.hostname,
[exe, '--version'], sudo=True)['out']
if version:
self.logger.debug(version)
# in some cases pbs_mom --version may return multiple lines, we
# only care about the one that carries pbs_version information
for ver in version:
if 'pbs_version' in ver:
version = ver.split('=')[1].strip()
break
else:
version = self.log_match('pbs_version', tail=False)
if version:
version = version[1].strip().split('=')[1].strip()
else:
version = "unknown"
self.version = LooseVersion(version)
return self.version
def delete_vnodes(self):
rah = ATTR_rescavail + '.host'
rav = ATTR_rescavail + '.vnode'
a = {rah: self.hostname, rav: None}
try:
_vs = self.server.status(HOST, a, id=self.hostname)
except PbsStatusError:
try:
_vs = self.server.status(HOST, a, id=self.shortname)
except PbsStatusError as e:
if e.msg[0].endswith('Server has no node list'):
_vs = []
else:
raise e
vs = []
for v in _vs:
if v[rav].split('.')[0] != v[rah].split('.')[0]:
vs.append(v['id'])
if len(vs) > 0:
self.server.manager(MGR_CMD_DELETE, VNODE, id=vs)
def revert_to_defaults(self, delvnodedefs=True):
"""
1. ``Revert MoM configuration to defaults.``
2. ``Remove epilogue and prologue``
        3. ``Delete all vnode definitions``
        4. ``HUP MoM``
:param delvnodedefs: if True (the default) delete all vnode
definitions and restart the MoM
:type delvnodedefs: bool
:returns: True on success and False otherwise
"""
self.logger.info(self.logprefix +
'reverting configuration to defaults')
restart = False
if not self.has_diag:
self.delete_pelog()
if delvnodedefs and self.has_vnode_defs():
restart = True
if not self.delete_vnode_defs():
return False
self.delete_vnodes()
if cmp(self.config, self.dflt_config) != 0:
self.apply_config(self.dflt_config, hup=False, restart=False)
if restart:
self.restart()
else:
self.signal('-HUP')
return self.isUp()
return True
def save_configuration(self, outfile, mode='a'):
"""
Save a MoM ``mom_priv/config``
        :param outfile: the output file to which configuration is
saved
:type outfile: str
:param mode: the mode in which to open outfile to save
configuration.
:type mode: str
:returns: True on success, False on error
.. note:: first object being saved should open this file
with 'w' and subsequent calls from other objects
should save with mode 'a' or 'a+'. Defaults to a+
"""
conf = {}
mconf = {MGR_OBJ_NODE: conf}
mpriv = os.path.join(self.pbs_conf['PBS_HOME'], 'mom_priv')
cf = os.path.join(mpriv, 'config')
self._save_config_file(conf, cf)
if os.path.isdir(os.path.join(mpriv, 'config.d')):
for f in os.listdir(os.path.join(mpriv, 'config.d')):
self._save_config_file(conf,
os.path.join(mpriv, 'config.d', f))
try:
f = open(outfile, mode)
cPickle.dump(mconf, f)
f.close()
except:
self.logger.error('error saving configuration to ' + outfile)
return False
return True
def load_configuration(self, infile):
"""
load configuration from saved file infile
"""
self._load_configuration(infile, MGR_OBJ_NODE)
def is_cray(self):
"""
Returns True if the version of PBS used was built for Cray platforms
"""
rv = self.log_match("alps_client", tail=False, max_attempts=10)
if rv:
return True
return False
def is_cpuset_mom(self):
"""
Check for cpuset mom
"""
if self._is_cpuset_mom is not None:
return self._is_cpuset_mom
raa = ATTR_rescavail + '.arch'
a = {raa: None}
try:
rv = self.server.status(NODE, a, id=self.shortname)
except PbsStatusError:
try:
rv = self.server.status(NODE, a, id=self.hostname)
except PbsStatusError as e:
if e.msg[0].endswith('Server has no node list'):
return False
else:
raise e
if rv[0][raa] == 'linux_cpuset':
self._is_cpuset_mom = True
else:
self._is_cpuset_mom = False
return self._is_cpuset_mom
def create_vnode_def(self, name, attrs={}, numnodes=1, sharednode=True,
pre='[', post=']', usenatvnode=False, attrfunc=None,
vnodes_per_host=1):
"""
Create a vnode definition string representation
:param name: The prefix for name of vnode to create,
name of vnode will be prefix + pre + <num> +
post
:type name: str
:param attrs: Dictionary of attributes to set on each vnode
:type attrs: Dictionary
:param numnodes: The number of vnodes to create
:type numnodes: int
:param sharednode: If true vnodes are shared on a host
:type sharednode: bool
:param pre: The symbol preceding the numeric value of that
vnode.
:type pre: str
:param post: The symbol following the numeric value of that
vnode.
:type post: str
:param usenatvnode: use the natural vnode as the first vnode
to allocate this only makes sense
starting with PBS 11.3 when natural
                            vnodes are reported as allocatable
:type usenatvnode: bool
:param attrfunc: function to customize the attributes,
signature is (name, numnodes, curnodenum,
attrs), must return a dict that contains
new or modified attrs that will be added to
the vnode def. The function is called once
                         per vnode being created; it does not modify
attrs itself across calls.
:param vnodes_per_host: number of vnodes per host
:type vnodes_per_host: int
:returns: A string representation of the vnode definition
file
"""
sethost = False
attribs = attrs.copy()
if not sharednode and 'resources_available.host' not in attrs:
sethost = True
if attrfunc is None:
customattrs = attribs
vdef = ["$configversion 2"]
# altering the natural vnode information
if numnodes == 0:
for k, v in attribs.items():
vdef += [name + ": " + str(k) + "=" + str(v)]
else:
if usenatvnode:
if attrfunc:
customattrs = attrfunc(name, numnodes, "", attribs)
for k, v in customattrs.items():
vdef += [self.shortname + ": " + str(k) + "=" + str(v)]
# account for the use of the natural vnode
numnodes -= 1
else:
# ensure that natural vnode is not allocatable by the scheduler
vdef += [self.shortname + ": resources_available.ncpus=0"]
vdef += [self.shortname + ": resources_available.mem=0"]
for n in xrange(numnodes):
vnid = name + pre + str(n) + post
if sethost:
if vnodes_per_host > 1:
if n % vnodes_per_host == 0:
_nid = vnid
else:
_nid = name + pre + str(n - n % vnodes_per_host) + post
attribs['resources_available.host'] = _nid
else:
attribs['resources_available.host'] = vnid
if attrfunc:
customattrs = attrfunc(vnid, numnodes, n, attribs)
for k, v in customattrs.items():
vdef += [vnid + ": " + str(k) + "=" + str(v)]
if numnodes == 0:
nn = 1
else:
nn = numnodes
if numnodes > 1:
vnn_msg = ' vnodes '
else:
vnn_msg = ' vnode '
self.logger.info(self.logprefix + 'created ' + str(nn) +
vnn_msg + name + ' with attr ' +
str(attribs) + ' on host ' + self.hostname)
vdef += ["\n"]
del attribs
return "\n".join(vdef)
def parse_config(self):
"""
Parse mom config file into a dictionary of configuration
options.
:returns: A dictionary of configuration options on success,
and None otherwise
"""
try:
mconf = os.path.join(self.pbs_conf['PBS_HOME'], 'mom_priv',
'config')
ret = self.du.cat(self.hostname, mconf, sudo=True)
if ret['rc'] != 0:
self.logger.error('error parsing configuration file')
return None
self.config = {}
lines = ret['out']
for line in lines:
(k, v) = line.split()
if k in self.config:
if isinstance(self.config[k], list):
self.config[k].append(v)
else:
self.config[k] = [self.config[k], v]
else:
self.config[k] = v
except:
self.logger.error('error in parse_config')
return None
return self.config
def add_config(self, conf={}, hup=True):
"""
        Add config options to mom_priv/config.
:param conf: The configurations to add to ``mom_priv/config``
:type conf: Dictionary
:param hup: If True (default) ``HUP`` the MoM
:type hup: bool
:returns: True on success and False otherwise
"""
doconfig = False
if not self.config:
self.parse_config()
mc = self.config
if mc is None:
mc = {}
for k, v in conf.items():
if k in mc and (mc[k] == v or (isinstance(v, list) and
mc[k] in v)):
self.logger.debug(self.logprefix + 'config ' + k +
' already set to ' + str(v))
continue
else:
doconfig = True
break
if not doconfig:
return True
self.logger.info(self.logprefix + "config " + str(conf))
return self.apply_config(conf, hup)
def unset_mom_config(self, name, hup=True):
"""
Delete a mom_config entry
:param name: The entry to remove from ``mom_priv/config``
:type name: String
:param hup: if True (default) ``HUP`` the MoM
:type hup: bool
:returns: True on success and False otherwise
"""
mc = self.parse_config()
if mc is None or name not in mc:
return True
self.logger.info(self.logprefix + "unsetting config " + name)
del mc[name]
return self.apply_config(mc, hup)
def apply_config(self, conf={}, hup=True, restart=False):
"""
Apply configuration options to MoM.
:param conf: A dictionary of configuration options to apply
to MoM
:type conf: Dictionary
:param hup: If True (default) , HUP the MoM to apply the
configuration
:type hup: bool
:returns: True on success and False otherwise.
"""
self.config = dict(self.config.items() + conf.items())
try:
(_, fn) = self.du.mkstemp()
f = open(fn, 'w+')
for k, v in self.config.items():
if isinstance(v, list):
for eachprop in v:
f.write(str(k) + ' ' + str(eachprop) + '\n')
else:
f.write(str(k) + ' ' + str(v) + '\n')
f.close()
dest = os.path.join(
self.pbs_conf['PBS_HOME'], 'mom_priv', 'config')
self.du.run_copy(self.hostname, fn, dest, mode=0644, sudo=True)
self.du.chown(self.hostname, path=dest, uid=0, gid=0, sudo=True)
os.remove(fn)
except:
raise PbsMomConfigError(rc=1, rv=False,
msg='error processing add_config')
if restart:
return self.restart()
elif hup:
return self.signal('-HUP')
return True
def get_vnode_def(self, vnodefile=None):
"""
:returns: A vnode def file as a single string
"""
if vnodefile is None:
return None
f = open(vnodefile)
lines = f.readlines()
f.close()
return "".join(lines)
def insert_vnode_def(self, vdef, fname=None, additive=False, restart=True):
"""
Insert and enable a vnode definition. Root privilege
is required
:param vdef: The vnode definition string as created by
create_vnode_def
:type vdef: str
:param fname: The filename to write the vnode def string to
:type fname: str or None
:param additive: If True, keep all other vnode def files
                         under config.d. Default is False
:type additive: bool
:param restart: If True, restart the MoM. Default is True
:type restart: bool
"""
try:
(fd, fn) = self.du.mkstemp(self.hostname)
os.write(fd, vdef)
os.close(fd)
except:
raise PbsMomConfigError(rc=1, rv=False,
msg="Failed to insert vnode definition")
if fname is None:
fname = 'pbs_vnode.def'
if not additive:
self.delete_vnode_defs()
cmd = [os.path.join(self.pbs_conf['PBS_EXEC'], 'sbin', 'pbs_mom')]
cmd += ['-s', 'insert', fname, fn]
ret = self.du.run_cmd(self.hostname, cmd, sudo=True, logerr=False,
level=logging.INFOCLI)
self.du.rm(hostname=self.hostname, path=fn, force=True)
if ret['rc'] != 0:
raise PbsMomConfigError(rc=1, rv=False, msg="\n".join(ret['err']))
msg = self.logprefix + 'inserted vnode definition file '
msg += fname + ' on host: ' + self.hostname
self.logger.info(msg)
if restart:
self.restart()
def has_vnode_defs(self):
"""
Check for vnode definition(s)
"""
cmd = [os.path.join(self.pbs_conf['PBS_EXEC'], 'sbin', 'pbs_mom')]
cmd += ['-s', 'list']
ret = self.du.run_cmd(self.hostname, cmd, sudo=True, logerr=False,
level=logging.INFOCLI)
if ret['rc'] == 0:
files = [x for x in ret['out'] if not x.startswith('PBS')]
if len(files) > 0:
return True
else:
return False
else:
return False
def delete_vnode_defs(self, vdefname=None):
"""
delete vnode definition(s) on this MoM
:param vdefname: name of a vnode definition file to delete,
if None all vnode definitions are deleted
:type vdefname: str
:returns: True if delete succeed otherwise False
"""
cmd = [os.path.join(self.pbs_conf['PBS_EXEC'], 'sbin', 'pbs_mom')]
cmd += ['-s', 'list']
ret = self.du.run_cmd(self.hostname, cmd, sudo=True, logerr=False,
level=logging.INFOCLI)
if ret['rc'] != 0:
return False
rv = True
if len(ret['out']) > 0:
for vnodedef in ret['out']:
vnodedef = vnodedef.strip()
if (vnodedef == vdefname) or vdefname is None:
if vnodedef.startswith('PBS'):
continue
cmd = [os.path.join(self.pbs_conf['PBS_EXEC'], 'sbin',
'pbs_mom')]
cmd += ['-s', 'remove', vnodedef]
ret = self.du.run_cmd(self.hostname, cmd, sudo=True,
logerr=False, level=logging.INFOCLI)
if ret['rc'] != 0:
return False
else:
rv = True
return rv
def has_pelog(self, filename=None):
"""
Check for prologue and epilogue
"""
_has_pro = False
_has_epi = False
phome = self.pbs_conf['PBS_HOME']
prolog = os.path.join(phome, 'mom_priv', 'prologue')
epilog = os.path.join(phome, 'mom_priv', 'epilogue')
if self.du.isfile(self.hostname, path=prolog, sudo=True):
_has_pro = True
if filename == 'prologue':
return _has_pro
if self.du.isfile(self.hostname, path=epilog, sudo=True):
_has_epi = True
if filename == 'epilogue':
            return _has_epi
if _has_epi or _has_pro:
return True
return False
def has_prologue(self):
"""
Check for prologue
"""
        return self.has_pelog('prologue')
def has_epilogue(self):
"""
Check for epilogue
"""
return self.has_pelog('epilogue')
def delete_pelog(self):
"""
Delete any prologue and epilogue files that may have been
defined on this MoM
"""
phome = self.pbs_conf['PBS_HOME']
prolog = os.path.join(phome, 'mom_priv', 'prologue')
epilog = os.path.join(phome, 'mom_priv', 'epilogue')
ret = self.du.rm(self.hostname, epilog, force=True,
sudo=True, logerr=False)
if ret:
ret = self.du.rm(self.hostname, prolog, force=True,
sudo=True, logerr=False)
if not ret:
self.logger.error('problem deleting prologue/epilogue')
# we don't bail because the problem may be that files did not
# exist. Let tester fix the issue
return ret
def create_pelog(self, body=None, src=None, filename=None):
"""
        Create ``prologue`` and ``epilogue`` files; accepts either
        a body of the script or a source file.
:returns: True on success and False on error
"""
if self.has_diag:
            _msg = 'MoM is loaded from diag, bypassing pelog creation'
self.logger.info(_msg)
return False
if (src is None and body is None) or (filename is None):
self.logger.error('file and body of script are required')
return False
pelog = os.path.join(self.pbs_conf['PBS_HOME'], 'mom_priv', filename)
self.logger.info(self.logprefix +
' creating ' + filename + ' with body\n' + '---')
if body is not None:
self.logger.info(body)
(fd, src) = self.du.mkstemp(prefix='pbs-pelog')
os.write(fd, body)
os.close(fd)
elif src is not None:
_b = open(src)
self.logger.info("\n".join(_b.readlines()))
_b.close()
self.logger.info('---')
ret = self.du.run_copy(self.hostname, src, pelog, sudo=True)
if body is not None:
os.remove(src)
if ret['rc'] != 0:
self.logger.error('error creating pelog ')
return False
ret = self.du.chown(self.hostname, path=pelog, uid=0, gid=0, sudo=True,
logerr=False)
if not ret:
self.logger.error('error chowning pelog to root')
return False
ret = self.du.chmod(self.hostname, path=pelog, mode=0755, sudo=True)
if not ret:
self.logger.error('error changing mode of pelog')
return False
return True
def prologue(self, body=None, src=None):
"""
create prologue
"""
return self.create_pelog(body, src, 'prologue')
def epilogue(self, body=None, src=None):
"""
Create epilogue
"""
return self.create_pelog(body, src, 'epilogue')
def action(self, act, script):
"""
Define action script. Not currently implemented
"""
pass
class Hook(PBSObject):
"""
PBS hook objects. Holds attributes information and pointer
to server
:param name: Hook name
:type name: str or None
:param attrs: Hook attributes
:type attrs: Dictionary
:param server: Pointer to server
"""
dflt_attributes = {}
def __init__(self, name=None, attrs={}, server=None):
self.logger = logging.getLogger(__name__)
PBSObject.__init__(self, name, attrs, self.dflt_attributes)
self.server = server
class ResourceResv(PBSObject):
"""
Generic PBS resource reservation, i.e., job or
``advance/standing`` reservation
"""
def execvnode(self, attr='exec_vnode'):
"""
PBS type execution vnode
"""
if attr in self.attributes:
return PbsTypeExecVnode(self.attributes[attr])
else:
return None
def exechost(self):
"""
PBS type execution host
"""
if 'exec_host' in self.attributes:
return PbsTypeExecHost(self.attributes['exec_host'])
else:
return None
def select(self):
if hasattr(self, '_select') and self._select is not None:
return self._select
if 'schedselect' in self.attributes:
self._select = PbsTypeSelect(self.attributes['schedselect'])
elif 'select' in self.attributes:
self._select = PbsTypeSelect(self.attributes['select'])
else:
return None
return self._select
@classmethod
def get_hosts(cls, exechost=None):
"""
:returns: The hosts portion of the exec_host
"""
hosts = []
exechosts = cls.utils.parse_exechost(exechost)
if exechosts:
for h in exechosts:
eh = h.keys()[0]
if eh not in hosts:
hosts.append(eh)
return hosts
def get_vnodes(self, execvnode=None):
"""
:returns: The unique vnode names of an execvnode as a list
"""
if execvnode is None:
if 'exec_vnode' in self.attributes:
execvnode = self.attributes['exec_vnode']
elif 'resv_nodes' in self.attributes:
execvnode = self.attributes['resv_nodes']
else:
return []
vnodes = []
execvnodes = PbsTypeExecVnode(execvnode)
if execvnodes:
for n in execvnodes:
ev = n.keys()[0]
if ev not in vnodes:
vnodes.append(ev)
return vnodes
def walltime(self, attr='Resource_List.walltime'):
if attr in self.attributes:
return self.utils.convert_duration(self.attributes[attr])
class Job(ResourceResv):
"""
PBS Job. Attributes and Resources
:param username: Job username
:type username: str or None
:param attrs: Job attributes
:type attrs: Dictionary
:param jobname: Name of the PBS job
:type jobname: str or None
"""
dflt_attributes = {
ATTR_N: 'STDIN',
ATTR_j: 'n',
ATTR_m: 'a',
ATTR_p: '0',
ATTR_r: 'y',
ATTR_k: 'oe',
}
runtime = 100
logger = logging.getLogger(__name__)
def __init__(self, username=None, attrs={}, jobname=None):
self.server = {}
self.script = None
self.script_body = None
if username is not None:
self.username = str(username)
else:
self.username = None
self.du = None
self.interactive_handle = None
PBSObject.__init__(self, None, attrs, self.dflt_attributes)
if jobname is not None:
self.custom_attrs[ATTR_N] = jobname
self.attributes[ATTR_N] = jobname
self.set_variable_list(self.username)
self.set_sleep_time(100)
def set_variable_list(self, user=None, workdir=None):
"""
Customize the ``Variable_List`` job attribute to ``<user>``
"""
if user is None:
userinfo = pwd.getpwuid(os.getuid())
user = userinfo[0]
homedir = userinfo[5]
else:
try:
homedir = pwd.getpwnam(user)[5]
except:
homedir = ""
self.username = user
s = ['PBS_O_HOME=' + homedir]
s += ['PBS_O_LANG=en_US.UTF-8']
s += ['PBS_O_LOGNAME=' + user]
s += ['PBS_O_PATH=/usr/bin:/bin:/usr/bin:/usr/local/bin']
s += ['PBS_O_MAIL=/var/spool/mail/' + user]
s += ['PBS_O_SHELL=/bin/bash']
s += ['PBS_O_SYSTEM=Linux']
if workdir is not None:
wd = workdir
else:
wd = os.getcwd()
s += ['PBS_O_WORKDIR=' + str(wd)]
self.attributes[ATTR_v] = ",".join(s)
self.set_attributes()
def set_sleep_time(self, duration):
"""
Set the sleep duration for this job.
:param duration: The duration, in seconds, to sleep
:type duration: int
"""
self.set_execargs('/bin/sleep', duration)
def set_execargs(self, executable, arguments=None):
"""
Set the executable and arguments to use for this job
:param executable: path to an executable. No checks are made.
:type executable: str
:param arguments: arguments to executable.
:type arguments: str or list or int
"""
msg = ['job: executable set to ' + str(executable)]
if arguments is not None:
msg += [' with arguments: ' + str(arguments)]
self.logger.info("".join(msg))
self.attributes[ATTR_executable] = executable
if arguments is not None:
args = ''
xml_beginargs = '<jsdl-hpcpa:Argument>'
xml_endargs = '</jsdl-hpcpa:Argument>'
if isinstance(arguments, list):
for a in arguments:
args += xml_beginargs + str(a) + xml_endargs
elif isinstance(arguments, str):
args = xml_beginargs + arguments + xml_endargs
elif isinstance(arguments, int):
args = xml_beginargs + str(arguments) + xml_endargs
self.attributes[ATTR_Arglist] = args
else:
self.unset_attributes([ATTR_Arglist])
self.set_attributes()
def create_script(self, body=None, uid=None, gid=None, hostname=None):
"""
Create a job script from a given body of text into a
temporary location
:param body: the body of the script
        :param uid: Optional user id to own this script,
                    defaults to current user
        :param gid: Optional group id to own this script
:param hostname: The host on which the job script is to
be created
:type hostname: str or None
"""
if body is None:
return None
if isinstance(body, list):
body = '\n'.join(body)
self.script_body = body
if self.du is None:
self.du = DshUtils()
# First create the temporary file as current user and only change
# its mode once the current user has written to it
(fd, fn) = self.du.mkstemp(hostname, prefix='PtlPbsJobScript', uid=uid,
gid=gid, mode=0755, body=body)
os.close(fd)
if not self.du.is_localhost(hostname):
self.du.run_copy(hostname, fn, fn)
self.script = fn
return fn
class Reservation(ResourceResv):
"""
PBS Reservation. Attributes and Resources
:param attrs: Reservation attributes
:type attrs: Dictionary
"""
dflt_attributes = {}
def __init__(self, username=None, attrs={}):
self.server = {}
self.script = None
self.attributes = attrs
if username is None:
userinfo = pwd.getpwuid(os.getuid())
self.username = userinfo[0]
else:
self.username = str(username)
        # These are not in dflt_attributes because the conversion to CLI
        # options is done strictly
if ATTR_resv_start not in attrs:
attrs[ATTR_resv_start] = str(int(time.time()) + 36 * 3600)
if ATTR_resv_end not in attrs:
if ATTR_resv_duration not in attrs:
attrs[ATTR_resv_end] = str(int(time.time()) + 72 * 3600)
PBSObject.__init__(self, None, attrs, self.dflt_attributes)
self.set_attributes()
def set_variable_list(self, user, workdir=None):
pass
class InteractiveJob(threading.Thread):
"""
An Interactive Job thread
Interactive Jobs are submitted as a thread that sets the jobid
as soon as it is returned by ``qsub -I``, such that the caller
can get back to monitoring the state of PBS while the interactive
session goes on in the thread.
The commands to be run within an interactive session are
specified in the job's interactive_script attribute as a list of
tuples, where the first item in each tuple is the command to run,
and the subsequent items are the expected returned data.
Implementation details:
Support for interactive jobs is currently done through the
pexpect module which must be installed separately from PTL.
Interactive jobs are submitted through ``CLI`` only, there is no
API support for this operation yet.
The submission of an interactive job requires passing in job
    attributes, the command to execute ``(i.e. path to qsub -I)``
and the hostname
when not impersonating:
pexpect spawns the ``qsub -I`` command and expects a prompt
back, for each tuple in the interactive_script, it sends the
command and expects to match the return value.
when impersonating:
pexpect spawns ``sudo -u <user> qsub -I``. The rest is as
        described in non-impersonating mode.
"""
logger = logging.getLogger(__name__)
pexpect_timeout = 15
pexpect_sleep_time = .1
du = DshUtils()
def __init__(self, job, cmd, host):
threading.Thread.__init__(self)
self.job = job
self.cmd = cmd
self.jobid = None
self.hostname = host
def run(self):
"""
Run the interactive job
"""
try:
import pexpect
except:
self.logger.error('pexpect module is required for '
'interactive jobs')
return None
job = self.job
cmd = self.cmd
self.jobid = None
self.logger.info("submit interactive job as " + job.username +
": " + " ".join(cmd))
if not hasattr(job, 'interactive_script'):
self.logger.debug('no interactive_script attribute on job')
return None
try:
# sleep to allow server to communicate with client
# this value is set empirically so tweaking may be
# needed
_st = self.pexpect_sleep_time
_to = self.pexpect_timeout
_sc = job.interactive_script
cmd = ['sudo', '-u', job.username] + cmd
self.logger.debug(cmd)
_p = pexpect.spawn(" ".join(cmd), timeout=_to)
self.job.interactive_handle = _p
time.sleep(_st)
_p.expect('qsub: waiting for job (?P<jobid>[\d\w.]+) to start.*')
if _p.match:
self.jobid = _p.match.group('jobid')
else:
_p.close()
self.job.interactive_handle = None
return None
self.logger.debug(_p.after.decode())
for _l in _sc:
self.logger.debug('sending: ' + _l[0])
_p.sendline(_l[0])
time.sleep(_st)
# only way I could figure out to catch a sleep command
# within a spawned pexpect child. Might need revisiting
if 'sleep' in _l[0]:
_secs = _l[0].split()[1]
self.logger.debug('sleeping ' + str(_secs))
time.sleep(float(_secs))
if len(_l) > 1:
for _r in range(1, len(_l)):
self.logger.debug('expecting: ' + _l[_r])
_p.expect(_l[_r])
time.sleep(_st)
self.logger.debug('received: ' + _p.after.decode())
time.sleep(_st)
self.logger.debug('received: ' + _p.after.decode())
self.logger.debug('sending Ctrl-D')
_p.sendcontrol('d')
time.sleep(_st)
_p.close()
self.job.interactive_handle = None
self.logger.debug(_p.exitstatus)
except Exception:
            self.logger.error(traceback.format_exc())
return None
return self.jobid
class Queue(PBSObject):
"""
PBS Queue container, holds attributes of the queue and
pointer to server
:param name: Queue name
:type name: str or None
:param attrs: Queue attributes
:type attrs: Dictionary
"""
dflt_attributes = {}
def __init__(self, name=None, attrs={}, server=None):
self.logger = logging.getLogger(__name__)
PBSObject.__init__(self, name, attrs, self.dflt_attributes)
self.server = server
m = ['queue']
if server is not None:
m += ['@' + server.shortname]
if self.name is not None:
m += [' ', self.name]
m += [': ']
self.logprefix = "".join(m)
def revert_to_defaults(self):
"""
reset queue attributes to defaults
"""
ignore_attrs = ['id', ATTR_count, ATTR_rescassn]
ignore_attrs += [ATTR_qtype, ATTR_enable, ATTR_start, ATTR_total]
ignore_attrs += ['THE_END']
len_attrs = len(ignore_attrs)
unsetlist = []
setdict = {}
self.logger.info(
self.logprefix +
"reverting configuration to defaults")
if self.server is not None:
self.server.status(QUEUE, id=self.name, level=logging.DEBUG)
for k in self.attributes.keys():
for i in range(len_attrs):
if k.startswith(ignore_attrs[i]):
break
if (i == (len_attrs - 1)) and k not in self.dflt_attributes:
unsetlist.append(k)
if len(unsetlist) != 0 and self.server is not None:
try:
self.server.manager(MGR_CMD_UNSET, MGR_OBJ_QUEUE, unsetlist,
self.name)
except PbsManagerError, e:
self.logger.error(e.msg)
for k in self.dflt_attributes.keys():
if (k not in self.attributes or
self.attributes[k] != self.dflt_attributes[k]):
setdict[k] = self.dflt_attributes[k]
if len(setdict.keys()) != 0 and self.server is not None:
self.server.manager(MGR_CMD_SET, MGR_OBJ_QUEUE, setdict)
class PBSInitServices(object):
"""
PBS initialization services
:param hostname: Machine hostname
:type hostname: str or None
    :param conf: PBS configuration file
:type conf: str or None
"""
def __init__(self, hostname=None, conf=None):
self.logger = logging.getLogger(__name__)
self.hostname = hostname
if self.hostname is None:
self.hostname = socket.gethostname()
self.dflt_conf_file = os.environ.get('PBS_CONF_FILE', '/etc/pbs.conf')
self.conf_file = conf
self.du = DshUtils()
self.is_sunos = sys.platform.startswith('sunos')
self.is_aix = sys.platform.startswith('aix')
self.is_linux = sys.platform.startswith('linux')
def initd(self, hostname=None, op='status', conf_file=None,
init_script=None, daemon='all'):
"""
Run the init script for a given operation
:param hostname: hostname on which to execute the init script
:type hostname: str or None
:param op: one of status, start, stop, restart
:type op: str
:param conf_file: optional path to a configuration file
:type conf_file: str or None
:param init_script: optional path to a PBS init script
:type init_script: str or None
:param daemon: name of daemon to operate on. one of server, mom,
sched, comm or all
:type daemon: str
"""
if hostname is None:
hostname = self.hostname
if conf_file is None:
conf_file = self.conf_file
return self._unix_initd(hostname, op, conf_file, init_script, daemon)
def restart(self, hostname=None, init_script=None):
"""
Run the init script for a restart operation
:param hostname: hostname on which to execute the init script
:type hostname: str or None
:param init_script: optional path to a PBS init script
:type init_script: str or None
"""
return self.initd(hostname, op='restart', init_script=init_script)
def restart_server(self, hostname=None, init_script=None):
"""
Run the init script for a restart server
:param hostname: hostname on which to restart server
:type hostname: str or None
:param init_script: optional path to a PBS init script
:type init_script: str or None
"""
return self.initd(hostname, op='restart', init_script=init_script,
daemon='server')
def restart_mom(self, hostname=None, init_script=None):
"""
Run the init script for a restart mom
:param hostname: hostname on which to restart mom
:type hostname: str or None
:param init_script: optional path to a PBS init script
:type init_script: str or None
"""
return self.initd(hostname, op='restart', init_script=init_script,
daemon='mom')
def restart_sched(self, hostname=None, init_script=None):
"""
Run the init script for a restart sched
:param hostname: hostname on which to restart sched
:type hostname: str or None
:param init_script: optional path to a PBS init script
:type init_script: str or None
"""
return self.initd(hostname, op='restart', init_script=init_script,
daemon='sched')
def restart_comm(self, hostname=None, init_script=None):
"""
Run the init script for a restart comm
:param hostname: hostname on which to restart comm
:type hostname: str or None
:param init_script: optional path to a PBS init script
:type init_script: str or None
"""
return self.initd(hostname, op='restart', init_script=init_script,
daemon='comm')
def start(self, hostname=None, init_script=None):
"""
Run the init script for a start operation
:param hostname: hostname on which to execute the init script
:type hostname: str or None
:param init_script: optional path to a PBS init script
:type init_script: str or None
"""
return self.initd(hostname, op='start', init_script=init_script)
def start_server(self, hostname=None, init_script=None):
"""
Run the init script for a start server
:param hostname: hostname on which to start server
:type hostname: str or None
:param init_script: optional path to a PBS init script
:type init_script: str or None
"""
return self.initd(hostname, op='start', init_script=init_script,
daemon='server')
def start_mom(self, hostname=None, init_script=None):
"""
Run the init script for a start mom
:param hostname: hostname on which to start mom
:type hostname: str or None
:param init_script: optional path to a PBS init script
:type init_script: str or None
"""
return self.initd(hostname, op='start', init_script=init_script,
daemon='mom')
def start_sched(self, hostname=None, init_script=None):
"""
Run the init script for a start sched
:param hostname: hostname on which to start sched
:type hostname: str or None
:param init_script: optional path to a PBS init script
:type init_script: str or None
"""
return self.initd(hostname, op='start', init_script=init_script,
daemon='sched')
def start_comm(self, hostname=None, init_script=None):
"""
Run the init script for a start comm
:param hostname: hostname on which to start comm
:type hostname: str or None
:param init_script: optional path to a PBS init script
:type init_script: str or None
"""
return self.initd(hostname, op='start', init_script=init_script,
daemon='comm')
def stop(self, hostname=None, init_script=None):
"""
Run the init script for a stop operation
:param hostname: hostname on which to execute the init script
:type hostname: str or None
:param init_script: optional path to a PBS init script
:type init_script: str or None
"""
return self.initd(hostname, op='stop', init_script=init_script)
def stop_server(self, hostname=None, init_script=None):
"""
Run the init script for a stop server
:param hostname: hostname on which to stop server
:type hostname: str or None
:param init_script: optional path to a PBS init script
:type init_script: str or None
"""
return self.initd(hostname, op='stop', init_script=init_script,
daemon='server')
def stop_mom(self, hostname=None, init_script=None):
"""
Run the init script for a stop mom
:param hostname: hostname on which to stop mom
:type hostname: str or None
:param init_script: optional path to a PBS init script
:type init_script: str or None
"""
return self.initd(hostname, op='stop', init_script=init_script,
daemon='mom')
def stop_sched(self, hostname=None, init_script=None):
"""
Run the init script for a stop sched
:param hostname: hostname on which to stop sched
:type hostname: str or None
:param init_script: optional path to a PBS init script
:type init_script: str or None
"""
return self.initd(hostname, op='stop', init_script=init_script,
daemon='sched')
def stop_comm(self, hostname=None, init_script=None):
"""
Run the init script for a stop comm
:param hostname: hostname on which to stop comm
:type hostname: str or None
:param init_script: optional path to a PBS init script
:type init_script: str or None
"""
return self.initd(hostname, op='stop', init_script=init_script,
daemon='comm')
def status(self, hostname=None, init_script=None):
"""
Run the init script for a status operation
:param hostname: hostname on which to execute the init script
:type hostname: str or None
:param init_script: optional path to a PBS init script
:type init_script: str or None
"""
return self.initd(hostname, op='status', init_script=init_script)
def status_server(self, hostname=None, init_script=None):
"""
Run the init script for a status server
:param hostname: hostname on which to status server
:type hostname: str or None
:param init_script: optional path to a PBS init script
:type init_script: str or None
"""
return self.initd(hostname, op='status', init_script=init_script,
daemon='server')
def status_mom(self, hostname=None, init_script=None):
"""
Run the init script for a status mom
:param hostname: hostname on which to status mom
:type hostname: str or None
:param init_script: optional path to a PBS init script
:type init_script: str or None
"""
return self.initd(hostname, op='status', init_script=init_script,
daemon='mom')
def status_sched(self, hostname=None, init_script=None):
"""
Run the init script for a status sched
:param hostname: hostname on which to status sched
:type hostname: str or None
:param init_script: optional path to a PBS init script
:type init_script: str or None
"""
return self.initd(hostname, op='status', init_script=init_script,
daemon='sched')
def status_comm(self, hostname=None, init_script=None):
"""
Run the init script for a status comm
:param hostname: hostname on which to status comm
:type hostname: str or None
:param init_script: optional path to a PBS init script
:type init_script: str or None
"""
return self.initd(hostname, op='status', init_script=init_script,
daemon='comm')
def _unix_initd(self, hostname, op, conf_file, init_script, daemon):
"""
Helper function for initd ``(*nix version)``
:param hostname: hostname on which init script should run
:type hostname: str
:param op: Operation on daemons - start, stop, restart or status
        :type op: str
:param conf_file: Optional path to the pbs configuration file
:type conf_file: str or None
:param init_script: optional path to a PBS init script
:type init_script: str or None
:param daemon: name of daemon to operate on. one of server, mom,
sched, comm or all
:type daemon: str
"""
if daemon is not None and daemon != 'all':
conf = self.du.parse_pbs_config(hostname, conf_file)
dconf = {
'PBS_START_SERVER': 0,
'PBS_START_MOM': 0,
'PBS_START_SCHED': 0,
'PBS_START_COMM': 0
}
if daemon == 'server' and conf.get('PBS_START_SERVER', 0) != 0:
dconf['PBS_START_SERVER'] = 1
elif daemon == 'mom' and conf.get('PBS_START_MOM', 0) != 0:
dconf['PBS_START_MOM'] = 1
elif daemon == 'sched' and conf.get('PBS_START_SCHED', 0) != 0:
dconf['PBS_START_SCHED'] = 1
elif daemon == 'comm' and conf.get('PBS_START_COMM', 0) != 0:
dconf['PBS_START_COMM'] = 1
(fd, fn) = self.du.mkstemp(hostname)
os.close(fd)
self.du.set_pbs_config(hostname, fin=conf_file, fout=fn,
confs=dconf)
init_cmd = ['PBS_CONF_FILE=' + fn]
_as = True
else:
fn = None
if (conf_file is not None) and (conf_file != self.dflt_conf_file):
init_cmd = ['PBS_CONF_FILE=' + conf_file]
_as = True
else:
init_cmd = []
_as = False
conf = self.du.parse_pbs_config(hostname, conf_file)
if (init_script is None) or (not init_script.startswith('/')):
if 'PBS_EXEC' not in conf:
msg = 'Missing PBS_EXEC setting in pbs config'
raise PbsInitServicesError(rc=1, rv=False, msg=msg)
if init_script is None:
init_script = os.path.join(conf['PBS_EXEC'], 'libexec',
'pbs_init.d')
else:
init_script = os.path.join(conf['PBS_EXEC'], 'etc',
init_script)
if not self.du.isfile(hostname, path=init_script, sudo=True):
# Could be Type 3 installation where we will not have
# PBS_EXEC/libexec/pbs_init.d
return []
init_cmd += [init_script, op]
msg = 'running init script to ' + op + ' pbs'
if daemon is not None and daemon != 'all':
msg += ' ' + daemon
msg += ' on ' + hostname
if conf_file is not None:
msg += ' using ' + conf_file
msg += ' init_cmd=%s' % (str(init_cmd))
self.logger.info(msg)
ret = self.du.run_cmd(hostname, init_cmd, sudo=True, as_script=_as,
logerr=False)
if ret['rc'] != 0:
raise PbsInitServicesError(rc=ret['rc'], rv=False,
msg='\n'.join(ret['err']))
else:
return ret
def switch_version(self, hostname=None, version=None):
"""
Switch to another version of PBS installed on the system
:param hostname: The hostname to operate on
:type hostname: str or None
:param version: version to switch
"""
pbs_conf = self.du.parse_pbs_config(hostname)
if 'PBS_EXEC' in pbs_conf:
dn = os.path.dirname(pbs_conf['PBS_EXEC'])
newver = os.path.join(dn, version)
ret = self.du.isdir(hostname, path=newver)
if not ret:
msg = 'no version ' + version + ' on host ' + hostname
raise PbsInitServicesError(rc=0, rv=False, msg=msg)
self.stop(hostname)
dflt = os.path.join(dn, 'default')
ret = self.du.isfile(hostname, path=dflt)
if ret:
self.logger.info('removing symbolic link ' + dflt)
self.du.rm(hostname, dflt, sudo=True, logerr=False)
self.du.set_pbs_config(hostname, confs={'PBS_EXEC': dflt})
else:
self.du.set_pbs_config(hostname, confs={'PBS_EXEC': newver})
self.logger.info('linking ' + newver + ' to ' + dflt)
self.du.run_cmd(hostname, ['ln', '-s', newver, dflt],
sudo=True, logerr=False)
self.start(hostname)
|
vinodchitrali/pbspro
|
test/fw/ptl/lib/pbs_testlib.py
|
Python
|
agpl-3.0
| 502,834 | 0.00042 |
import re
import requests
import threading
from ..common import clean_title,clean_search
import xbmc
from ..scraper import Scraper
sources = []
class scrape_thread(threading.Thread):
def __init__(self,m,match,qual):
self.m = m
self.match = match
self.qual = qual
threading.Thread.__init__(self)
def run(self):
try:
qual = self.qual
url = 'https://yesmovies.to/ajax/movie_token?eid='+self.m+'&mid='+self.match
html3 = requests.get(url).content
x,y = re.findall("_x='(.+?)', _y='(.+?)'",html3)[0]
fin_url = 'https://yesmovies.to/ajax/movie_sources/'+self.m+'?x='+x+'&y='+y
h = requests.get(fin_url).content
playlink = re.findall('"file":"(.+?)"(.+?)}',h)
for p,rest in playlink:
try:
qual = re.findall('"label":"(.+?)"',str(rest))[0]
except:
qual = self.qual
p = p.replace('\\','')
if 'srt' in p:
pass
elif 'spanish' in qual:
pass
elif 'googleapis' in p:
pass
else:
if 'english' in qual:
qual = '720p'
if 'lemon' in p:
p = p+'|User-Agent=Mozilla/5.0 (Windows NT 6.3; WOW64; rv:55.0) Gecko/20100101 Firefox/55.0&Host=streaming.lemonstream.me:1443&Referer=https://yesmovies.to'
if 'http' in p:
sources.append({'source': 'Gvideo', 'quality': qual, 'scraper': 'yesmovies', 'url': p,'direct': True})
except Exception as e:
xbmc.log('get sources: '+str(e),xbmc.LOGNOTICE)
class Yesmovies(Scraper):
domains = ['yesmovies.to']
name = "yesmovies"
def __init__(self):
self.base_link = 'https://yesmovies.to'
self.search_link = '/search/'
def scrape_episode(self, title, show_year, year, season, episode, imdb, tvdb, debrid = False):
try:
start_url = self.base_link+self.search_link+title.replace(' ','+')+'.html'
html = requests.get(start_url).content
match = re.compile('<div class="ml-item">.+?<a href="(.+?)".+?title="(.+?)"',re.DOTALL).findall(html)
for url,name in match:
if clean_title(title)+'season'+season == clean_title(name):
html2 = requests.get(url).content
match2 = re.findall('favorite\((.+?),',html2)[0]
get_ep = requests.get('https://yesmovies.to/ajax/v4_movie_episodes/'+match2).content
block = re.compile('data-id="(.+?)".+?title="(.+?)">').findall(get_ep.replace('\\',''))
for ID,name in block:
if 'Episode' in name:
ep = re.findall('Episode (.+?):',str(name))[0]
if len(episode) == 1:
episode = '0'+episode
if episode == ep:
thread = scrape_thread(ID,match2,'SD')
thread.start()
try:
thread.join()
except:
pass
return sources
except Exception as e:
xbmc.log(str(e),xbmc.LOGNOTICE)
return []
def scrape_movie(self, title, year, imdb, debrid = False):
try:
try:
start_url = self.base_link+self.search_link+title.replace(' ','+')+'.html'
title = title
m_list = self.check_for_movie(title,start_url)
except:
start_url2 = self.base_link+self.search_link+title.replace(' ','+')+'+'+year+'.html'
title = title+year
m_list = self.check_for_movie(title,start_url2)
for item in m_list:
                m, match, qual = item
thread = scrape_thread(m,match,qual)
thread.start()
try:
thread.join()
except:
pass
return sources
except Exception as e:
xbmc.log('scrape movie: '+str(e),xbmc.LOGNOTICE)
return[]
def check_for_movie(self,title,start_url):
try:
m_list = []
html = requests.get(start_url).content
match = re.compile('<div class="ml-item">.+?<a href="(.+?)".+?title="(.+?)"',re.DOTALL).findall(html)
for url,name in match:
if clean_search(title.replace(' ','')) == clean_search(name).replace(' ',''):
html = requests.get(url).content
match = re.findall('favorite\((.+?),',html)[0]
second_url = 'https://yesmovies.to/ajax/v4_movie_episodes/'+match
html2 = requests.get(second_url).content
match2 = re.compile('<li class=.+?data-id=.+?"(.+?)".+?title=.+?"(.+?)"').findall(html2)
for m,qual in match2:
m = m.replace('\\','')
qual = qual.replace('\\','').replace('HD-','')
if len(m)==6 or len(m)==7:
m_list.append((m,match,qual))
return m_list
except Exception as e:
xbmc.log('check for movie '+str(e),xbmc.LOGNOTICE)
#Yesmovies().scrape_movie('baywatch','2017','')
#Yesmovies().scrape_episode('game of thrones', '', '', '7', '7', '', '')
|
mrquim/mrquimrepo
|
script.module.nanscrapers/lib/nanscrapers/scraperplugins/yesmovies.py
|
Python
|
gpl-2.0
| 5,703 | 0.01543 |
import contextlib
import sqlalchemy as sa
from sqlalchemy import and_
from sqlalchemy import between
from sqlalchemy import bindparam
from sqlalchemy import Boolean
from sqlalchemy import cast
from sqlalchemy import collate
from sqlalchemy import column
from sqlalchemy import desc
from sqlalchemy import distinct
from sqlalchemy import exc as sa_exc
from sqlalchemy import exists
from sqlalchemy import ForeignKey
from sqlalchemy import func
from sqlalchemy import insert
from sqlalchemy import inspect
from sqlalchemy import Integer
from sqlalchemy import literal
from sqlalchemy import literal_column
from sqlalchemy import MetaData
from sqlalchemy import null
from sqlalchemy import or_
from sqlalchemy import select
from sqlalchemy import String
from sqlalchemy import table
from sqlalchemy import testing
from sqlalchemy import text
from sqlalchemy import Unicode
from sqlalchemy import union
from sqlalchemy import util
from sqlalchemy.engine import default
from sqlalchemy.orm import aliased
from sqlalchemy.orm import attributes
from sqlalchemy.orm import backref
from sqlalchemy.orm import Bundle
from sqlalchemy.orm import column_property
from sqlalchemy.orm import create_session
from sqlalchemy.orm import defer
from sqlalchemy.orm import joinedload
from sqlalchemy.orm import lazyload
from sqlalchemy.orm import mapper
from sqlalchemy.orm import Query
from sqlalchemy.orm import relationship
from sqlalchemy.orm import Session
from sqlalchemy.orm import subqueryload
from sqlalchemy.orm import synonym
from sqlalchemy.orm.util import join
from sqlalchemy.orm.util import with_parent
from sqlalchemy.sql import expression
from sqlalchemy.sql import operators
from sqlalchemy.testing import AssertsCompiledSQL
from sqlalchemy.testing import fixtures
from sqlalchemy.testing import is_
from sqlalchemy.testing import is_not_
from sqlalchemy.testing import mock
from sqlalchemy.testing.assertions import assert_raises
from sqlalchemy.testing.assertions import assert_raises_message
from sqlalchemy.testing.assertions import eq_
from sqlalchemy.testing.assertions import eq_ignore_whitespace
from sqlalchemy.testing.assertions import expect_warnings
from sqlalchemy.testing.assertsql import CompiledSQL
from sqlalchemy.testing.schema import Column
from sqlalchemy.testing.schema import Table
from test.orm import _fixtures
class QueryTest(_fixtures.FixtureTest):
run_setup_mappers = "once"
run_inserts = "once"
run_deletes = None
@classmethod
def setup_mappers(cls):
cls._setup_stock_mapping()
class MiscTest(QueryTest):
run_create_tables = None
run_inserts = None
def test_with_session(self):
User = self.classes.User
s1 = Session()
s2 = Session()
q1 = s1.query(User)
q2 = q1.with_session(s2)
assert q2.session is s2
assert q1.session is s1
class OnlyReturnTuplesTest(QueryTest):
def test_single_entity_false(self):
User = self.classes.User
row = create_session().query(User).only_return_tuples(False).first()
assert isinstance(row, User)
def test_single_entity_true(self):
User = self.classes.User
row = create_session().query(User).only_return_tuples(True).first()
assert isinstance(row, tuple)
def test_multiple_entity_false(self):
User = self.classes.User
row = (
create_session()
.query(User.id, User)
.only_return_tuples(False)
.first()
)
assert isinstance(row, tuple)
def test_multiple_entity_true(self):
User = self.classes.User
row = (
create_session()
.query(User.id, User)
.only_return_tuples(True)
.first()
)
assert isinstance(row, tuple)
class RowTupleTest(QueryTest):
run_setup_mappers = None
def test_custom_names(self):
User, users = self.classes.User, self.tables.users
mapper(User, users, properties={"uname": users.c.name})
row = (
create_session()
.query(User.id, User.uname)
.filter(User.id == 7)
.first()
)
assert row.id == 7
assert row.uname == "jack"
def test_column_metadata(self):
users, Address, addresses, User = (
self.tables.users,
self.classes.Address,
self.tables.addresses,
self.classes.User,
)
mapper(User, users)
mapper(Address, addresses)
sess = create_session()
user_alias = aliased(User)
user_alias_id_label = user_alias.id.label("foo")
address_alias = aliased(Address, name="aalias")
fn = func.count(User.id)
name_label = User.name.label("uname")
bundle = Bundle("b1", User.id, User.name)
cte = sess.query(User.id).cte()
for q, asserted in [
(
sess.query(User),
[
{
"name": "User",
"type": User,
"aliased": False,
"expr": User,
"entity": User,
}
],
),
(
sess.query(User.id, User),
[
{
"name": "id",
"type": users.c.id.type,
"aliased": False,
"expr": User.id,
"entity": User,
},
{
"name": "User",
"type": User,
"aliased": False,
"expr": User,
"entity": User,
},
],
),
(
sess.query(User.id, user_alias),
[
{
"name": "id",
"type": users.c.id.type,
"aliased": False,
"expr": User.id,
"entity": User,
},
{
"name": None,
"type": User,
"aliased": True,
"expr": user_alias,
"entity": user_alias,
},
],
),
(
sess.query(user_alias.id),
[
{
"name": "id",
"type": users.c.id.type,
"aliased": True,
"expr": user_alias.id,
"entity": user_alias,
}
],
),
(
sess.query(user_alias_id_label),
[
{
"name": "foo",
"type": users.c.id.type,
"aliased": True,
"expr": user_alias_id_label,
"entity": user_alias,
}
],
),
(
sess.query(address_alias),
[
{
"name": "aalias",
"type": Address,
"aliased": True,
"expr": address_alias,
"entity": address_alias,
}
],
),
(
sess.query(name_label, fn),
[
{
"name": "uname",
"type": users.c.name.type,
"aliased": False,
"expr": name_label,
"entity": User,
},
{
"name": None,
"type": fn.type,
"aliased": False,
"expr": fn,
"entity": User,
},
],
),
(
sess.query(cte),
[
{
"aliased": False,
"expr": cte.c.id,
"type": cte.c.id.type,
"name": "id",
"entity": None,
}
],
),
(
sess.query(users),
[
{
"aliased": False,
"expr": users.c.id,
"type": users.c.id.type,
"name": "id",
"entity": None,
},
{
"aliased": False,
"expr": users.c.name,
"type": users.c.name.type,
"name": "name",
"entity": None,
},
],
),
(
sess.query(users.c.name),
[
{
"name": "name",
"type": users.c.name.type,
"aliased": False,
"expr": users.c.name,
"entity": None,
}
],
),
(
sess.query(bundle),
[
{
"aliased": False,
"expr": bundle,
"type": Bundle,
"name": "b1",
"entity": User,
}
],
),
]:
eq_(q.column_descriptions, asserted)
def test_unhashable_type(self):
from sqlalchemy.types import TypeDecorator, Integer
from sqlalchemy.sql import type_coerce
class MyType(TypeDecorator):
impl = Integer
hashable = False
def process_result_value(self, value, dialect):
return [value]
User, users = self.classes.User, self.tables.users
mapper(User, users)
s = Session()
q = s.query(User, type_coerce(users.c.id, MyType).label("foo")).filter(
User.id == 7
)
row = q.first()
eq_(row, (User(id=7), [7]))
class BindSensitiveStringifyTest(fixtures.TestBase):
def _fixture(self, bind_to=None):
# building a totally separate metadata /mapping here
# because we need to control if the MetaData is bound or not
class User(object):
pass
m = MetaData(bind=bind_to)
user_table = Table(
"users",
m,
Column("id", Integer, primary_key=True),
Column("name", String(50)),
)
mapper(User, user_table)
return User
def _dialect_fixture(self):
class MyDialect(default.DefaultDialect):
default_paramstyle = "qmark"
from sqlalchemy.engine import base
return base.Engine(mock.Mock(), MyDialect(), mock.Mock())
def _test(
self, bound_metadata, bound_session, session_present, expect_bound
):
if bound_metadata or bound_session:
eng = self._dialect_fixture()
else:
eng = None
User = self._fixture(bind_to=eng if bound_metadata else None)
s = Session(eng if bound_session else None)
q = s.query(User).filter(User.id == 7)
if not session_present:
q = q.with_session(None)
eq_ignore_whitespace(
str(q),
"SELECT users.id AS users_id, users.name AS users_name "
"FROM users WHERE users.id = ?"
if expect_bound
else "SELECT users.id AS users_id, users.name AS users_name "
"FROM users WHERE users.id = :id_1",
)
def test_query_unbound_metadata_bound_session(self):
self._test(False, True, True, True)
def test_query_bound_metadata_unbound_session(self):
self._test(True, False, True, True)
def test_query_unbound_metadata_no_session(self):
self._test(False, False, False, False)
def test_query_unbound_metadata_unbound_session(self):
self._test(False, False, True, False)
def test_query_bound_metadata_bound_session(self):
self._test(True, True, True, True)
class RawSelectTest(QueryTest, AssertsCompiledSQL):
__dialect__ = "default"
def test_select_from_entity(self):
User = self.classes.User
self.assert_compile(
select(["*"]).select_from(User), "SELECT * FROM users"
)
def test_where_relationship(self):
User = self.classes.User
self.assert_compile(
select([User]).where(User.addresses),
"SELECT users.id, users.name FROM users, addresses "
"WHERE users.id = addresses.user_id",
)
def test_where_m2m_relationship(self):
Item = self.classes.Item
self.assert_compile(
select([Item]).where(Item.keywords),
"SELECT items.id, items.description FROM items, "
"item_keywords AS item_keywords_1, keywords "
"WHERE items.id = item_keywords_1.item_id "
"AND keywords.id = item_keywords_1.keyword_id",
)
def test_inline_select_from_entity(self):
User = self.classes.User
self.assert_compile(
select(["*"], from_obj=User), "SELECT * FROM users"
)
def test_select_from_aliased_entity(self):
User = self.classes.User
ua = aliased(User, name="ua")
self.assert_compile(
select(["*"]).select_from(ua), "SELECT * FROM users AS ua"
)
def test_correlate_entity(self):
User = self.classes.User
Address = self.classes.Address
self.assert_compile(
select(
[
User.name,
Address.id,
select([func.count(Address.id)])
.where(User.id == Address.user_id)
.correlate(User)
.scalar_subquery(),
]
),
"SELECT users.name, addresses.id, "
"(SELECT count(addresses.id) AS count_1 "
"FROM addresses WHERE users.id = addresses.user_id) AS anon_1 "
"FROM users, addresses",
)
def test_correlate_aliased_entity(self):
User = self.classes.User
Address = self.classes.Address
uu = aliased(User, name="uu")
self.assert_compile(
select(
[
uu.name,
Address.id,
select([func.count(Address.id)])
.where(uu.id == Address.user_id)
.correlate(uu)
.scalar_subquery(),
]
),
# for a long time, "uu.id = address.user_id" was reversed;
# this was resolved as of #2872 and had to do with
# InstrumentedAttribute.__eq__() taking precedence over
# QueryableAttribute.__eq__()
"SELECT uu.name, addresses.id, "
"(SELECT count(addresses.id) AS count_1 "
"FROM addresses WHERE uu.id = addresses.user_id) AS anon_1 "
"FROM users AS uu, addresses",
)
def test_columns_clause_entity(self):
User = self.classes.User
self.assert_compile(
select([User]), "SELECT users.id, users.name FROM users"
)
def test_columns_clause_columns(self):
User = self.classes.User
self.assert_compile(
select([User.id, User.name]),
"SELECT users.id, users.name FROM users",
)
def test_columns_clause_aliased_columns(self):
User = self.classes.User
ua = aliased(User, name="ua")
self.assert_compile(
select([ua.id, ua.name]), "SELECT ua.id, ua.name FROM users AS ua"
)
def test_columns_clause_aliased_entity(self):
User = self.classes.User
ua = aliased(User, name="ua")
self.assert_compile(
select([ua]), "SELECT ua.id, ua.name FROM users AS ua"
)
def test_core_join(self):
User = self.classes.User
Address = self.classes.Address
from sqlalchemy.sql import join
self.assert_compile(
select([User]).select_from(join(User, Address)),
"SELECT users.id, users.name FROM users "
"JOIN addresses ON users.id = addresses.user_id",
)
def test_insert_from_query(self):
User = self.classes.User
Address = self.classes.Address
s = Session()
q = s.query(User.id, User.name).filter_by(name="ed")
self.assert_compile(
insert(Address).from_select(("id", "email_address"), q),
"INSERT INTO addresses (id, email_address) "
"SELECT users.id AS users_id, users.name AS users_name "
"FROM users WHERE users.name = :name_1",
)
def test_insert_from_query_col_attr(self):
User = self.classes.User
Address = self.classes.Address
s = Session()
q = s.query(User.id, User.name).filter_by(name="ed")
self.assert_compile(
insert(Address).from_select(
(Address.id, Address.email_address), q
),
"INSERT INTO addresses (id, email_address) "
"SELECT users.id AS users_id, users.name AS users_name "
"FROM users WHERE users.name = :name_1",
)
def test_update_from_entity(self):
from sqlalchemy.sql import update
User = self.classes.User
self.assert_compile(
update(User), "UPDATE users SET id=:id, name=:name"
)
self.assert_compile(
update(User).values(name="ed").where(User.id == 5),
"UPDATE users SET name=:name WHERE users.id = :id_1",
checkparams={"id_1": 5, "name": "ed"},
)
def test_delete_from_entity(self):
from sqlalchemy.sql import delete
User = self.classes.User
self.assert_compile(delete(User), "DELETE FROM users")
self.assert_compile(
delete(User).where(User.id == 5),
"DELETE FROM users WHERE users.id = :id_1",
checkparams={"id_1": 5},
)
def test_insert_from_entity(self):
from sqlalchemy.sql import insert
User = self.classes.User
self.assert_compile(
insert(User), "INSERT INTO users (id, name) VALUES (:id, :name)"
)
self.assert_compile(
insert(User).values(name="ed"),
"INSERT INTO users (name) VALUES (:name)",
checkparams={"name": "ed"},
)
def test_col_prop_builtin_function(self):
class Foo(object):
pass
mapper(
Foo,
self.tables.users,
properties={
"foob": column_property(
func.coalesce(self.tables.users.c.name)
)
},
)
self.assert_compile(
select([Foo]).where(Foo.foob == "somename").order_by(Foo.foob),
"SELECT users.id, users.name FROM users "
"WHERE coalesce(users.name) = :param_1 "
"ORDER BY coalesce(users.name)",
)
class GetTest(QueryTest):
def test_get_composite_pk_keyword_based_no_result(self):
CompositePk = self.classes.CompositePk
s = Session()
is_(s.query(CompositePk).get({"i": 100, "j": 100}), None)
def test_get_composite_pk_keyword_based_result(self):
CompositePk = self.classes.CompositePk
s = Session()
one_two = s.query(CompositePk).get({"i": 1, "j": 2})
eq_(one_two.i, 1)
eq_(one_two.j, 2)
eq_(one_two.k, 3)
def test_get_composite_pk_keyword_based_wrong_keys(self):
CompositePk = self.classes.CompositePk
s = Session()
q = s.query(CompositePk)
assert_raises(sa_exc.InvalidRequestError, q.get, {"i": 1, "k": 2})
def test_get_composite_pk_keyword_based_too_few_keys(self):
CompositePk = self.classes.CompositePk
s = Session()
q = s.query(CompositePk)
assert_raises(sa_exc.InvalidRequestError, q.get, {"i": 1})
def test_get_composite_pk_keyword_based_too_many_keys(self):
CompositePk = self.classes.CompositePk
s = Session()
q = s.query(CompositePk)
assert_raises(
sa_exc.InvalidRequestError, q.get, {"i": 1, "j": "2", "k": 3}
)
def test_get(self):
User = self.classes.User
s = create_session()
assert s.query(User).get(19) is None
u = s.query(User).get(7)
u2 = s.query(User).get(7)
assert u is u2
s.expunge_all()
u2 = s.query(User).get(7)
assert u is not u2
def test_get_composite_pk_no_result(self):
CompositePk = self.classes.CompositePk
s = Session()
assert s.query(CompositePk).get((100, 100)) is None
def test_get_composite_pk_result(self):
CompositePk = self.classes.CompositePk
s = Session()
one_two = s.query(CompositePk).get((1, 2))
assert one_two.i == 1
assert one_two.j == 2
assert one_two.k == 3
def test_get_too_few_params(self):
CompositePk = self.classes.CompositePk
s = Session()
q = s.query(CompositePk)
assert_raises(sa_exc.InvalidRequestError, q.get, 7)
def test_get_too_few_params_tuple(self):
CompositePk = self.classes.CompositePk
s = Session()
q = s.query(CompositePk)
assert_raises(sa_exc.InvalidRequestError, q.get, (7,))
def test_get_too_many_params(self):
CompositePk = self.classes.CompositePk
s = Session()
q = s.query(CompositePk)
assert_raises(sa_exc.InvalidRequestError, q.get, (7, 10, 100))
def test_get_against_col(self):
User = self.classes.User
s = Session()
q = s.query(User.id)
assert_raises(sa_exc.InvalidRequestError, q.get, (5,))
def test_get_null_pk(self):
"""test that a mapping which can have None in a
PK (i.e. map to an outerjoin) works with get()."""
users, addresses = self.tables.users, self.tables.addresses
s = users.outerjoin(addresses)
class UserThing(fixtures.ComparableEntity):
pass
mapper(
UserThing,
s,
properties={
"id": (users.c.id, addresses.c.user_id),
"address_id": addresses.c.id,
},
)
sess = create_session()
u10 = sess.query(UserThing).get((10, None))
eq_(u10, UserThing(id=10))
def test_no_criterion(self):
"""test that get()/load() does not use preexisting filter/etc.
criterion"""
User, Address = self.classes.User, self.classes.Address
s = create_session()
q = s.query(User).join("addresses").filter(Address.user_id == 8)
assert_raises(sa_exc.InvalidRequestError, q.get, 7)
assert_raises(
sa_exc.InvalidRequestError,
s.query(User).filter(User.id == 7).get,
19,
)
# order_by()/get() doesn't raise
s.query(User).order_by(User.id).get(8)
def test_no_criterion_when_already_loaded(self):
"""test that get()/load() does not use preexisting filter/etc.
criterion, even when we're only using the identity map."""
User, Address = self.classes.User, self.classes.Address
s = create_session()
s.query(User).get(7)
q = s.query(User).join("addresses").filter(Address.user_id == 8)
assert_raises(sa_exc.InvalidRequestError, q.get, 7)
def test_unique_param_names(self):
users = self.tables.users
class SomeUser(object):
pass
s = users.select(users.c.id != 12).alias("users")
m = mapper(SomeUser, s)
assert s.primary_key == m.primary_key
sess = create_session()
assert sess.query(SomeUser).get(7).name == "jack"
def test_load(self):
User, Address = self.classes.User, self.classes.Address
s = create_session()
assert s.query(User).populate_existing().get(19) is None
u = s.query(User).populate_existing().get(7)
u2 = s.query(User).populate_existing().get(7)
assert u is u2
s.expunge_all()
u2 = s.query(User).populate_existing().get(7)
assert u is not u2
u2.name = "some name"
a = Address(email_address="some other name")
u2.addresses.append(a)
assert u2 in s.dirty
assert a in u2.addresses
s.query(User).populate_existing().get(7)
assert u2 not in s.dirty
assert u2.name == "jack"
assert a not in u2.addresses
@testing.provide_metadata
@testing.requires.unicode_connections
def test_unicode(self):
"""test that Query.get properly sets up the type for the bind
parameter. using unicode would normally fail on postgresql, mysql and
oracle unless it is converted to an encoded string"""
metadata = self.metadata
table = Table(
"unicode_data",
metadata,
Column("id", Unicode(40), primary_key=True),
Column("data", Unicode(40)),
)
metadata.create_all()
ustring = util.b("petit voix m\xe2\x80\x99a").decode("utf-8")
table.insert().execute(id=ustring, data=ustring)
class LocalFoo(self.classes.Base):
pass
mapper(LocalFoo, table)
eq_(
create_session().query(LocalFoo).get(ustring),
LocalFoo(id=ustring, data=ustring),
)
def test_populate_existing(self):
User, Address = self.classes.User, self.classes.Address
s = create_session()
userlist = s.query(User).all()
u = userlist[0]
u.name = "foo"
a = Address(name="ed")
u.addresses.append(a)
self.assert_(a in u.addresses)
s.query(User).populate_existing().all()
self.assert_(u not in s.dirty)
self.assert_(u.name == "jack")
self.assert_(a not in u.addresses)
u.addresses[0].email_address = "lala"
u.orders[1].items[2].description = "item 12"
# test that lazy load doesn't change child items
s.query(User).populate_existing().all()
assert u.addresses[0].email_address == "lala"
assert u.orders[1].items[2].description == "item 12"
# eager load does
s.query(User).options(
joinedload("addresses"), joinedload("orders").joinedload("items")
).populate_existing().all()
assert u.addresses[0].email_address == "jack@bean.com"
assert u.orders[1].items[2].description == "item 5"
class InvalidGenerationsTest(QueryTest, AssertsCompiledSQL):
def test_no_limit_offset(self):
User = self.classes.User
s = create_session()
for q in (
s.query(User).limit(2),
s.query(User).offset(2),
s.query(User).limit(2).offset(2),
):
assert_raises(sa_exc.InvalidRequestError, q.join, "addresses")
assert_raises(
sa_exc.InvalidRequestError, q.filter, User.name == "ed"
)
assert_raises(sa_exc.InvalidRequestError, q.filter_by, name="ed")
assert_raises(sa_exc.InvalidRequestError, q.order_by, "foo")
assert_raises(sa_exc.InvalidRequestError, q.group_by, "foo")
assert_raises(sa_exc.InvalidRequestError, q.having, "foo")
q.enable_assertions(False).join("addresses")
q.enable_assertions(False).filter(User.name == "ed")
q.enable_assertions(False).order_by("foo")
q.enable_assertions(False).group_by("foo")
def test_no_from(self):
users, User = self.tables.users, self.classes.User
s = create_session()
q = s.query(User).select_from(users)
assert_raises(sa_exc.InvalidRequestError, q.select_from, users)
q = s.query(User).join("addresses")
assert_raises(sa_exc.InvalidRequestError, q.select_from, users)
q = s.query(User).order_by(User.id)
assert_raises(sa_exc.InvalidRequestError, q.select_from, users)
assert_raises(sa_exc.InvalidRequestError, q.select_from, users)
q.enable_assertions(False).select_from(users)
# this is fine, however
q.from_self()
def test_invalid_select_from(self):
User = self.classes.User
s = create_session()
q = s.query(User)
assert_raises(sa_exc.ArgumentError, q.select_from, User.id == 5)
assert_raises(sa_exc.ArgumentError, q.select_from, User.id)
def test_invalid_from_statement(self):
User, addresses, users = (
self.classes.User,
self.tables.addresses,
self.tables.users,
)
s = create_session()
q = s.query(User)
assert_raises(sa_exc.ArgumentError, q.from_statement, User.id == 5)
assert_raises(
sa_exc.ArgumentError, q.from_statement, users.join(addresses)
)
def test_invalid_column(self):
User = self.classes.User
s = create_session()
q = s.query(User)
assert_raises(sa_exc.InvalidRequestError, q.add_column, object())
def test_invalid_column_tuple(self):
User = self.classes.User
s = create_session()
q = s.query(User)
assert_raises(sa_exc.InvalidRequestError, q.add_column, (1, 1))
def test_distinct(self):
"""test that a distinct() call is not valid before 'clauseelement'
conditions."""
User = self.classes.User
s = create_session()
q = s.query(User).distinct()
assert_raises(sa_exc.InvalidRequestError, q.select_from, User)
assert_raises(
sa_exc.InvalidRequestError,
q.from_statement,
text("select * from table"),
)
assert_raises(sa_exc.InvalidRequestError, q.with_polymorphic, User)
def test_order_by(self):
"""test that an order_by() call is not valid before 'clauseelement'
conditions."""
User = self.classes.User
s = create_session()
q = s.query(User).order_by(User.id)
assert_raises(sa_exc.InvalidRequestError, q.select_from, User)
assert_raises(
sa_exc.InvalidRequestError,
q.from_statement,
text("select * from table"),
)
assert_raises(sa_exc.InvalidRequestError, q.with_polymorphic, User)
def test_only_full_mapper_zero(self):
User, Address = self.classes.User, self.classes.Address
s = create_session()
q = s.query(User, Address)
assert_raises(sa_exc.InvalidRequestError, q.get, 5)
def test_entity_or_mapper_zero(self):
User, Address = self.classes.User, self.classes.Address
s = create_session()
q = s.query(User, Address)
is_(q._mapper_zero(), inspect(User))
is_(q._entity_zero(), inspect(User))
u1 = aliased(User)
q = s.query(u1, Address)
is_(q._mapper_zero(), inspect(User))
is_(q._entity_zero(), inspect(u1))
q = s.query(User).select_from(Address)
is_(q._mapper_zero(), inspect(User))
is_(q._entity_zero(), inspect(Address))
q = s.query(User.name, Address)
is_(q._mapper_zero(), inspect(User))
is_(q._entity_zero(), inspect(User))
q = s.query(u1.name, Address)
is_(q._mapper_zero(), inspect(User))
is_(q._entity_zero(), inspect(u1))
q1 = s.query(User).exists()
q = s.query(q1)
is_(q._mapper_zero(), None)
is_(q._entity_zero(), None)
q1 = s.query(Bundle("b1", User.id, User.name))
is_(q1._mapper_zero(), inspect(User))
is_(q1._entity_zero(), inspect(User))
def test_from_statement(self):
User = self.classes.User
s = create_session()
for meth, arg, kw in [
(Query.filter, (User.id == 5,), {}),
(Query.filter_by, (), {"id": 5}),
(Query.limit, (5,), {}),
(Query.group_by, (User.name,), {}),
(Query.order_by, (User.name,), {}),
]:
q = s.query(User)
q = meth(q, *arg, **kw)
assert_raises(
sa_exc.InvalidRequestError, q.from_statement, text("x")
)
q = s.query(User)
q = q.from_statement(text("x"))
assert_raises(sa_exc.InvalidRequestError, meth, q, *arg, **kw)
def test_illegal_coercions(self):
User = self.classes.User
assert_raises_message(
sa_exc.ArgumentError,
"SQL expression element expected, got .*User",
distinct,
User,
)
ua = aliased(User)
assert_raises_message(
sa_exc.ArgumentError,
"SQL expression element expected, got .*User",
distinct,
ua,
)
s = Session()
assert_raises_message(
sa_exc.ArgumentError,
"SQL expression element or literal value expected, got .*User",
lambda: s.query(User).filter(User.name == User),
)
u1 = User()
assert_raises_message(
sa_exc.ArgumentError,
"SQL expression element expected, got .*User",
distinct,
u1,
)
assert_raises_message(
sa_exc.ArgumentError,
"SQL expression element or literal value expected, got .*User",
lambda: s.query(User).filter(User.name == u1),
)
class OperatorTest(QueryTest, AssertsCompiledSQL):
"""test sql.Comparator implementation for MapperProperties"""
__dialect__ = "default"
def _test(self, clause, expected, entity=None, checkparams=None):
dialect = default.DefaultDialect()
if entity is not None:
# specify a lead entity, so that when we are testing
# correlation, the correlation actually happens
sess = Session()
lead = sess.query(entity)
context = lead._compile_context()
context.statement.use_labels = True
lead = context.statement.compile(dialect=dialect)
expected = (str(lead) + " WHERE " + expected).replace("\n", "")
clause = sess.query(entity).filter(clause)
self.assert_compile(clause, expected, checkparams=checkparams)
def _test_filter_aliases(
self, clause, expected, from_, onclause, checkparams=None
):
dialect = default.DefaultDialect()
sess = Session()
lead = sess.query(from_).join(onclause, aliased=True)
full = lead.filter(clause)
context = lead._compile_context()
context.statement.use_labels = True
lead = context.statement.compile(dialect=dialect)
expected = (str(lead) + " WHERE " + expected).replace("\n", "")
self.assert_compile(full, expected, checkparams=checkparams)
def test_arithmetic(self):
User = self.classes.User
create_session().query(User)
for (py_op, sql_op) in (
(operators.add, "+"),
(operators.mul, "*"),
(operators.sub, "-"),
(operators.truediv, "/"),
(operators.div, "/"),
):
for (lhs, rhs, res) in (
(5, User.id, ":id_1 %s users.id"),
(5, literal(6), ":param_1 %s :param_2"),
(User.id, 5, "users.id %s :id_1"),
(User.id, literal("b"), "users.id %s :param_1"),
(User.id, User.id, "users.id %s users.id"),
(literal(5), "b", ":param_1 %s :param_2"),
(literal(5), User.id, ":param_1 %s users.id"),
(literal(5), literal(6), ":param_1 %s :param_2"),
):
self._test(py_op(lhs, rhs), res % sql_op)
def test_comparison(self):
User = self.classes.User
create_session().query(User)
ualias = aliased(User)
for (py_op, fwd_op, rev_op) in (
(operators.lt, "<", ">"),
(operators.gt, ">", "<"),
(operators.eq, "=", "="),
(operators.ne, "!=", "!="),
(operators.le, "<=", ">="),
(operators.ge, ">=", "<="),
):
for (lhs, rhs, l_sql, r_sql) in (
("a", User.id, ":id_1", "users.id"),
("a", literal("b"), ":param_2", ":param_1"), # note swap!
(User.id, "b", "users.id", ":id_1"),
(User.id, literal("b"), "users.id", ":param_1"),
(User.id, User.id, "users.id", "users.id"),
(literal("a"), "b", ":param_1", ":param_2"),
(literal("a"), User.id, ":param_1", "users.id"),
(literal("a"), literal("b"), ":param_1", ":param_2"),
(ualias.id, literal("b"), "users_1.id", ":param_1"),
(User.id, ualias.name, "users.id", "users_1.name"),
(User.name, ualias.name, "users.name", "users_1.name"),
(ualias.name, User.name, "users_1.name", "users.name"),
):
# the compiled clause should match either (e.g.):
# 'a' < 'b' -or- 'b' > 'a'.
compiled = str(
py_op(lhs, rhs).compile(dialect=default.DefaultDialect())
)
fwd_sql = "%s %s %s" % (l_sql, fwd_op, r_sql)
rev_sql = "%s %s %s" % (r_sql, rev_op, l_sql)
self.assert_(
compiled == fwd_sql or compiled == rev_sql,
"\n'"
+ compiled
+ "'\n does not match\n'"
+ fwd_sql
+ "'\n or\n'"
+ rev_sql
+ "'",
)
def test_o2m_compare_to_null(self):
User = self.classes.User
self._test(User.id == None, "users.id IS NULL") # noqa
self._test(User.id != None, "users.id IS NOT NULL") # noqa
self._test(~(User.id == None), "users.id IS NOT NULL") # noqa
self._test(~(User.id != None), "users.id IS NULL") # noqa
self._test(None == User.id, "users.id IS NULL") # noqa
self._test(~(None == User.id), "users.id IS NOT NULL") # noqa
def test_m2o_compare_to_null(self):
Address = self.classes.Address
self._test(Address.user == None, "addresses.user_id IS NULL") # noqa
self._test(
~(Address.user == None), "addresses.user_id IS NOT NULL" # noqa
)
self._test(
~(Address.user != None), "addresses.user_id IS NULL" # noqa
)
self._test(None == Address.user, "addresses.user_id IS NULL") # noqa
self._test(
~(None == Address.user), "addresses.user_id IS NOT NULL" # noqa
)
def test_o2m_compare_to_null_orm_adapt(self):
User, Address = self.classes.User, self.classes.Address
self._test_filter_aliases(
User.id == None, # noqa
"users_1.id IS NULL",
Address,
Address.user,
        )
self._test_filter_aliases(
User.id != None, # noqa
"users_1.id IS NOT NULL",
Address,
Address.user,
        )
self._test_filter_aliases(
~(User.id == None), # noqa
"users_1.id IS NOT NULL",
Address,
Address.user,
        )
self._test_filter_aliases(
~(User.id != None), # noqa
"users_1.id IS NULL",
Address,
Address.user,
        )
def test_m2o_compare_to_null_orm_adapt(self):
User, Address = self.classes.User, self.classes.Address
self._test_filter_aliases(
Address.user == None, # noqa
"addresses_1.user_id IS NULL",
User,
User.addresses,
        )
self._test_filter_aliases(
Address.user != None, # noqa
"addresses_1.user_id IS NOT NULL",
User,
User.addresses,
        )
self._test_filter_aliases(
~(Address.user == None), # noqa
"addresses_1.user_id IS NOT NULL",
User,
User.addresses,
        )
self._test_filter_aliases(
~(Address.user != None), # noqa
"addresses_1.user_id IS NULL",
User,
User.addresses,
        )
def test_o2m_compare_to_null_aliased(self):
User = self.classes.User
u1 = aliased(User)
self._test(u1.id == None, "users_1.id IS NULL") # noqa
self._test(u1.id != None, "users_1.id IS NOT NULL") # noqa
self._test(~(u1.id == None), "users_1.id IS NOT NULL") # noqa
self._test(~(u1.id != None), "users_1.id IS NULL") # noqa
def test_m2o_compare_to_null_aliased(self):
Address = self.classes.Address
a1 = aliased(Address)
self._test(a1.user == None, "addresses_1.user_id IS NULL") # noqa
self._test(
~(a1.user == None), "addresses_1.user_id IS NOT NULL" # noqa
)
self._test(a1.user != None, "addresses_1.user_id IS NOT NULL") # noqa
self._test(~(a1.user != None), "addresses_1.user_id IS NULL") # noqa
def test_relationship_unimplemented(self):
User = self.classes.User
for op in [
User.addresses.like,
User.addresses.ilike,
User.addresses.__le__,
User.addresses.__gt__,
]:
assert_raises(NotImplementedError, op, "x")
def test_o2m_any(self):
User, Address = self.classes.User, self.classes.Address
self._test(
User.addresses.any(Address.id == 17),
"EXISTS (SELECT 1 FROM addresses "
"WHERE users.id = addresses.user_id AND addresses.id = :id_1)",
entity=User,
)
def test_o2m_any_aliased(self):
User, Address = self.classes.User, self.classes.Address
u1 = aliased(User)
a1 = aliased(Address)
self._test(
u1.addresses.of_type(a1).any(a1.id == 17),
"EXISTS (SELECT 1 FROM addresses AS addresses_1 "
"WHERE users_1.id = addresses_1.user_id AND "
"addresses_1.id = :id_1)",
entity=u1,
)
def test_o2m_any_orm_adapt(self):
User, Address = self.classes.User, self.classes.Address
self._test_filter_aliases(
User.addresses.any(Address.id == 17),
"EXISTS (SELECT 1 FROM addresses "
"WHERE users_1.id = addresses.user_id AND addresses.id = :id_1)",
Address,
Address.user,
)
def test_m2o_compare_instance(self):
User, Address = self.classes.User, self.classes.Address
u7 = User(id=5)
attributes.instance_state(u7)._commit_all(attributes.instance_dict(u7))
u7.id = 7
self._test(Address.user == u7, ":param_1 = addresses.user_id")
def test_m2o_compare_instance_negated(self):
User, Address = self.classes.User, self.classes.Address
u7 = User(id=5)
attributes.instance_state(u7)._commit_all(attributes.instance_dict(u7))
u7.id = 7
self._test(
Address.user != u7,
"addresses.user_id != :user_id_1 OR addresses.user_id IS NULL",
checkparams={"user_id_1": 7},
)
def test_m2o_compare_instance_orm_adapt(self):
User, Address = self.classes.User, self.classes.Address
u7 = User(id=5)
attributes.instance_state(u7)._commit_all(attributes.instance_dict(u7))
u7.id = 7
self._test_filter_aliases(
Address.user == u7,
":param_1 = addresses_1.user_id",
User,
User.addresses,
checkparams={"param_1": 7},
)
def test_m2o_compare_instance_negated_warn_on_none(self):
User, Address = self.classes.User, self.classes.Address
u7_transient = User(id=None)
with expect_warnings("Got None for value of column users.id; "):
self._test_filter_aliases(
Address.user != u7_transient,
"addresses_1.user_id != :user_id_1 "
"OR addresses_1.user_id IS NULL",
User,
User.addresses,
checkparams={"user_id_1": None},
)
def test_m2o_compare_instance_negated_orm_adapt(self):
User, Address = self.classes.User, self.classes.Address
u7 = User(id=5)
attributes.instance_state(u7)._commit_all(attributes.instance_dict(u7))
u7.id = 7
u7_transient = User(id=7)
self._test_filter_aliases(
Address.user != u7,
"addresses_1.user_id != :user_id_1 OR addresses_1.user_id IS NULL",
User,
User.addresses,
checkparams={"user_id_1": 7},
)
self._test_filter_aliases(
~(Address.user == u7),
":param_1 != addresses_1.user_id",
User,
User.addresses,
checkparams={"param_1": 7},
)
self._test_filter_aliases(
~(Address.user != u7),
"NOT (addresses_1.user_id != :user_id_1 "
"OR addresses_1.user_id IS NULL)",
User,
User.addresses,
checkparams={"user_id_1": 7},
)
self._test_filter_aliases(
Address.user != u7_transient,
"addresses_1.user_id != :user_id_1 OR addresses_1.user_id IS NULL",
User,
User.addresses,
checkparams={"user_id_1": 7},
)
self._test_filter_aliases(
~(Address.user == u7_transient),
":param_1 != addresses_1.user_id",
User,
User.addresses,
checkparams={"param_1": 7},
)
self._test_filter_aliases(
~(Address.user != u7_transient),
"NOT (addresses_1.user_id != :user_id_1 "
"OR addresses_1.user_id IS NULL)",
User,
User.addresses,
checkparams={"user_id_1": 7},
)
def test_m2o_compare_instance_aliased(self):
User, Address = self.classes.User, self.classes.Address
u7 = User(id=5)
attributes.instance_state(u7)._commit_all(attributes.instance_dict(u7))
u7.id = 7
u7_transient = User(id=7)
a1 = aliased(Address)
self._test(
a1.user == u7,
":param_1 = addresses_1.user_id",
checkparams={"param_1": 7},
)
self._test(
a1.user != u7,
"addresses_1.user_id != :user_id_1 OR addresses_1.user_id IS NULL",
checkparams={"user_id_1": 7},
)
a1 = aliased(Address)
self._test(
a1.user == u7_transient,
":param_1 = addresses_1.user_id",
checkparams={"param_1": 7},
)
self._test(
a1.user != u7_transient,
"addresses_1.user_id != :user_id_1 OR addresses_1.user_id IS NULL",
checkparams={"user_id_1": 7},
)
def test_selfref_relationship(self):
Node = self.classes.Node
nalias = aliased(Node)
# auto self-referential aliasing
self._test(
Node.children.any(Node.data == "n1"),
"EXISTS (SELECT 1 FROM nodes AS nodes_1 WHERE "
"nodes.id = nodes_1.parent_id AND nodes_1.data = :data_1)",
entity=Node,
checkparams={"data_1": "n1"},
)
# needs autoaliasing
self._test(
Node.children == None, # noqa
"NOT (EXISTS (SELECT 1 FROM nodes AS nodes_1 "
"WHERE nodes.id = nodes_1.parent_id))",
entity=Node,
checkparams={},
)
self._test(
Node.parent == None, # noqa
"nodes.parent_id IS NULL",
checkparams={},
)
self._test(
nalias.parent == None, # noqa
"nodes_1.parent_id IS NULL",
checkparams={},
)
self._test(
nalias.parent != None, # noqa
"nodes_1.parent_id IS NOT NULL",
checkparams={},
)
self._test(
nalias.children == None, # noqa
"NOT (EXISTS ("
"SELECT 1 FROM nodes WHERE nodes_1.id = nodes.parent_id))",
entity=nalias,
checkparams={},
)
self._test(
nalias.children.any(Node.data == "some data"),
"EXISTS (SELECT 1 FROM nodes WHERE "
"nodes_1.id = nodes.parent_id AND nodes.data = :data_1)",
entity=nalias,
checkparams={"data_1": "some data"},
)
# this fails because self-referential any() is auto-aliasing;
# the fact that we use "nalias" here means we get two aliases.
# self._test(
# Node.children.any(nalias.data == 'some data'),
# "EXISTS (SELECT 1 FROM nodes AS nodes_1 WHERE "
# "nodes.id = nodes_1.parent_id AND nodes_1.data = :data_1)",
# entity=Node
# )
self._test(
nalias.parent.has(Node.data == "some data"),
"EXISTS (SELECT 1 FROM nodes WHERE nodes.id = nodes_1.parent_id "
"AND nodes.data = :data_1)",
entity=nalias,
checkparams={"data_1": "some data"},
)
self._test(
Node.parent.has(Node.data == "some data"),
"EXISTS (SELECT 1 FROM nodes AS nodes_1 WHERE "
"nodes_1.id = nodes.parent_id AND nodes_1.data = :data_1)",
entity=Node,
checkparams={"data_1": "some data"},
)
self._test(
Node.parent == Node(id=7),
":param_1 = nodes.parent_id",
checkparams={"param_1": 7},
)
self._test(
nalias.parent == Node(id=7),
":param_1 = nodes_1.parent_id",
checkparams={"param_1": 7},
)
self._test(
nalias.parent != Node(id=7),
"nodes_1.parent_id != :parent_id_1 "
"OR nodes_1.parent_id IS NULL",
checkparams={"parent_id_1": 7},
)
self._test(
nalias.parent != Node(id=7),
"nodes_1.parent_id != :parent_id_1 "
"OR nodes_1.parent_id IS NULL",
checkparams={"parent_id_1": 7},
)
self._test(
nalias.children.contains(Node(id=7, parent_id=12)),
"nodes_1.id = :param_1",
checkparams={"param_1": 12},
)
def test_multilevel_any(self):
User, Address, Dingaling = (
self.classes.User,
self.classes.Address,
self.classes.Dingaling,
)
sess = Session()
q = sess.query(User).filter(
User.addresses.any(
and_(Address.id == Dingaling.address_id, Dingaling.data == "x")
)
)
# new since #2746 - correlate_except() now takes context into account
# so its usage in any() is not as disrupting.
self.assert_compile(
q,
"SELECT users.id AS users_id, users.name AS users_name "
"FROM users "
"WHERE EXISTS (SELECT 1 "
"FROM addresses, dingalings "
"WHERE users.id = addresses.user_id AND "
"addresses.id = dingalings.address_id AND "
"dingalings.data = :data_1)",
)
def test_op(self):
User = self.classes.User
self._test(User.name.op("ilike")("17"), "users.name ilike :name_1")
def test_in(self):
User = self.classes.User
self._test(User.id.in_(["a", "b"]), "users.id IN (:id_1, :id_2)")
def test_in_on_relationship_not_supported(self):
User, Address = self.classes.User, self.classes.Address
assert_raises(NotImplementedError, Address.user.in_, [User(id=5)])
def test_neg(self):
User = self.classes.User
self._test(-User.id, "-users.id")
self._test(User.id + -User.id, "users.id + -users.id")
def test_between(self):
User = self.classes.User
self._test(
User.id.between("a", "b"), "users.id BETWEEN :id_1 AND :id_2"
)
def test_collate(self):
User = self.classes.User
self._test(collate(User.id, "utf8_bin"), "users.id COLLATE utf8_bin")
self._test(User.id.collate("utf8_bin"), "users.id COLLATE utf8_bin")
def test_selfref_between(self):
User = self.classes.User
ualias = aliased(User)
self._test(
User.id.between(ualias.id, ualias.id),
"users.id BETWEEN users_1.id AND users_1.id",
)
self._test(
ualias.id.between(User.id, User.id),
"users_1.id BETWEEN users.id AND users.id",
)
def test_clauses(self):
User, Address = self.classes.User, self.classes.Address
for (expr, compare) in (
(func.max(User.id), "max(users.id)"),
(User.id.desc(), "users.id DESC"),
(
between(5, User.id, Address.id),
":param_1 BETWEEN users.id AND addresses.id",
),
# this one would require adding compile() to
# InstrumentedScalarAttribute. do we want this ?
# (User.id, "users.id")
):
c = expr.compile(dialect=default.DefaultDialect())
assert str(c) == compare, "%s != %s" % (str(c), compare)
class ExpressionTest(QueryTest, AssertsCompiledSQL):
__dialect__ = "default"
def test_deferred_instances(self):
User, addresses, Address = (
self.classes.User,
self.tables.addresses,
self.classes.Address,
)
session = create_session()
s = (
session.query(User)
.filter(
and_(
addresses.c.email_address == bindparam("emailad"),
Address.user_id == User.id,
)
)
.statement
)
result = list(
session.query(User).instances(s.execute(emailad="jack@bean.com"))
)
eq_([User(id=7)], result)
def test_aliased_sql_construct(self):
User, Address = self.classes.User, self.classes.Address
j = join(User, Address)
a1 = aliased(j)
self.assert_compile(
a1.select(),
"SELECT anon_1.users_id, anon_1.users_name, anon_1.addresses_id, "
"anon_1.addresses_user_id, anon_1.addresses_email_address "
"FROM (SELECT users.id AS users_id, users.name AS users_name, "
"addresses.id AS addresses_id, addresses.user_id AS "
"addresses_user_id, addresses.email_address AS "
"addresses_email_address FROM users JOIN addresses "
"ON users.id = addresses.user_id) AS anon_1",
)
def test_aliased_sql_construct_raises_adapt_on_names(self):
User, Address = self.classes.User, self.classes.Address
j = join(User, Address)
assert_raises_message(
sa_exc.ArgumentError,
"adapt_on_names only applies to ORM elements",
aliased,
j,
adapt_on_names=True,
)
def test_scalar_subquery_compile_whereclause(self):
User = self.classes.User
Address = self.classes.Address
session = create_session()
q = session.query(User.id).filter(User.id == 7).scalar_subquery()
q = session.query(Address).filter(Address.user_id == q)
assert isinstance(q._criterion.right, expression.ColumnElement)
self.assert_compile(
q,
"SELECT addresses.id AS addresses_id, addresses.user_id "
"AS addresses_user_id, addresses.email_address AS "
"addresses_email_address FROM addresses WHERE "
"addresses.user_id = (SELECT users.id "
"FROM users WHERE users.id = :id_1)",
)
def test_subquery_no_eagerloads(self):
User = self.classes.User
s = Session()
self.assert_compile(
s.query(User).options(joinedload(User.addresses)).subquery(),
"SELECT users.id, users.name FROM users",
)
def test_exists_no_eagerloads(self):
User = self.classes.User
s = Session()
self.assert_compile(
s.query(
s.query(User).options(joinedload(User.addresses)).exists()
),
"SELECT EXISTS (SELECT 1 FROM users) AS anon_1",
)
def test_named_subquery(self):
User = self.classes.User
session = create_session()
a1 = session.query(User.id).filter(User.id == 7).subquery("foo1")
a2 = session.query(User.id).filter(User.id == 7).subquery(name="foo2")
a3 = session.query(User.id).filter(User.id == 7).subquery()
eq_(a1.name, "foo1")
eq_(a2.name, "foo2")
eq_(a3.name, "%%(%d anon)s" % id(a3))
def test_labeled_subquery(self):
User = self.classes.User
session = create_session()
a1 = (
session.query(User.id)
.filter(User.id == 7)
.subquery(with_labels=True)
)
assert a1.c.users_id is not None
def test_reduced_subquery(self):
User = self.classes.User
ua = aliased(User)
session = create_session()
a1 = (
session.query(User.id, ua.id, ua.name)
.filter(User.id == ua.id)
.subquery(reduce_columns=True)
)
self.assert_compile(
a1,
"SELECT users.id, users_1.name FROM "
"users, users AS users_1 "
"WHERE users.id = users_1.id",
)
def test_label(self):
User = self.classes.User
session = create_session()
q = session.query(User.id).filter(User.id == 7).label("foo")
self.assert_compile(
session.query(q),
"SELECT (SELECT users.id FROM users "
"WHERE users.id = :id_1) AS foo",
)
def test_scalar_subquery(self):
User = self.classes.User
session = create_session()
q = session.query(User.id).filter(User.id == 7).scalar_subquery()
self.assert_compile(
session.query(User).filter(User.id.in_(q)),
"SELECT users.id AS users_id, users.name "
"AS users_name FROM users WHERE users.id "
"IN (SELECT users.id FROM users WHERE "
"users.id = :id_1)",
)
def test_param_transfer(self):
User = self.classes.User
session = create_session()
q = (
session.query(User.id)
.filter(User.id == bindparam("foo"))
.params(foo=7)
.scalar_subquery()
)
q = session.query(User).filter(User.id.in_(q))
eq_(User(id=7), q.one())
def test_in(self):
User, Address = self.classes.User, self.classes.Address
session = create_session()
s = (
session.query(User.id)
.join(User.addresses)
.group_by(User.id)
.having(func.count(Address.id) > 2)
)
eq_(session.query(User).filter(User.id.in_(s)).all(), [User(id=8)])
def test_union(self):
User = self.classes.User
s = create_session()
q1 = s.query(User).filter(User.name == "ed").with_labels()
q2 = s.query(User).filter(User.name == "fred").with_labels()
eq_(
s.query(User)
.from_statement(union(q1, q2).order_by("users_name"))
.all(),
[User(name="ed"), User(name="fred")],
)
def test_select(self):
User = self.classes.User
s = create_session()
# this is actually not legal on most DBs since the subquery has no
# alias
q1 = s.query(User).filter(User.name == "ed")
self.assert_compile(
select([q1.with_labels().subquery()]),
"SELECT anon_1.users_id, anon_1.users_name FROM "
"(SELECT users.id AS users_id, "
"users.name AS users_name "
"FROM users WHERE users.name = :name_1) AS anon_1",
)
def test_join(self):
User, Address = self.classes.User, self.classes.Address
s = create_session()
# TODO: do we want aliased() to detect a query and convert to
# subquery() automatically ?
q1 = s.query(Address).filter(Address.email_address == "jack@bean.com")
adalias = aliased(Address, q1.subquery())
eq_(
s.query(User, adalias)
.join(adalias, User.id == adalias.user_id)
.all(),
[
(
User(id=7, name="jack"),
Address(email_address="jack@bean.com", user_id=7, id=1),
)
],
)
def test_group_by_plain(self):
User = self.classes.User
s = create_session()
q1 = s.query(User.id, User.name).group_by(User.name)
self.assert_compile(
select([q1.with_labels().subquery()]),
"SELECT anon_1.users_id, anon_1.users_name FROM "
"(SELECT users.id AS users_id, "
"users.name AS users_name FROM users GROUP BY users.name) "
"AS anon_1",
)
def test_group_by_append(self):
User = self.classes.User
s = create_session()
q1 = s.query(User.id, User.name).group_by(User.name)
# test append something to group_by
self.assert_compile(
select([q1.group_by(User.id).with_labels().subquery()]),
"SELECT anon_1.users_id, anon_1.users_name FROM "
"(SELECT users.id AS users_id, "
"users.name AS users_name FROM users "
"GROUP BY users.name, users.id) AS anon_1",
)
def test_group_by_cancellation(self):
User = self.classes.User
s = create_session()
q1 = s.query(User.id, User.name).group_by(User.name)
# test cancellation by using None, replacement with something else
self.assert_compile(
select(
[q1.group_by(None).group_by(User.id).with_labels().subquery()]
),
"SELECT anon_1.users_id, anon_1.users_name FROM "
"(SELECT users.id AS users_id, "
"users.name AS users_name FROM users GROUP BY users.id) AS anon_1",
)
# test cancellation by using None, replacement with nothing
self.assert_compile(
select([q1.group_by(None).with_labels().subquery()]),
"SELECT anon_1.users_id, anon_1.users_name FROM "
"(SELECT users.id AS users_id, "
"users.name AS users_name FROM users) AS anon_1",
)
def test_group_by_cancelled_still_present(self):
User = self.classes.User
s = create_session()
q1 = s.query(User.id, User.name).group_by(User.name).group_by(None)
q1._no_criterion_assertion("foo")
def test_order_by_plain(self):
User = self.classes.User
s = create_session()
q1 = s.query(User.id, User.name).order_by(User.name)
self.assert_compile(
select([q1.with_labels().subquery()]),
"SELECT anon_1.users_id, anon_1.users_name FROM "
"(SELECT users.id AS users_id, "
"users.name AS users_name FROM users ORDER BY users.name) "
"AS anon_1",
)
def test_order_by_append(self):
User = self.classes.User
s = create_session()
q1 = s.query(User.id, User.name).order_by(User.name)
# test append something to order_by
self.assert_compile(
select([q1.order_by(User.id).with_labels().subquery()]),
"SELECT anon_1.users_id, anon_1.users_name FROM "
"(SELECT users.id AS users_id, "
"users.name AS users_name FROM users "
"ORDER BY users.name, users.id) AS anon_1",
)
def test_order_by_cancellation(self):
User = self.classes.User
s = create_session()
q1 = s.query(User.id, User.name).order_by(User.name)
# test cancellation by using None, replacement with something else
self.assert_compile(
select(
[q1.order_by(None).order_by(User.id).with_labels().subquery()]
),
"SELECT anon_1.users_id, anon_1.users_name FROM "
"(SELECT users.id AS users_id, "
"users.name AS users_name FROM users ORDER BY users.id) AS anon_1",
)
# test cancellation by using None, replacement with nothing
self.assert_compile(
select([q1.order_by(None).with_labels().subquery()]),
"SELECT anon_1.users_id, anon_1.users_name FROM "
"(SELECT users.id AS users_id, "
"users.name AS users_name FROM users) AS anon_1",
)
def test_order_by_cancellation_false(self):
User = self.classes.User
s = create_session()
q1 = s.query(User.id, User.name).order_by(User.name)
# test cancellation by using None, replacement with something else
self.assert_compile(
select(
[q1.order_by(False).order_by(User.id).with_labels().subquery()]
),
"SELECT anon_1.users_id, anon_1.users_name FROM "
"(SELECT users.id AS users_id, "
"users.name AS users_name FROM users ORDER BY users.id) AS anon_1",
)
# test cancellation by using None, replacement with nothing
self.assert_compile(
select([q1.order_by(False).with_labels().subquery()]),
"SELECT anon_1.users_id, anon_1.users_name FROM "
"(SELECT users.id AS users_id, "
"users.name AS users_name FROM users) AS anon_1",
)
def test_order_by_cancelled_allows_assertions(self):
User = self.classes.User
s = create_session()
q1 = s.query(User.id, User.name).order_by(User.name).order_by(None)
q1._no_criterion_assertion("foo")
def test_legacy_order_by_cancelled_allows_assertions(self):
User = self.classes.User
s = create_session()
q1 = s.query(User.id, User.name).order_by(User.name).order_by(False)
q1._no_criterion_assertion("foo")
class ColumnPropertyTest(_fixtures.FixtureTest, AssertsCompiledSQL):
__dialect__ = "default"
run_setup_mappers = "each"
def _fixture(self, label=True, polymorphic=False):
User, Address = self.classes("User", "Address")
users, addresses = self.tables("users", "addresses")
stmt = (
select([func.max(addresses.c.email_address)])
.where(addresses.c.user_id == users.c.id)
.correlate(users)
)
if label:
stmt = stmt.label("email_ad")
else:
stmt = stmt.scalar_subquery()
mapper(
User,
users,
properties={"ead": column_property(stmt)},
with_polymorphic="*" if polymorphic else None,
)
mapper(Address, addresses)
def _func_fixture(self, label=False):
User = self.classes.User
users = self.tables.users
if label:
mapper(
User,
users,
properties={
"foobar": column_property(
func.foob(users.c.name).label(None)
)
},
)
else:
mapper(
User,
users,
properties={
"foobar": column_property(func.foob(users.c.name))
},
)
def test_anon_label_function_auto(self):
self._func_fixture()
User = self.classes.User
s = Session()
u1 = aliased(User)
self.assert_compile(
s.query(User.foobar, u1.foobar),
"SELECT foob(users.name) AS foob_1, foob(users_1.name) AS foob_2 "
"FROM users, users AS users_1",
)
def test_anon_label_function_manual(self):
self._func_fixture(label=True)
User = self.classes.User
s = Session()
u1 = aliased(User)
self.assert_compile(
s.query(User.foobar, u1.foobar),
"SELECT foob(users.name) AS foob_1, foob(users_1.name) AS foob_2 "
"FROM users, users AS users_1",
)
def test_anon_label_ad_hoc_labeling(self):
self._func_fixture()
User = self.classes.User
s = Session()
u1 = aliased(User)
self.assert_compile(
s.query(User.foobar.label("x"), u1.foobar.label("y")),
"SELECT foob(users.name) AS x, foob(users_1.name) AS y "
"FROM users, users AS users_1",
)
def test_order_by_column_prop_string(self):
User, Address = self.classes("User", "Address")
self._fixture(label=True)
s = Session()
q = s.query(User).order_by("email_ad")
self.assert_compile(
q,
"SELECT (SELECT max(addresses.email_address) AS max_1 "
"FROM addresses "
"WHERE addresses.user_id = users.id) AS email_ad, "
"users.id AS users_id, users.name AS users_name "
"FROM users ORDER BY email_ad",
)
def test_order_by_column_prop_aliased_string(self):
User, Address = self.classes("User", "Address")
self._fixture(label=True)
s = Session()
ua = aliased(User)
q = s.query(ua).order_by("email_ad")
assert_raises_message(
sa.exc.CompileError,
"Can't resolve label reference for ORDER BY / GROUP BY",
q.with_labels().statement.compile,
)
def test_order_by_column_labeled_prop_attr_aliased_one(self):
User = self.classes.User
self._fixture(label=True)
ua = aliased(User)
s = Session()
q = s.query(ua).order_by(ua.ead)
self.assert_compile(
q,
"SELECT (SELECT max(addresses.email_address) AS max_1 "
"FROM addresses WHERE addresses.user_id = users_1.id) AS anon_1, "
"users_1.id AS users_1_id, users_1.name AS users_1_name "
"FROM users AS users_1 ORDER BY anon_1",
)
def test_order_by_column_labeled_prop_attr_aliased_two(self):
User = self.classes.User
self._fixture(label=True)
ua = aliased(User)
s = Session()
q = s.query(ua.ead).order_by(ua.ead)
self.assert_compile(
q,
"SELECT (SELECT max(addresses.email_address) AS max_1 "
"FROM addresses, "
"users AS users_1 WHERE addresses.user_id = users_1.id) "
"AS anon_1 ORDER BY anon_1",
)
# we're also testing that the state of "ua" is OK after the
# previous call, so the batching into one test is intentional
q = s.query(ua).order_by(ua.ead)
self.assert_compile(
q,
"SELECT (SELECT max(addresses.email_address) AS max_1 "
"FROM addresses WHERE addresses.user_id = users_1.id) AS anon_1, "
"users_1.id AS users_1_id, users_1.name AS users_1_name "
"FROM users AS users_1 ORDER BY anon_1",
)
def test_order_by_column_labeled_prop_attr_aliased_three(self):
User = self.classes.User
self._fixture(label=True)
ua = aliased(User)
s = Session()
q = s.query(User.ead, ua.ead).order_by(User.ead, ua.ead)
self.assert_compile(
q,
"SELECT (SELECT max(addresses.email_address) AS max_1 "
"FROM addresses, users WHERE addresses.user_id = users.id) "
"AS email_ad, (SELECT max(addresses.email_address) AS max_1 "
"FROM addresses, users AS users_1 WHERE addresses.user_id = "
"users_1.id) AS anon_1 ORDER BY email_ad, anon_1",
)
q = s.query(User, ua).order_by(User.ead, ua.ead)
self.assert_compile(
q,
"SELECT (SELECT max(addresses.email_address) AS max_1 "
"FROM addresses WHERE addresses.user_id = users.id) AS "
"email_ad, users.id AS users_id, users.name AS users_name, "
"(SELECT max(addresses.email_address) AS max_1 FROM addresses "
"WHERE addresses.user_id = users_1.id) AS anon_1, users_1.id "
"AS users_1_id, users_1.name AS users_1_name FROM users, "
"users AS users_1 ORDER BY email_ad, anon_1",
)
def test_order_by_column_labeled_prop_attr_aliased_four(self):
User = self.classes.User
self._fixture(label=True, polymorphic=True)
ua = aliased(User)
s = Session()
q = s.query(ua, User.id).order_by(ua.ead)
self.assert_compile(
q,
"SELECT (SELECT max(addresses.email_address) AS max_1 FROM "
"addresses WHERE addresses.user_id = users_1.id) AS anon_1, "
"users_1.id AS users_1_id, users_1.name AS users_1_name, "
"users.id AS users_id FROM users AS users_1, "
"users ORDER BY anon_1",
)
def test_order_by_column_unlabeled_prop_attr_aliased_one(self):
User = self.classes.User
self._fixture(label=False)
ua = aliased(User)
s = Session()
q = s.query(ua).order_by(ua.ead)
self.assert_compile(
q,
"SELECT (SELECT max(addresses.email_address) AS max_1 "
"FROM addresses WHERE addresses.user_id = users_1.id) AS anon_1, "
"users_1.id AS users_1_id, users_1.name AS users_1_name "
"FROM users AS users_1 ORDER BY anon_1",
)
def test_order_by_column_unlabeled_prop_attr_aliased_two(self):
User = self.classes.User
self._fixture(label=False)
ua = aliased(User)
s = Session()
q = s.query(ua.ead).order_by(ua.ead)
self.assert_compile(
q,
"SELECT (SELECT max(addresses.email_address) AS max_1 "
"FROM addresses, "
"users AS users_1 WHERE addresses.user_id = users_1.id) "
"AS anon_1 ORDER BY anon_1",
)
# we're also testing that the state of "ua" is OK after the
# previous call, so the batching into one test is intentional
q = s.query(ua).order_by(ua.ead)
self.assert_compile(
q,
"SELECT (SELECT max(addresses.email_address) AS max_1 "
"FROM addresses WHERE addresses.user_id = users_1.id) AS anon_1, "
"users_1.id AS users_1_id, users_1.name AS users_1_name "
"FROM users AS users_1 ORDER BY anon_1",
)
def test_order_by_column_unlabeled_prop_attr_aliased_three(self):
User = self.classes.User
self._fixture(label=False)
ua = aliased(User)
s = Session()
q = s.query(User.ead, ua.ead).order_by(User.ead, ua.ead)
self.assert_compile(
q,
"SELECT (SELECT max(addresses.email_address) AS max_1 "
"FROM addresses, users WHERE addresses.user_id = users.id) "
"AS anon_1, (SELECT max(addresses.email_address) AS max_1 "
"FROM addresses, users AS users_1 "
"WHERE addresses.user_id = users_1.id) AS anon_2 "
"ORDER BY anon_1, anon_2",
)
q = s.query(User, ua).order_by(User.ead, ua.ead)
self.assert_compile(
q,
"SELECT (SELECT max(addresses.email_address) AS max_1 "
"FROM addresses WHERE addresses.user_id = users.id) AS "
"anon_1, users.id AS users_id, users.name AS users_name, "
"(SELECT max(addresses.email_address) AS max_1 FROM addresses "
"WHERE addresses.user_id = users_1.id) AS anon_2, users_1.id "
"AS users_1_id, users_1.name AS users_1_name FROM users, "
"users AS users_1 ORDER BY anon_1, anon_2",
)
def test_order_by_column_prop_attr(self):
User, Address = self.classes("User", "Address")
self._fixture(label=True)
s = Session()
q = s.query(User).order_by(User.ead)
# this one is a bit of a surprise; this is compiler
# label-order-by logic kicking in, but won't work in more
# complex cases.
self.assert_compile(
q,
"SELECT (SELECT max(addresses.email_address) AS max_1 "
"FROM addresses "
"WHERE addresses.user_id = users.id) AS email_ad, "
"users.id AS users_id, users.name AS users_name "
"FROM users ORDER BY email_ad",
)
def test_order_by_column_prop_attr_non_present(self):
User, Address = self.classes("User", "Address")
self._fixture(label=True)
s = Session()
q = s.query(User).options(defer(User.ead)).order_by(User.ead)
self.assert_compile(
q,
"SELECT users.id AS users_id, users.name AS users_name "
"FROM users ORDER BY "
"(SELECT max(addresses.email_address) AS max_1 "
"FROM addresses "
"WHERE addresses.user_id = users.id)",
)
class ComparatorTest(QueryTest):
def test_clause_element_query_resolve(self):
from sqlalchemy.orm.properties import ColumnProperty
User = self.classes.User
class Comparator(ColumnProperty.Comparator):
def __init__(self, expr):
self.expr = expr
def __clause_element__(self):
return self.expr
sess = Session()
eq_(
sess.query(Comparator(User.id))
.order_by(Comparator(User.id))
.all(),
[(7,), (8,), (9,), (10,)],
)
# more slice tests are available in test/orm/generative.py
class SliceTest(QueryTest):
__dialect__ = "default"
__backend__ = True
def test_first(self):
User = self.classes.User
assert User(id=7) == create_session().query(User).first()
assert (
create_session().query(User).filter(User.id == 27).first() is None
)
def test_limit_offset_applies(self):
"""Test that the expected LIMIT/OFFSET is applied for slices.
The LIMIT/OFFSET syntax differs slightly on all databases, and
query[x:y] executes immediately, so we are asserting against
SQL strings using sqlite's syntax.
"""
User = self.classes.User
sess = create_session()
q = sess.query(User).order_by(User.id)
self.assert_sql(
testing.db,
lambda: q[10:20],
[
(
"SELECT users.id AS users_id, users.name "
"AS users_name FROM users ORDER BY users.id "
"LIMIT :param_1 OFFSET :param_2",
{"param_1": 10, "param_2": 10},
)
],
)
self.assert_sql(
testing.db,
lambda: q[:20],
[
(
"SELECT users.id AS users_id, users.name "
"AS users_name FROM users ORDER BY users.id "
"LIMIT :param_1",
{"param_1": 20},
)
],
)
self.assert_sql(
testing.db,
lambda: q[5:],
[
(
"SELECT users.id AS users_id, users.name "
"AS users_name FROM users ORDER BY users.id "
"LIMIT -1 OFFSET :param_1",
{"param_1": 5},
)
],
)
self.assert_sql(testing.db, lambda: q[2:2], [])
self.assert_sql(testing.db, lambda: q[-2:-5], [])
self.assert_sql(
testing.db,
lambda: q[-5:-2],
[
(
"SELECT users.id AS users_id, users.name AS users_name "
"FROM users ORDER BY users.id",
{},
)
],
)
self.assert_sql(
testing.db,
lambda: q[-5:],
[
(
"SELECT users.id AS users_id, users.name AS users_name "
"FROM users ORDER BY users.id",
{},
)
],
)
self.assert_sql(
testing.db,
lambda: q[:],
[
(
"SELECT users.id AS users_id, users.name AS users_name "
"FROM users ORDER BY users.id",
{},
)
],
)
@testing.requires.sql_expression_limit_offset
def test_first_against_expression_offset(self):
User = self.classes.User
sess = create_session()
q = (
sess.query(User)
.order_by(User.id)
.offset(literal_column("2") + literal_column("3"))
)
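        # .first() layers LIMIT 1 on top of the SQL-expression OFFSET,
        # which stays inline as "2 + 3" rather than becoming a bound param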
self.assert_sql(
testing.db,
q.first,
[
(
"SELECT users.id AS users_id, users.name AS users_name "
"FROM users ORDER BY users.id "
"LIMIT :param_1 OFFSET 2 + 3",
[{"param_1": 1}],
)
],
)
@testing.requires.sql_expression_limit_offset
def test_full_slice_against_expression_offset(self):
User = self.classes.User
sess = create_session()
q = (
sess.query(User)
.order_by(User.id)
.offset(literal_column("2") + literal_column("3"))
)
self.assert_sql(
testing.db,
lambda: q[2:5],
[
(
"SELECT users.id AS users_id, users.name AS users_name "
"FROM users ORDER BY users.id "
"LIMIT :param_1 OFFSET 2 + 3 + :param_2",
[{"param_1": 3, "param_2": 2}],
)
],
)
def test_full_slice_against_integer_offset(self):
User = self.classes.User
sess = create_session()
q = sess.query(User).order_by(User.id).offset(2)
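        # the pre-existing .offset(2) combines with the slice start:
        # OFFSET 2 + 2 = 4, and the slice length gives LIMIT 5 - 2 = 3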
self.assert_sql(
testing.db,
lambda: q[2:5],
[
(
"SELECT users.id AS users_id, users.name AS users_name "
"FROM users ORDER BY users.id "
"LIMIT :param_1 OFFSET :param_2",
[{"param_1": 3, "param_2": 4}],
)
],
)
@testing.requires.sql_expression_limit_offset
def test_start_slice_against_expression_offset(self):
User = self.classes.User
sess = create_session()
q = sess.query(User).order_by(User.id).offset(literal_column("2"))
self.assert_sql(
testing.db,
lambda: q[2:],
[
(
"SELECT users.id AS users_id, users.name AS users_name "
"FROM users ORDER BY users.id "
"LIMIT -1 OFFSET 2 + :2_1",
[{"2_1": 2}],
)
],
)
class FilterTest(QueryTest, AssertsCompiledSQL):
__dialect__ = "default"
def test_basic(self):
User = self.classes.User
users = create_session().query(User).all()
eq_([User(id=7), User(id=8), User(id=9), User(id=10)], users)
@testing.requires.offset
def test_limit_offset(self):
User = self.classes.User
sess = create_session()
assert [User(id=8), User(id=9)] == sess.query(User).order_by(
User.id
).limit(2).offset(1).all()
assert [User(id=8), User(id=9)] == list(
sess.query(User).order_by(User.id)[1:3]
)
assert User(id=8) == sess.query(User).order_by(User.id)[1]
assert [] == sess.query(User).order_by(User.id)[3:3]
assert [] == sess.query(User).order_by(User.id)[0:0]
@testing.requires.bound_limit_offset
def test_select_with_bindparam_offset_limit(self):
"""Does a query allow bindparam for the limit?"""
User = self.classes.User
sess = create_session()
q1 = (
sess.query(self.classes.User)
.order_by(self.classes.User.id)
.limit(bindparam("n"))
)
for n in range(1, 4):
result = q1.params(n=n).all()
eq_(len(result), n)
eq_(
sess.query(User)
.order_by(User.id)
.limit(bindparam("limit"))
.offset(bindparam("offset"))
.params(limit=2, offset=1)
.all(),
[User(id=8), User(id=9)],
)
@testing.fails_on("mysql", "doesn't like CAST in the limit clause")
@testing.requires.bound_limit_offset
def test_select_with_bindparam_offset_limit_w_cast(self):
User = self.classes.User
sess = create_session()
eq_(
list(
sess.query(User)
.params(a=1, b=3)
.order_by(User.id)[
cast(bindparam("a"), Integer) : cast(
bindparam("b"), Integer
)
]
),
[User(id=8), User(id=9)],
)
@testing.requires.boolean_col_expressions
def test_exists(self):
User = self.classes.User
sess = create_session(testing.db)
assert sess.query(exists().where(User.id == 9)).scalar()
assert not sess.query(exists().where(User.id == 29)).scalar()
def test_one_filter(self):
User = self.classes.User
assert [User(id=8), User(id=9)] == create_session().query(User).filter(
User.name.endswith("ed")
).all()
def test_contains(self):
"""test comparing a collection to an object instance."""
User, Address = self.classes.User, self.classes.Address
sess = create_session()
address = sess.query(Address).get(3)
assert [User(id=8)] == sess.query(User).filter(
User.addresses.contains(address)
).all()
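        # direct == / != comparison against a one-to-many collection is
        # rejected; contains(), any() or == None are the supported forms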
try:
sess.query(User).filter(User.addresses == address)
assert False
except sa_exc.InvalidRequestError:
assert True
assert [User(id=10)] == sess.query(User).filter(
User.addresses == None
).all() # noqa
try:
assert [User(id=7), User(id=9), User(id=10)] == sess.query(
User
).filter(User.addresses != address).all()
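        # with yield_per(1), rows are fetched as iteration proceeds, so
        # the identity map below grows in step with each next() call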
assert False
except sa_exc.InvalidRequestError:
assert True
# assert [User(id=7), User(id=9), User(id=10)] ==
# sess.query(User).filter(User.addresses!=address).all()
def test_clause_element_ok(self):
User = self.classes.User
s = Session()
self.assert_compile(
s.query(User).filter(User.addresses),
"SELECT users.id AS users_id, users.name AS users_name "
"FROM users, addresses WHERE users.id = addresses.user_id",
)
def test_unique_binds_join_cond(self):
"""test that binds used when the lazyclause is used in criterion are
unique"""
User, Address = self.classes.User, self.classes.Address
sess = Session()
a1, a2 = sess.query(Address).order_by(Address.id)[0:2]
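        # contains(a1) and contains(a2) each render the lazy
        # "users.id = :param" clause; the generated bind names must stay
        # distinct (param_1 / param_2 in the compiled UNION below)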
self.assert_compile(
sess.query(User)
.filter(User.addresses.contains(a1))
.union(sess.query(User).filter(User.addresses.contains(a2))),
"SELECT anon_1.users_id AS anon_1_users_id, anon_1.users_name AS "
"anon_1_users_name FROM (SELECT users.id AS users_id, "
"users.name AS users_name FROM users WHERE users.id = :param_1 "
"UNION SELECT users.id AS users_id, users.name AS users_name "
"FROM users WHERE users.id = :param_2) AS anon_1",
checkparams={"param_1": 7, "param_2": 8},
)
def test_any(self):
# see also HasAnyTest, a newer suite which tests these at the level of
# SQL compilation
User, Address = self.classes.User, self.classes.Address
sess = create_session()
assert [User(id=8), User(id=9)] == sess.query(User).filter(
User.addresses.any(Address.email_address.like("%ed%"))
).all()
assert [User(id=8)] == sess.query(User).filter(
User.addresses.any(Address.email_address.like("%ed%"), id=4)
).all()
assert [User(id=8)] == sess.query(User).filter(
User.addresses.any(Address.email_address.like("%ed%"))
).filter(User.addresses.any(id=4)).all()
assert [User(id=9)] == sess.query(User).filter(
User.addresses.any(email_address="fred@fred.com")
).all()
# test that the contents are not adapted by the aliased join
assert (
[User(id=7), User(id=8)]
== sess.query(User)
.join("addresses", aliased=True)
.filter(
~User.addresses.any(Address.email_address == "fred@fred.com")
)
.all()
)
assert [User(id=10)] == sess.query(User).outerjoin(
"addresses", aliased=True
).filter(~User.addresses.any()).all()
def test_any_doesnt_overcorrelate(self):
# see also HasAnyTest, a newer suite which tests these at the level of
# SQL compilation
User, Address = self.classes.User, self.classes.Address
sess = create_session()
# test that any() doesn't overcorrelate
assert (
[User(id=7), User(id=8)]
== sess.query(User)
.join("addresses")
.filter(
~User.addresses.any(Address.email_address == "fred@fred.com")
)
.all()
)
def test_has(self):
# see also HasAnyTest, a newer suite which tests these at the level of
# SQL compilation
Dingaling, User, Address = (
self.classes.Dingaling,
self.classes.User,
self.classes.Address,
)
sess = create_session()
assert [Address(id=5)] == sess.query(Address).filter(
Address.user.has(name="fred")
).all()
assert (
[Address(id=2), Address(id=3), Address(id=4), Address(id=5)]
== sess.query(Address)
.filter(Address.user.has(User.name.like("%ed%")))
.order_by(Address.id)
.all()
)
assert (
[Address(id=2), Address(id=3), Address(id=4)]
== sess.query(Address)
.filter(Address.user.has(User.name.like("%ed%"), id=8))
.order_by(Address.id)
.all()
)
# test has() doesn't overcorrelate
assert (
[Address(id=2), Address(id=3), Address(id=4)]
== sess.query(Address)
.join("user")
.filter(Address.user.has(User.name.like("%ed%"), id=8))
.order_by(Address.id)
.all()
)
# test has() doesn't get subquery contents adapted by aliased join
assert (
[Address(id=2), Address(id=3), Address(id=4)]
== sess.query(Address)
.join("user", aliased=True)
.filter(Address.user.has(User.name.like("%ed%"), id=8))
.order_by(Address.id)
.all()
)
dingaling = sess.query(Dingaling).get(2)
assert [User(id=9)] == sess.query(User).filter(
User.addresses.any(Address.dingaling == dingaling)
).all()
def test_contains_m2m(self):
Item, Order = self.classes.Item, self.classes.Order
sess = create_session()
item = sess.query(Item).get(3)
eq_(
sess.query(Order)
.filter(Order.items.contains(item))
.order_by(Order.id)
.all(),
[Order(id=1), Order(id=2), Order(id=3)],
)
eq_(
sess.query(Order)
.filter(~Order.items.contains(item))
.order_by(Order.id)
.all(),
[Order(id=4), Order(id=5)],
)
item2 = sess.query(Item).get(5)
eq_(
sess.query(Order)
.filter(Order.items.contains(item))
.filter(Order.items.contains(item2))
.all(),
[Order(id=3)],
)
def test_comparison(self):
"""test scalar comparison to an object instance"""
Item, Order, Dingaling, User, Address = (
self.classes.Item,
self.classes.Order,
self.classes.Dingaling,
self.classes.User,
self.classes.Address,
)
sess = create_session()
user = sess.query(User).get(8)
assert [Address(id=2), Address(id=3), Address(id=4)] == sess.query(
Address
).filter(Address.user == user).all()
assert [Address(id=1), Address(id=5)] == sess.query(Address).filter(
Address.user != user
).all()
# generates an IS NULL
assert (
[] == sess.query(Address).filter(Address.user == None).all()
) # noqa
assert [] == sess.query(Address).filter(Address.user == null()).all()
assert [Order(id=5)] == sess.query(Order).filter(
Order.address == None
).all() # noqa
# o2o
dingaling = sess.query(Dingaling).get(2)
assert [Address(id=5)] == sess.query(Address).filter(
Address.dingaling == dingaling
).all()
# m2m
eq_(
sess.query(Item)
.filter(Item.keywords == None)
.order_by(Item.id) # noqa
.all(),
[Item(id=4), Item(id=5)],
)
eq_(
sess.query(Item)
.filter(Item.keywords != None)
.order_by(Item.id) # noqa
.all(),
[Item(id=1), Item(id=2), Item(id=3)],
)
def test_filter_by(self):
User, Address = self.classes.User, self.classes.Address
sess = create_session()
user = sess.query(User).get(8)
assert [Address(id=2), Address(id=3), Address(id=4)] == sess.query(
Address
).filter_by(user=user).all()
# many to one generates IS NULL
assert [] == sess.query(Address).filter_by(user=None).all()
assert [] == sess.query(Address).filter_by(user=null()).all()
# one to many generates WHERE NOT EXISTS
assert [User(name="chuck")] == sess.query(User).filter_by(
addresses=None
).all()
assert [User(name="chuck")] == sess.query(User).filter_by(
addresses=null()
).all()
def test_filter_by_tables(self):
users = self.tables.users
addresses = self.tables.addresses
sess = create_session()
self.assert_compile(
sess.query(users)
.filter_by(name="ed")
.join(addresses, users.c.id == addresses.c.user_id)
.filter_by(email_address="ed@ed.com"),
"SELECT users.id AS users_id, users.name AS users_name "
"FROM users JOIN addresses ON users.id = addresses.user_id "
"WHERE users.name = :name_1 AND "
"addresses.email_address = :email_address_1",
checkparams={"email_address_1": "ed@ed.com", "name_1": "ed"},
)
def test_empty_filters(self):
User = self.classes.User
sess = create_session()
q1 = sess.query(User)
is_(None, q1.filter().whereclause)
is_(None, q1.filter_by().whereclause)
def test_filter_by_no_property(self):
addresses = self.tables.addresses
sess = create_session()
assert_raises_message(
sa.exc.InvalidRequestError,
"Entity 'addresses' has no property 'name'",
sess.query(addresses).filter_by,
name="ed",
)
def test_none_comparison(self):
Order, User, Address = (
self.classes.Order,
self.classes.User,
self.classes.Address,
)
sess = create_session()
# scalar
eq_(
[Order(description="order 5")],
sess.query(Order).filter(Order.address_id == None).all(), # noqa
)
eq_(
[Order(description="order 5")],
sess.query(Order).filter(Order.address_id == null()).all(),
)
# o2o
eq_(
[Address(id=1), Address(id=3), Address(id=4)],
sess.query(Address)
.filter(Address.dingaling == None)
.order_by(Address.id) # noqa
.all(),
)
eq_(
[Address(id=1), Address(id=3), Address(id=4)],
sess.query(Address)
.filter(Address.dingaling == null())
.order_by(Address.id)
.all(),
)
eq_(
[Address(id=2), Address(id=5)],
sess.query(Address)
.filter(Address.dingaling != None)
.order_by(Address.id) # noqa
.all(),
)
eq_(
[Address(id=2), Address(id=5)],
sess.query(Address)
.filter(Address.dingaling != null())
.order_by(Address.id)
.all(),
)
# m2o
eq_(
[Order(id=5)],
sess.query(Order).filter(Order.address == None).all(),
) # noqa
eq_(
[Order(id=1), Order(id=2), Order(id=3), Order(id=4)],
sess.query(Order)
.order_by(Order.id)
.filter(Order.address != None)
.all(),
) # noqa
# o2m
eq_(
[User(id=10)],
sess.query(User).filter(User.addresses == None).all(),
) # noqa
eq_(
[User(id=7), User(id=8), User(id=9)],
sess.query(User)
.filter(User.addresses != None)
.order_by(User.id) # noqa
.all(),
)
def test_blank_filter_by(self):
User = self.classes.User
eq_(
[(7,), (8,), (9,), (10,)],
create_session()
.query(User.id)
.filter_by()
.order_by(User.id)
.all(),
)
eq_(
[(7,), (8,), (9,), (10,)],
create_session()
.query(User.id)
.filter_by(**{})
.order_by(User.id)
.all(),
)
def test_text_coerce(self):
User = self.classes.User
s = create_session()
self.assert_compile(
s.query(User).filter(text("name='ed'")),
"SELECT users.id AS users_id, users.name "
"AS users_name FROM users WHERE name='ed'",
)
class HasAnyTest(fixtures.DeclarativeMappedTest, AssertsCompiledSQL):
__dialect__ = "default"
@classmethod
def setup_classes(cls):
Base = cls.DeclarativeBasic
class D(Base):
__tablename__ = "d"
id = Column(Integer, primary_key=True)
class C(Base):
__tablename__ = "c"
id = Column(Integer, primary_key=True)
d_id = Column(ForeignKey(D.id))
bs = relationship("B", back_populates="c")
b_d = Table(
"b_d",
Base.metadata,
Column("bid", ForeignKey("b.id")),
Column("did", ForeignKey("d.id")),
)
# note we are using the ForeignKey pattern identified as a bug
# in [ticket:4367]
class B(Base):
__tablename__ = "b"
id = Column(Integer, primary_key=True)
c_id = Column(ForeignKey(C.id))
c = relationship("C", back_populates="bs")
d = relationship("D", secondary=b_d)
class A(Base):
__tablename__ = "a"
id = Column(Integer, primary_key=True)
b_id = Column(ForeignKey(B.id))
d = relationship(
"D",
secondary="join(B, C)",
primaryjoin="A.b_id == B.id",
secondaryjoin="C.d_id == D.id",
uselist=False,
)
def test_has_composite_secondary(self):
A, D = self.classes("A", "D")
s = Session()
self.assert_compile(
s.query(A).filter(A.d.has(D.id == 1)),
"SELECT a.id AS a_id, a.b_id AS a_b_id FROM a WHERE EXISTS "
"(SELECT 1 FROM d, b JOIN c ON c.id = b.c_id "
"WHERE a.b_id = b.id AND c.d_id = d.id AND d.id = :id_1)",
)
def test_has_many_to_one(self):
B, C = self.classes("B", "C")
s = Session()
self.assert_compile(
s.query(B).filter(B.c.has(C.id == 1)),
"SELECT b.id AS b_id, b.c_id AS b_c_id FROM b WHERE "
"EXISTS (SELECT 1 FROM c WHERE c.id = b.c_id AND c.id = :id_1)",
)
def test_any_many_to_many(self):
B, D = self.classes("B", "D")
s = Session()
self.assert_compile(
s.query(B).filter(B.d.any(D.id == 1)),
"SELECT b.id AS b_id, b.c_id AS b_c_id FROM b WHERE "
"EXISTS (SELECT 1 FROM b_d, d WHERE b.id = b_d.bid "
"AND d.id = b_d.did AND d.id = :id_1)",
)
def test_any_one_to_many(self):
B, C = self.classes("B", "C")
s = Session()
self.assert_compile(
s.query(C).filter(C.bs.any(B.id == 1)),
"SELECT c.id AS c_id, c.d_id AS c_d_id FROM c WHERE "
"EXISTS (SELECT 1 FROM b WHERE c.id = b.c_id AND b.id = :id_1)",
)
def test_any_many_to_many_doesnt_overcorrelate(self):
B, D = self.classes("B", "D")
s = Session()
self.assert_compile(
s.query(B).join(B.d).filter(B.d.any(D.id == 1)),
"SELECT b.id AS b_id, b.c_id AS b_c_id FROM "
"b JOIN b_d AS b_d_1 ON b.id = b_d_1.bid "
"JOIN d ON d.id = b_d_1.did WHERE "
"EXISTS (SELECT 1 FROM b_d, d WHERE b.id = b_d.bid "
"AND d.id = b_d.did AND d.id = :id_1)",
)
def test_has_doesnt_overcorrelate(self):
B, C = self.classes("B", "C")
s = Session()
self.assert_compile(
s.query(B).join(B.c).filter(B.c.has(C.id == 1)),
"SELECT b.id AS b_id, b.c_id AS b_c_id "
"FROM b JOIN c ON c.id = b.c_id "
"WHERE EXISTS "
"(SELECT 1 FROM c WHERE c.id = b.c_id AND c.id = :id_1)",
)
def test_has_doesnt_get_aliased_join_subq(self):
B, C = self.classes("B", "C")
s = Session()
self.assert_compile(
s.query(B).join(B.c, aliased=True).filter(B.c.has(C.id == 1)),
"SELECT b.id AS b_id, b.c_id AS b_c_id "
"FROM b JOIN c AS c_1 ON c_1.id = b.c_id "
"WHERE EXISTS "
"(SELECT 1 FROM c WHERE c.id = b.c_id AND c.id = :id_1)",
)
def test_any_many_to_many_doesnt_get_aliased_join_subq(self):
B, D = self.classes("B", "D")
s = Session()
self.assert_compile(
s.query(B).join(B.d, aliased=True).filter(B.d.any(D.id == 1)),
"SELECT b.id AS b_id, b.c_id AS b_c_id "
"FROM b JOIN b_d AS b_d_1 ON b.id = b_d_1.bid "
"JOIN d AS d_1 ON d_1.id = b_d_1.did "
"WHERE EXISTS "
"(SELECT 1 FROM b_d, d WHERE b.id = b_d.bid "
"AND d.id = b_d.did AND d.id = :id_1)",
)
class HasMapperEntitiesTest(QueryTest):
def test_entity(self):
User = self.classes.User
s = Session()
q = s.query(User)
assert q._has_mapper_entities
def test_cols(self):
User = self.classes.User
s = Session()
q = s.query(User.id)
assert not q._has_mapper_entities
def test_cols_set_entities(self):
User = self.classes.User
s = Session()
q = s.query(User.id)
q._set_entities(User)
assert q._has_mapper_entities
def test_entity_set_entities(self):
User = self.classes.User
s = Session()
q = s.query(User)
q._set_entities(User.id)
assert not q._has_mapper_entities
class SetOpsTest(QueryTest, AssertsCompiledSQL):
__dialect__ = "default"
def test_union(self):
User = self.classes.User
s = create_session()
fred = s.query(User).filter(User.name == "fred")
ed = s.query(User).filter(User.name == "ed")
jack = s.query(User).filter(User.name == "jack")
eq_(
fred.union(ed).order_by(User.name).all(),
[User(name="ed"), User(name="fred")],
)
eq_(
fred.union(ed, jack).order_by(User.name).all(),
[User(name="ed"), User(name="fred"), User(name="jack")],
)
eq_(
fred.union(ed).union(jack).order_by(User.name).all(),
[User(name="ed"), User(name="fred"), User(name="jack")],
)
def test_statement_labels(self):
"""test that label conflicts don't occur with joins etc."""
User, Address = self.classes.User, self.classes.Address
s = create_session()
q1 = (
s.query(User, Address)
.join(User.addresses)
.filter(Address.email_address == "ed@wood.com")
)
q2 = (
s.query(User, Address)
.join(User.addresses)
.filter(Address.email_address == "jack@bean.com")
)
q3 = q1.union(q2).order_by(User.name)
eq_(
q3.all(),
[
(User(name="ed"), Address(email_address="ed@wood.com")),
(User(name="jack"), Address(email_address="jack@bean.com")),
],
)
def test_union_literal_expressions_compile(self):
"""test that column expressions translate during
the _from_statement() portion of union(), others"""
User = self.classes.User
s = Session()
q1 = s.query(User, literal("x"))
q2 = s.query(User, literal_column("'y'"))
q3 = q1.union(q2)
self.assert_compile(
q3,
"SELECT anon_1.users_id AS anon_1_users_id, "
"anon_1.users_name AS anon_1_users_name, "
"anon_1.param_1 AS anon_1_param_1 "
"FROM (SELECT users.id AS users_id, users.name AS "
"users_name, :param_1 AS param_1 "
"FROM users UNION SELECT users.id AS users_id, "
"users.name AS users_name, 'y' FROM users) AS anon_1",
)
def test_union_literal_expressions_results(self):
User = self.classes.User
s = Session()
q1 = s.query(User, literal("x"))
q2 = s.query(User, literal_column("'y'"))
q3 = q1.union(q2)
q4 = s.query(User, literal_column("'x'").label("foo"))
q5 = s.query(User, literal("y"))
q6 = q4.union(q5)
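        # q3's literal("x") gets an anonymous label (anon_1_param_1) in the
        # union subquery, while q4's explicit .label("foo") can be addressed
        # by name in order_by()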
eq_([x["name"] for x in q6.column_descriptions], ["User", "foo"])
for q in (
q3.order_by(User.id, text("anon_1_param_1")),
q6.order_by(User.id, "foo"),
):
eq_(
q.all(),
[
(User(id=7, name="jack"), "x"),
(User(id=7, name="jack"), "y"),
(User(id=8, name="ed"), "x"),
(User(id=8, name="ed"), "y"),
(User(id=9, name="fred"), "x"),
(User(id=9, name="fred"), "y"),
(User(id=10, name="chuck"), "x"),
(User(id=10, name="chuck"), "y"),
],
)
def test_union_labeled_anonymous_columns(self):
User = self.classes.User
s = Session()
c1, c2 = column("c1"), column("c2")
q1 = s.query(User, c1.label("foo"), c1.label("bar"))
q2 = s.query(User, c1.label("foo"), c2.label("bar"))
q3 = q1.union(q2)
eq_(
[x["name"] for x in q3.column_descriptions], ["User", "foo", "bar"]
)
self.assert_compile(
q3,
"SELECT anon_1.users_id AS anon_1_users_id, "
"anon_1.users_name AS anon_1_users_name, "
"anon_1.foo AS anon_1_foo, anon_1.bar AS anon_1_bar "
"FROM (SELECT users.id AS users_id, users.name AS users_name, "
"c1 AS foo, c1 AS bar FROM users UNION SELECT users.id AS "
"users_id, users.name AS users_name, c1 AS foo, c2 AS bar "
"FROM users) AS anon_1",
)
def test_order_by_anonymous_col(self):
User = self.classes.User
s = Session()
c1, c2 = column("c1"), column("c2")
f = c1.label("foo")
q1 = s.query(User, f, c2.label("bar"))
q2 = s.query(User, c1.label("foo"), c2.label("bar"))
q3 = q1.union(q2)
self.assert_compile(
q3.order_by(c1),
"SELECT anon_1.users_id AS anon_1_users_id, anon_1.users_name AS "
"anon_1_users_name, anon_1.foo AS anon_1_foo, anon_1.bar AS "
"anon_1_bar FROM (SELECT users.id AS users_id, users.name AS "
"users_name, c1 AS foo, c2 AS bar "
"FROM users UNION SELECT users.id "
"AS users_id, users.name AS users_name, c1 AS foo, c2 AS bar "
"FROM users) AS anon_1 ORDER BY anon_1.foo",
)
self.assert_compile(
q3.order_by(f),
"SELECT anon_1.users_id AS anon_1_users_id, anon_1.users_name AS "
"anon_1_users_name, anon_1.foo AS anon_1_foo, anon_1.bar AS "
"anon_1_bar FROM (SELECT users.id AS users_id, users.name AS "
"users_name, c1 AS foo, c2 AS bar "
"FROM users UNION SELECT users.id "
"AS users_id, users.name AS users_name, c1 AS foo, c2 AS bar "
"FROM users) AS anon_1 ORDER BY anon_1.foo",
)
def test_union_mapped_colnames_preserved_across_subquery(self):
User = self.classes.User
s = Session()
q1 = s.query(User.name)
q2 = s.query(User.name)
# the label names in the subquery are the typical anonymized ones
self.assert_compile(
q1.union(q2),
"SELECT anon_1.users_name AS anon_1_users_name "
"FROM (SELECT users.name AS users_name FROM users "
"UNION SELECT users.name AS users_name FROM users) AS anon_1",
)
# but in the returned named tuples,
# due to [ticket:1942], this should be 'name', not 'users_name'
eq_([x["name"] for x in q1.union(q2).column_descriptions], ["name"])
@testing.requires.intersect
def test_intersect(self):
User = self.classes.User
s = create_session()
fred = s.query(User).filter(User.name == "fred")
ed = s.query(User).filter(User.name == "ed")
jack = s.query(User).filter(User.name == "jack")
eq_(fred.intersect(ed, jack).all(), [])
eq_(fred.union(ed).intersect(ed.union(jack)).all(), [User(name="ed")])
def test_eager_load(self):
User, Address = self.classes.User, self.classes.Address
s = create_session()
fred = s.query(User).filter(User.name == "fred")
ed = s.query(User).filter(User.name == "ed")
def go():
eq_(
fred.union(ed)
.order_by(User.name)
.options(joinedload(User.addresses))
.all(),
[
User(
name="ed", addresses=[Address(), Address(), Address()]
),
User(name="fred", addresses=[Address()]),
],
)
self.assert_sql_count(testing.db, go, 1)
class AggregateTest(QueryTest):
def test_sum(self):
Order = self.classes.Order
sess = create_session()
orders = sess.query(Order).filter(Order.id.in_([2, 3, 4]))
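        # 79 is sum(user_id * address_id) over orders 2, 3 and 4 in the
        # standard fixture data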
eq_(
next(orders.values(func.sum(Order.user_id * Order.address_id))),
(79,),
)
eq_(orders.value(func.sum(Order.user_id * Order.address_id)), 79)
def test_apply(self):
Order = self.classes.Order
sess = create_session()
assert sess.query(func.sum(Order.user_id * Order.address_id)).filter(
Order.id.in_([2, 3, 4])
).one() == (79,)
def test_having(self):
User, Address = self.classes.User, self.classes.Address
sess = create_session()
assert (
[User(name="ed", id=8)]
== sess.query(User)
.order_by(User.id)
.group_by(User)
.join("addresses")
.having(func.count(Address.id) > 2)
.all()
)
assert (
[User(name="jack", id=7), User(name="fred", id=9)]
== sess.query(User)
.order_by(User.id)
.group_by(User)
.join("addresses")
.having(func.count(Address.id) < 2)
.all()
)
class ExistsTest(QueryTest, AssertsCompiledSQL):
__dialect__ = "default"
def test_exists(self):
User = self.classes.User
sess = create_session()
q1 = sess.query(User)
self.assert_compile(
sess.query(q1.exists()),
"SELECT EXISTS (" "SELECT 1 FROM users" ") AS anon_1",
)
q2 = sess.query(User).filter(User.name == "fred")
self.assert_compile(
sess.query(q2.exists()),
"SELECT EXISTS ("
"SELECT 1 FROM users WHERE users.name = :name_1"
") AS anon_1",
)
def test_exists_col_warning(self):
User = self.classes.User
Address = self.classes.Address
sess = create_session()
q1 = sess.query(User, Address).filter(User.id == Address.user_id)
self.assert_compile(
sess.query(q1.exists()),
"SELECT EXISTS ("
"SELECT 1 FROM users, addresses "
"WHERE users.id = addresses.user_id"
") AS anon_1",
)
def test_exists_w_select_from(self):
User = self.classes.User
sess = create_session()
q1 = sess.query().select_from(User).exists()
self.assert_compile(
sess.query(q1), "SELECT EXISTS (SELECT 1 FROM users) AS anon_1"
)
class CountTest(QueryTest):
def test_basic(self):
users, User = self.tables.users, self.classes.User
s = create_session()
eq_(s.query(User).count(), 4)
eq_(s.query(User).filter(users.c.name.endswith("ed")).count(), 2)
def test_count_char(self):
User = self.classes.User
s = create_session()
# '*' is favored here as the most common character,
# it is reported that Informix doesn't like count(1),
# rumors about Oracle preferring count(1) don't appear
# to be well founded.
self.assert_sql_execution(
testing.db,
s.query(User).count,
CompiledSQL(
"SELECT count(*) AS count_1 FROM "
"(SELECT users.id AS users_id, users.name "
"AS users_name FROM users) AS anon_1",
{},
),
)
def test_multiple_entity(self):
User, Address = self.classes.User, self.classes.Address
s = create_session()
q = s.query(User, Address)
eq_(q.count(), 20) # cartesian product
q = s.query(User, Address).join(User.addresses)
eq_(q.count(), 5)
def test_nested(self):
User, Address = self.classes.User, self.classes.Address
s = create_session()
q = s.query(User, Address).limit(2)
eq_(q.count(), 2)
q = s.query(User, Address).limit(100)
eq_(q.count(), 20)
q = s.query(User, Address).join(User.addresses).limit(100)
eq_(q.count(), 5)
def test_cols(self):
"""test that column-based queries always nest."""
User, Address = self.classes.User, self.classes.Address
s = create_session()
q = s.query(func.count(distinct(User.name)))
eq_(q.count(), 1)
q = s.query(func.count(distinct(User.name))).distinct()
eq_(q.count(), 1)
q = s.query(User.name)
eq_(q.count(), 4)
q = s.query(User.name, Address)
eq_(q.count(), 20)
q = s.query(Address.user_id)
eq_(q.count(), 5)
eq_(q.distinct().count(), 3)
class DistinctTest(QueryTest, AssertsCompiledSQL):
__dialect__ = "default"
def test_basic(self):
User = self.classes.User
eq_(
[User(id=7), User(id=8), User(id=9), User(id=10)],
create_session().query(User).order_by(User.id).distinct().all(),
)
eq_(
[User(id=7), User(id=9), User(id=8), User(id=10)],
create_session()
.query(User)
.distinct()
.order_by(desc(User.name))
.all(),
)
def test_columns_augmented_roundtrip_one(self):
User, Address = self.classes.User, self.classes.Address
sess = create_session()
q = (
sess.query(User)
.join("addresses")
.distinct()
.order_by(desc(Address.email_address))
)
eq_([User(id=7), User(id=9), User(id=8)], q.all())
def test_columns_augmented_roundtrip_two(self):
User, Address = self.classes.User, self.classes.Address
sess = create_session()
# test that it works on embedded joinedload/LIMIT subquery
q = (
sess.query(User)
.join("addresses")
.distinct()
.options(joinedload("addresses"))
.order_by(desc(Address.email_address))
.limit(2)
)
def go():
assert [
User(id=7, addresses=[Address(id=1)]),
User(id=9, addresses=[Address(id=5)]),
] == q.all()
self.assert_sql_count(testing.db, go, 1)
def test_columns_augmented_roundtrip_three(self):
User, Address = self.classes.User, self.classes.Address
sess = create_session()
q = (
sess.query(User.id, User.name.label("foo"), Address.id)
.filter(User.name == "jack")
.distinct()
.order_by(User.id, User.name, Address.email_address)
)
# even though columns are added, they aren't in the result
eq_(
q.all(),
[
(7, "jack", 3),
(7, "jack", 4),
(7, "jack", 2),
(7, "jack", 5),
(7, "jack", 1),
],
)
for row in q:
eq_(row.keys(), ["id", "foo", "id"])
def test_columns_augmented_sql_one(self):
User, Address = self.classes.User, self.classes.Address
sess = create_session()
q = (
sess.query(User.id, User.name.label("foo"), Address.id)
.distinct()
.order_by(User.id, User.name, Address.email_address)
)
        # Address.email_address is added because of DISTINCT; however,
        # User.id and User.name are not, because they're already present,
        # even though User.name is labeled
self.assert_compile(
q,
"SELECT DISTINCT users.id AS users_id, users.name AS foo, "
"addresses.id AS addresses_id, "
"addresses.email_address AS addresses_email_address FROM users, "
"addresses ORDER BY users.id, users.name, addresses.email_address",
)
def test_columns_augmented_sql_two(self):
User, Address = self.classes.User, self.classes.Address
sess = create_session()
q = (
sess.query(User)
.options(joinedload(User.addresses))
.distinct()
.order_by(User.name, Address.email_address)
.limit(5)
)
# addresses.email_address is added to inner query so that
# it is available in ORDER BY
self.assert_compile(
q,
"SELECT anon_1.users_id AS anon_1_users_id, "
"anon_1.users_name AS anon_1_users_name, "
"anon_1.addresses_email_address AS "
"anon_1_addresses_email_address, "
"addresses_1.id AS addresses_1_id, "
"addresses_1.user_id AS addresses_1_user_id, "
"addresses_1.email_address AS addresses_1_email_address "
"FROM (SELECT DISTINCT users.id AS users_id, "
"users.name AS users_name, "
"addresses.email_address AS addresses_email_address "
"FROM users, addresses "
"ORDER BY users.name, addresses.email_address "
"LIMIT :param_1) AS anon_1 LEFT OUTER JOIN "
"addresses AS addresses_1 "
"ON anon_1.users_id = addresses_1.user_id "
"ORDER BY anon_1.users_name, "
"anon_1.addresses_email_address, addresses_1.id",
)
def test_columns_augmented_sql_three(self):
User, Address = self.classes.User, self.classes.Address
sess = create_session()
q = (
sess.query(User.id, User.name.label("foo"), Address.id)
.distinct(User.name)
.order_by(User.id, User.name, Address.email_address)
)
# no columns are added when DISTINCT ON is used
self.assert_compile(
q,
"SELECT DISTINCT ON (users.name) users.id AS users_id, "
"users.name AS foo, addresses.id AS addresses_id FROM users, "
"addresses ORDER BY users.id, users.name, addresses.email_address",
dialect="postgresql",
)
def test_columns_augmented_sql_four(self):
User, Address = self.classes.User, self.classes.Address
sess = create_session()
q = (
sess.query(User)
.join("addresses")
.distinct(Address.email_address)
.options(joinedload("addresses"))
.order_by(desc(Address.email_address))
.limit(2)
)
        # but for the subquery / eager load case, we still need to make
        # the inner columns available for the ORDER BY even though it's
        # a DISTINCT ON
self.assert_compile(
q,
"SELECT anon_1.users_id AS anon_1_users_id, "
"anon_1.users_name AS anon_1_users_name, "
"anon_1.addresses_email_address AS "
"anon_1_addresses_email_address, "
"addresses_1.id AS addresses_1_id, "
"addresses_1.user_id AS addresses_1_user_id, "
"addresses_1.email_address AS addresses_1_email_address "
"FROM (SELECT DISTINCT ON (addresses.email_address) "
"users.id AS users_id, users.name AS users_name, "
"addresses.email_address AS addresses_email_address "
"FROM users JOIN addresses ON users.id = addresses.user_id "
"ORDER BY addresses.email_address DESC "
"LIMIT %(param_1)s) AS anon_1 "
"LEFT OUTER JOIN addresses AS addresses_1 "
"ON anon_1.users_id = addresses_1.user_id "
"ORDER BY anon_1.addresses_email_address DESC, addresses_1.id",
dialect="postgresql",
)
class PrefixWithTest(QueryTest, AssertsCompiledSQL):
def test_one_prefix(self):
User = self.classes.User
sess = create_session()
query = sess.query(User.name).prefix_with("PREFIX_1")
expected = "SELECT PREFIX_1 " "users.name AS users_name FROM users"
self.assert_compile(query, expected, dialect=default.DefaultDialect())
def test_many_prefixes(self):
User = self.classes.User
sess = create_session()
query = sess.query(User.name).prefix_with("PREFIX_1", "PREFIX_2")
        expected = (
            "SELECT PREFIX_1 PREFIX_2 users.name AS users_name FROM users"
        )
self.assert_compile(query, expected, dialect=default.DefaultDialect())
def test_chained_prefixes(self):
User = self.classes.User
sess = create_session()
query = (
sess.query(User.name)
.prefix_with("PREFIX_1")
.prefix_with("PREFIX_2", "PREFIX_3")
)
expected = (
"SELECT PREFIX_1 PREFIX_2 PREFIX_3 "
"users.name AS users_name FROM users"
)
self.assert_compile(query, expected, dialect=default.DefaultDialect())
class YieldTest(_fixtures.FixtureTest):
run_setup_mappers = "each"
run_inserts = "each"
def _eagerload_mappings(self, addresses_lazy=True, user_lazy=True):
User, Address = self.classes("User", "Address")
users, addresses = self.tables("users", "addresses")
mapper(
User,
users,
properties={
"addresses": relationship(
Address,
lazy=addresses_lazy,
backref=backref("user", lazy=user_lazy),
)
},
)
mapper(Address, addresses)
def test_basic(self):
self._eagerload_mappings()
User = self.classes.User
sess = create_session()
q = iter(
sess.query(User)
.yield_per(1)
.from_statement(text("select * from users"))
)
ret = []
eq_(len(sess.identity_map), 0)
ret.append(next(q))
ret.append(next(q))
eq_(len(sess.identity_map), 2)
ret.append(next(q))
ret.append(next(q))
eq_(len(sess.identity_map), 4)
try:
next(q)
assert False
except StopIteration:
pass
def test_yield_per_and_execution_options(self):
self._eagerload_mappings()
User = self.classes.User
sess = create_session()
q = sess.query(User).yield_per(15)
q = q.execution_options(foo="bar")
assert q._yield_per
eq_(
q._execution_options,
{"stream_results": True, "foo": "bar", "max_row_buffer": 15},
)
def test_no_joinedload_opt(self):
self._eagerload_mappings()
User = self.classes.User
sess = create_session()
q = sess.query(User).options(joinedload("addresses")).yield_per(1)
assert_raises_message(
sa_exc.InvalidRequestError,
"The yield_per Query option is currently not compatible with "
"joined collection eager loading. Please specify ",
q.all,
)
def test_no_subqueryload_opt(self):
self._eagerload_mappings()
User = self.classes.User
sess = create_session()
q = sess.query(User).options(subqueryload("addresses")).yield_per(1)
assert_raises_message(
sa_exc.InvalidRequestError,
"The yield_per Query option is currently not compatible with "
"subquery eager loading. Please specify ",
q.all,
)
def test_no_subqueryload_mapping(self):
self._eagerload_mappings(addresses_lazy="subquery")
User = self.classes.User
sess = create_session()
q = sess.query(User).yield_per(1)
assert_raises_message(
sa_exc.InvalidRequestError,
"The yield_per Query option is currently not compatible with "
"subquery eager loading. Please specify ",
q.all,
)
def test_joinedload_m2o_ok(self):
self._eagerload_mappings(user_lazy="joined")
Address = self.classes.Address
sess = create_session()
q = sess.query(Address).yield_per(1)
q.all()
def test_eagerload_opt_disable(self):
self._eagerload_mappings()
User = self.classes.User
sess = create_session()
q = (
sess.query(User)
.options(subqueryload("addresses"))
.enable_eagerloads(False)
.yield_per(1)
)
q.all()
q = (
sess.query(User)
.options(joinedload("addresses"))
.enable_eagerloads(False)
.yield_per(1)
)
q.all()
def test_m2o_joinedload_not_others(self):
self._eagerload_mappings(addresses_lazy="joined")
Address = self.classes.Address
sess = create_session()
q = (
sess.query(Address)
.options(lazyload("*"), joinedload("user"))
.yield_per(1)
.filter_by(id=1)
)
def go():
result = q.all()
assert result[0].user
self.assert_sql_count(testing.db, go, 1)
class HintsTest(QueryTest, AssertsCompiledSQL):
__dialect__ = "default"
def test_hints(self):
User = self.classes.User
from sqlalchemy.dialects import mysql
dialect = mysql.dialect()
sess = create_session()
self.assert_compile(
sess.query(User).with_hint(
User, "USE INDEX (col1_index,col2_index)"
),
"SELECT users.id AS users_id, users.name AS users_name "
"FROM users USE INDEX (col1_index,col2_index)",
dialect=dialect,
)
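        # a hint registered for the "sybase" dialect is skipped when
        # compiling against mysql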
self.assert_compile(
sess.query(User).with_hint(
User, "WITH INDEX col1_index", "sybase"
),
"SELECT users.id AS users_id, users.name AS users_name "
"FROM users",
dialect=dialect,
)
ualias = aliased(User)
self.assert_compile(
sess.query(User, ualias)
.with_hint(ualias, "USE INDEX (col1_index,col2_index)")
.join(ualias, ualias.id > User.id),
"SELECT users.id AS users_id, users.name AS users_name, "
"users_1.id AS users_1_id, users_1.name AS users_1_name "
"FROM users INNER JOIN users AS users_1 "
"USE INDEX (col1_index,col2_index) "
"ON users_1.id > users.id",
dialect=dialect,
)
def test_statement_hints(self):
User = self.classes.User
sess = create_session()
stmt = (
sess.query(User)
.with_statement_hint("test hint one")
.with_statement_hint("test hint two")
.with_statement_hint("test hint three", "postgresql")
)
self.assert_compile(
stmt,
"SELECT users.id AS users_id, users.name AS users_name "
"FROM users test hint one test hint two",
)
self.assert_compile(
stmt,
"SELECT users.id AS users_id, users.name AS users_name "
"FROM users test hint one test hint two test hint three",
dialect="postgresql",
)
class TextTest(QueryTest, AssertsCompiledSQL):
__dialect__ = "default"
def test_fulltext(self):
User = self.classes.User
assert_raises_message(
sa_exc.ArgumentError,
"Textual SQL expression",
create_session().query(User).from_statement,
"select * from users order by id",
)
eq_(
create_session()
.query(User)
.from_statement(text("select * from users order by id"))
.first(),
User(id=7),
)
eq_(
create_session()
.query(User)
.from_statement(
text("select * from users where name='nonexistent'")
)
.first(),
None,
)
def test_whereclause(self):
User = self.classes.User
eq_(
create_session().query(User).filter(text("id in (8, 9)")).all(),
[User(id=8), User(id=9)],
)
eq_(
create_session()
.query(User)
.filter(text("name='fred'"))
.filter(text("id=9"))
.all(),
[User(id=9)],
)
eq_(
create_session()
.query(User)
.filter(text("name='fred'"))
.filter(User.id == 9)
.all(),
[User(id=9)],
)
def test_binds_coerce(self):
User = self.classes.User
assert_raises_message(
sa_exc.ArgumentError,
r"Textual SQL expression 'id in \(:id1, :id2\)' "
"should be explicitly declared",
create_session().query(User).filter,
"id in (:id1, :id2)",
)
def test_as_column(self):
User = self.classes.User
s = create_session()
assert_raises(
sa_exc.InvalidRequestError, s.query, User.id, text("users.name")
)
eq_(
s.query(User.id, "name").order_by(User.id).all(),
[(7, "jack"), (8, "ed"), (9, "fred"), (10, "chuck")],
)
def test_via_select(self):
User = self.classes.User
s = create_session()
eq_(
s.query(User)
.from_statement(
select([column("id"), column("name")])
.select_from(table("users"))
.order_by("id")
)
.all(),
[User(id=7), User(id=8), User(id=9), User(id=10)],
)
def test_via_textasfrom_from_statement(self):
User = self.classes.User
s = create_session()
eq_(
s.query(User)
.from_statement(
text("select * from users order by id").columns(
id=Integer, name=String
)
)
.all(),
[User(id=7), User(id=8), User(id=9), User(id=10)],
)
def test_via_textasfrom_use_mapped_columns(self):
User = self.classes.User
s = create_session()
eq_(
s.query(User)
.from_statement(
text("select * from users order by id").columns(
User.id, User.name
)
)
.all(),
[User(id=7), User(id=8), User(id=9), User(id=10)],
)
def test_via_textasfrom_select_from(self):
User = self.classes.User
s = create_session()
eq_(
s.query(User)
.select_from(
text("select * from users")
.columns(id=Integer, name=String)
.subquery()
)
.order_by(User.id)
.all(),
[User(id=7), User(id=8), User(id=9), User(id=10)],
)
def test_group_by_accepts_text(self):
User = self.classes.User
s = create_session()
q = s.query(User).group_by(text("name"))
self.assert_compile(
q,
"SELECT users.id AS users_id, users.name AS users_name "
"FROM users GROUP BY name",
)
def test_orm_columns_accepts_text(self):
from sqlalchemy.orm.base import _orm_columns
t = text("x")
eq_(_orm_columns(t), [t])
def test_order_by_w_eager_one(self):
User = self.classes.User
s = create_session()
# from 1.0.0 thru 1.0.2, the "name" symbol here was considered
# to be part of the things we need to ORDER BY and it was being
# placed into the inner query's columns clause, as part of
# query._compound_eager_statement where we add unwrap_order_by()
# to the columns clause. However, as #3392 illustrates, unlocatable
# string expressions like "name desc" will only fail in this scenario,
# so in general the changing of the query structure with string labels
# is dangerous.
#
# the queries here are again "invalid" from a SQL perspective, as the
# "name" field isn't matched up to anything.
#
q = (
s.query(User)
.options(joinedload("addresses"))
.order_by(desc("name"))
.limit(1)
)
assert_raises_message(
sa_exc.CompileError,
"Can't resolve label reference for ORDER BY / GROUP BY.",
q.with_labels().statement.compile,
)
def test_order_by_w_eager_two(self):
User = self.classes.User
s = create_session()
q = (
s.query(User)
.options(joinedload("addresses"))
.order_by("name")
.limit(1)
)
assert_raises_message(
sa_exc.CompileError,
"Can't resolve label reference for ORDER BY / GROUP BY.",
q.with_labels().statement.compile,
)
def test_order_by_w_eager_three(self):
User = self.classes.User
s = create_session()
self.assert_compile(
s.query(User)
.options(joinedload("addresses"))
.order_by("users_name")
.limit(1),
"SELECT anon_1.users_id AS anon_1_users_id, "
"anon_1.users_name AS anon_1_users_name, "
"addresses_1.id AS addresses_1_id, "
"addresses_1.user_id AS addresses_1_user_id, "
"addresses_1.email_address AS addresses_1_email_address "
"FROM (SELECT users.id AS users_id, users.name AS users_name "
"FROM users ORDER BY users.name "
"LIMIT :param_1) AS anon_1 "
"LEFT OUTER JOIN addresses AS addresses_1 "
"ON anon_1.users_id = addresses_1.user_id "
"ORDER BY anon_1.users_name, addresses_1.id",
)
# however! this works (again?)
eq_(
s.query(User)
.options(joinedload("addresses"))
.order_by("users_name")
.first(),
User(name="chuck", addresses=[]),
)
def test_order_by_w_eager_four(self):
User = self.classes.User
Address = self.classes.Address
s = create_session()
self.assert_compile(
s.query(User)
.options(joinedload("addresses"))
.order_by(desc("users_name"))
.limit(1),
"SELECT anon_1.users_id AS anon_1_users_id, "
"anon_1.users_name AS anon_1_users_name, "
"addresses_1.id AS addresses_1_id, "
"addresses_1.user_id AS addresses_1_user_id, "
"addresses_1.email_address AS addresses_1_email_address "
"FROM (SELECT users.id AS users_id, users.name AS users_name "
"FROM users ORDER BY users.name DESC "
"LIMIT :param_1) AS anon_1 "
"LEFT OUTER JOIN addresses AS addresses_1 "
"ON anon_1.users_id = addresses_1.user_id "
"ORDER BY anon_1.users_name DESC, addresses_1.id",
)
# however! this works (again?)
eq_(
s.query(User)
.options(joinedload("addresses"))
.order_by(desc("users_name"))
.first(),
User(name="jack", addresses=[Address()]),
)
def test_order_by_w_eager_five(self):
"""essentially the same as test_eager_relations -> test_limit_3,
but test for textual label elements that are freeform.
this is again #3392."""
User = self.classes.User
Address = self.classes.Address
sess = create_session()
q = sess.query(User, Address.email_address.label("email_address"))
result = (
q.join("addresses")
.options(joinedload(User.orders))
.order_by("email_address desc")
.limit(1)
.offset(0)
)
assert_raises_message(
sa_exc.CompileError,
"Can't resolve label reference for ORDER BY / GROUP BY",
result.all,
)
class TextErrorTest(QueryTest, AssertsCompiledSQL):
def _test(self, fn, arg, offending_clause):
assert_raises_message(
sa.exc.ArgumentError,
r"Textual (?:SQL|column|SQL FROM) expression %(stmt)r should be "
r"explicitly declared (?:with|as) text\(%(stmt)r\)"
% {"stmt": util.ellipses_string(offending_clause)},
fn,
arg,
)
def test_filter(self):
User = self.classes.User
self._test(Session().query(User.id).filter, "myid == 5", "myid == 5")
def test_having(self):
User = self.classes.User
self._test(Session().query(User.id).having, "myid == 5", "myid == 5")
def test_from_statement(self):
User = self.classes.User
self._test(
Session().query(User.id).from_statement,
"select id from user",
"select id from user",
)
class ParentTest(QueryTest, AssertsCompiledSQL):
__dialect__ = "default"
def test_o2m(self):
User, orders, Order = (
self.classes.User,
self.tables.orders,
self.classes.Order,
)
sess = create_session()
q = sess.query(User)
u1 = q.filter_by(name="jack").one()
# test auto-lookup of property
o = sess.query(Order).with_parent(u1).all()
assert [
Order(description="order 1"),
Order(description="order 3"),
Order(description="order 5"),
] == o
# test with explicit property
o = sess.query(Order).with_parent(u1, property="orders").all()
assert [
Order(description="order 1"),
Order(description="order 3"),
Order(description="order 5"),
] == o
o = sess.query(Order).with_parent(u1, property=User.orders).all()
assert [
Order(description="order 1"),
Order(description="order 3"),
Order(description="order 5"),
] == o
o = sess.query(Order).filter(with_parent(u1, User.orders)).all()
assert [
Order(description="order 1"),
Order(description="order 3"),
Order(description="order 5"),
] == o
# test generative criterion
o = sess.query(Order).with_parent(u1).filter(orders.c.id > 2).all()
assert [
Order(description="order 3"),
Order(description="order 5"),
] == o
# test against None for parent? this can't be done with the current
# API since we don't know what mapper to use
# assert
# sess.query(Order).with_parent(None, property='addresses').all()
# == [Order(description="order 5")]
def test_select_from(self):
User, Address = self.classes.User, self.classes.Address
sess = create_session()
u1 = sess.query(User).get(7)
q = sess.query(Address).select_from(Address).with_parent(u1)
self.assert_compile(
q,
"SELECT addresses.id AS addresses_id, "
"addresses.user_id AS addresses_user_id, "
"addresses.email_address AS addresses_email_address "
"FROM addresses WHERE :param_1 = addresses.user_id",
{"param_1": 7},
)
def test_from_entity_standalone_fn(self):
User, Address = self.classes.User, self.classes.Address
sess = create_session()
u1 = sess.query(User).get(7)
q = sess.query(User, Address).filter(
with_parent(u1, "addresses", from_entity=Address)
)
self.assert_compile(
q,
"SELECT users.id AS users_id, users.name AS users_name, "
"addresses.id AS addresses_id, addresses.user_id "
"AS addresses_user_id, "
"addresses.email_address AS addresses_email_address "
"FROM users, addresses "
"WHERE :param_1 = addresses.user_id",
{"param_1": 7},
)
def test_from_entity_query_entity(self):
User, Address = self.classes.User, self.classes.Address
sess = create_session()
u1 = sess.query(User).get(7)
q = sess.query(User, Address).with_parent(
u1, "addresses", from_entity=Address
)
self.assert_compile(
q,
"SELECT users.id AS users_id, users.name AS users_name, "
"addresses.id AS addresses_id, addresses.user_id "
"AS addresses_user_id, "
"addresses.email_address AS addresses_email_address "
"FROM users, addresses "
"WHERE :param_1 = addresses.user_id",
{"param_1": 7},
)
def test_select_from_alias(self):
User, Address = self.classes.User, self.classes.Address
sess = create_session()
u1 = sess.query(User).get(7)
a1 = aliased(Address)
q = sess.query(a1).with_parent(u1)
self.assert_compile(
q,
"SELECT addresses_1.id AS addresses_1_id, "
"addresses_1.user_id AS addresses_1_user_id, "
"addresses_1.email_address AS addresses_1_email_address "
"FROM addresses AS addresses_1 "
"WHERE :param_1 = addresses_1.user_id",
{"param_1": 7},
)
def test_select_from_alias_explicit_prop(self):
User, Address = self.classes.User, self.classes.Address
sess = create_session()
u1 = sess.query(User).get(7)
a1 = aliased(Address)
q = sess.query(a1).with_parent(u1, "addresses")
self.assert_compile(
q,
"SELECT addresses_1.id AS addresses_1_id, "
"addresses_1.user_id AS addresses_1_user_id, "
"addresses_1.email_address AS addresses_1_email_address "
"FROM addresses AS addresses_1 "
"WHERE :param_1 = addresses_1.user_id",
{"param_1": 7},
)
def test_noparent(self):
Item, User = self.classes.Item, self.classes.User
sess = create_session()
q = sess.query(User)
u1 = q.filter_by(name="jack").one()
try:
q = sess.query(Item).with_parent(u1)
assert False
except sa_exc.InvalidRequestError as e:
assert (
str(e) == "Could not locate a property which relates "
"instances of class 'Item' to instances of class 'User'"
)
def test_m2m(self):
Item, Keyword = self.classes.Item, self.classes.Keyword
sess = create_session()
i1 = sess.query(Item).filter_by(id=2).one()
k = sess.query(Keyword).with_parent(i1).all()
assert [
Keyword(name="red"),
Keyword(name="small"),
Keyword(name="square"),
] == k
def test_with_transient(self):
User, Order = self.classes.User, self.classes.Order
sess = Session()
q = sess.query(User)
u1 = q.filter_by(name="jack").one()
utrans = User(id=u1.id)
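        # a transient User that carries only the primary key value is
        # sufficient; with_parent() binds utrans.id into the lazy clause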
o = sess.query(Order).with_parent(utrans, "orders")
eq_(
[
Order(description="order 1"),
Order(description="order 3"),
Order(description="order 5"),
],
o.all(),
)
o = sess.query(Order).filter(with_parent(utrans, "orders"))
eq_(
[
Order(description="order 1"),
Order(description="order 3"),
Order(description="order 5"),
],
o.all(),
)
def test_with_pending_autoflush(self):
Order, User = self.classes.Order, self.classes.User
sess = Session()
o1 = sess.query(Order).first()
opending = Order(id=20, user_id=o1.user_id)
sess.add(opending)
eq_(
sess.query(User).with_parent(opending, "user").one(),
User(id=o1.user_id),
)
eq_(
sess.query(User).filter(with_parent(opending, "user")).one(),
User(id=o1.user_id),
)
def test_with_pending_no_autoflush(self):
Order, User = self.classes.Order, self.classes.User
sess = Session(autoflush=False)
o1 = sess.query(Order).first()
opending = Order(user_id=o1.user_id)
sess.add(opending)
eq_(
sess.query(User).with_parent(opending, "user").one(),
User(id=o1.user_id),
)
def test_unique_binds_union(self):
"""bindparams used in the 'parent' query are unique"""
User, Address = self.classes.User, self.classes.Address
sess = Session()
u1, u2 = sess.query(User).order_by(User.id)[0:2]
q1 = sess.query(Address).with_parent(u1, "addresses")
q2 = sess.query(Address).with_parent(u2, "addresses")
self.assert_compile(
q1.union(q2),
"SELECT anon_1.addresses_id AS anon_1_addresses_id, "
"anon_1.addresses_user_id AS anon_1_addresses_user_id, "
"anon_1.addresses_email_address AS "
"anon_1_addresses_email_address FROM (SELECT addresses.id AS "
"addresses_id, addresses.user_id AS addresses_user_id, "
"addresses.email_address AS addresses_email_address FROM "
"addresses WHERE :param_1 = addresses.user_id UNION SELECT "
"addresses.id AS addresses_id, addresses.user_id AS "
"addresses_user_id, addresses.email_address "
"AS addresses_email_address "
"FROM addresses WHERE :param_2 = addresses.user_id) AS anon_1",
checkparams={"param_1": 7, "param_2": 8},
)
def test_unique_binds_or(self):
User, Address = self.classes.User, self.classes.Address
sess = Session()
u1, u2 = sess.query(User).order_by(User.id)[0:2]
self.assert_compile(
sess.query(Address).filter(
or_(with_parent(u1, "addresses"), with_parent(u2, "addresses"))
),
"SELECT addresses.id AS addresses_id, addresses.user_id AS "
"addresses_user_id, addresses.email_address AS "
"addresses_email_address FROM addresses WHERE "
":param_1 = addresses.user_id OR :param_2 = addresses.user_id",
checkparams={"param_1": 7, "param_2": 8},
)
class WithTransientOnNone(_fixtures.FixtureTest, AssertsCompiledSQL):
run_inserts = None
__dialect__ = "default"
def _fixture1(self):
User, Address, Dingaling, HasDingaling = (
self.classes.User,
self.classes.Address,
self.classes.Dingaling,
self.classes.HasDingaling,
)
users, addresses, dingalings, has_dingaling = (
self.tables.users,
self.tables.addresses,
self.tables.dingalings,
self.tables.has_dingaling,
)
mapper(User, users)
mapper(
Address,
addresses,
properties={
"user": relationship(User),
"special_user": relationship(
User,
primaryjoin=and_(
users.c.id == addresses.c.user_id,
users.c.name == addresses.c.email_address,
),
),
},
)
mapper(Dingaling, dingalings)
mapper(
HasDingaling,
has_dingaling,
properties={
"dingaling": relationship(
Dingaling,
primaryjoin=and_(
dingalings.c.id == has_dingaling.c.dingaling_id,
dingalings.c.data == "hi",
),
)
},
)
def test_filter_with_transient_dont_assume_pk(self):
self._fixture1()
User, Address = self.classes.User, self.classes.Address
sess = Session()
q = sess.query(Address).filter(Address.user == User())
assert_raises_message(
sa_exc.StatementError,
"Can't resolve value for column users.id on object "
".User at .*; no value has been set for this column",
q.all,
)
def test_filter_with_transient_given_pk(self):
self._fixture1()
User, Address = self.classes.User, self.classes.Address
sess = Session()
q = sess.query(Address).filter(Address.user == User(id=None))
with expect_warnings("Got None for value of column "):
self.assert_compile(
q,
"SELECT addresses.id AS addresses_id, "
"addresses.user_id AS addresses_user_id, "
"addresses.email_address AS addresses_email_address "
"FROM addresses WHERE :param_1 = addresses.user_id",
checkparams={"param_1": None},
)
def test_filter_with_transient_given_pk_but_only_later(self):
self._fixture1()
User, Address = self.classes.User, self.classes.Address
sess = Session()
u1 = User()
# id is not set, so evaluates to NEVER_SET
q = sess.query(Address).filter(Address.user == u1)
# but we set it, so we should get the warning
u1.id = None
with expect_warnings("Got None for value of column "):
self.assert_compile(
q,
"SELECT addresses.id AS addresses_id, "
"addresses.user_id AS addresses_user_id, "
"addresses.email_address AS addresses_email_address "
"FROM addresses WHERE :param_1 = addresses.user_id",
checkparams={"param_1": None},
)
def test_filter_with_transient_warn_for_none_against_non_pk(self):
self._fixture1()
User, Address = self.classes.User, self.classes.Address
s = Session()
q = s.query(Address).filter(
Address.special_user == User(id=None, name=None)
)
with expect_warnings("Got None for value of column"):
self.assert_compile(
q,
"SELECT addresses.id AS addresses_id, "
"addresses.user_id AS addresses_user_id, "
"addresses.email_address AS addresses_email_address "
"FROM addresses WHERE :param_1 = addresses.user_id "
"AND :param_2 = addresses.email_address",
checkparams={"param_1": None, "param_2": None},
)
def test_filter_with_persistent_non_pk_col_is_default_null(self):
# test #4676 - comparison to a persistent column that is
# NULL in the database, but is not fetched
self._fixture1()
Dingaling, HasDingaling = (
self.classes.Dingaling,
self.classes.HasDingaling,
)
s = Session()
d = Dingaling(id=1)
s.add(d)
s.flush()
assert "data" not in d.__dict__
q = s.query(HasDingaling).filter_by(dingaling=d)
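        # the unloaded "data" column evaluates to None (param_2), while the
        # relationship's literal criterion data == "hi" renders as :data_1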
with expect_warnings("Got None for value of column"):
self.assert_compile(
q,
"SELECT has_dingaling.id AS has_dingaling_id, "
"has_dingaling.dingaling_id AS has_dingaling_dingaling_id "
"FROM has_dingaling WHERE :param_1 = "
"has_dingaling.dingaling_id AND :param_2 = :data_1",
checkparams={"param_1": 1, "param_2": None, "data_1": "hi"},
)
def test_filter_with_detached_non_pk_col_is_default_null(self):
self._fixture1()
Dingaling, HasDingaling = (
self.classes.Dingaling,
self.classes.HasDingaling,
)
s = Session()
d = Dingaling()
s.add(d)
s.flush()
s.commit()
d.id
s.expire(d, ["data"])
s.expunge(d)
assert "data" not in d.__dict__
assert "id" in d.__dict__
q = s.query(HasDingaling).filter_by(dingaling=d)
        # this case we still can't handle: the object is detached, so we
        # assume nothing
assert_raises_message(
sa_exc.StatementError,
r"Can't resolve value for column dingalings.data on "
r"object .*Dingaling.* the object is detached and "
r"the value was expired",
q.all,
)
def test_filter_with_detached_non_pk_col_has_value(self):
self._fixture1()
Dingaling, HasDingaling = (
self.classes.Dingaling,
self.classes.HasDingaling,
)
s = Session()
d = Dingaling(data="some data")
s.add(d)
s.commit()
s.expire(d)
assert "data" not in d.__dict__
q = s.query(HasDingaling).filter_by(dingaling=d)
self.assert_compile(
q,
"SELECT has_dingaling.id AS has_dingaling_id, "
"has_dingaling.dingaling_id AS has_dingaling_dingaling_id "
"FROM has_dingaling WHERE :param_1 = "
"has_dingaling.dingaling_id AND :param_2 = :data_1",
checkparams={"param_1": 1, "param_2": "some data", "data_1": "hi"},
)
def test_with_parent_with_transient_assume_pk(self):
self._fixture1()
User, Address = self.classes.User, self.classes.Address
sess = Session()
q = sess.query(User).with_parent(Address(user_id=None), "user")
with expect_warnings("Got None for value of column"):
self.assert_compile(
q,
"SELECT users.id AS users_id, users.name AS users_name "
"FROM users WHERE users.id = :param_1",
checkparams={"param_1": None},
)
def test_with_parent_with_transient_warn_for_none_against_non_pk(self):
self._fixture1()
User, Address = self.classes.User, self.classes.Address
s = Session()
q = s.query(User).with_parent(
Address(user_id=None, email_address=None), "special_user"
)
with expect_warnings("Got None for value of column"):
self.assert_compile(
q,
"SELECT users.id AS users_id, users.name AS users_name "
"FROM users WHERE users.id = :param_1 "
"AND users.name = :param_2",
checkparams={"param_1": None, "param_2": None},
)
def test_negated_contains_or_equals_plain_m2o(self):
self._fixture1()
User, Address = self.classes.User, self.classes.Address
s = Session()
q = s.query(Address).filter(Address.user != User(id=None))
with expect_warnings("Got None for value of column"):
self.assert_compile(
q,
"SELECT addresses.id AS addresses_id, "
"addresses.user_id AS addresses_user_id, "
"addresses.email_address AS addresses_email_address "
"FROM addresses "
"WHERE addresses.user_id != :user_id_1 "
"OR addresses.user_id IS NULL",
checkparams={"user_id_1": None},
)
def test_negated_contains_or_equals_complex_rel(self):
self._fixture1()
User, Address = self.classes.User, self.classes.Address
s = Session()
# this one does *not* warn because we do the criteria
# without deferral
q = s.query(Address).filter(Address.special_user != User(id=None))
self.assert_compile(
q,
"SELECT addresses.id AS addresses_id, "
"addresses.user_id AS addresses_user_id, "
"addresses.email_address AS addresses_email_address "
"FROM addresses "
"WHERE NOT (EXISTS (SELECT 1 "
"FROM users "
"WHERE users.id = addresses.user_id AND "
"users.name = addresses.email_address AND users.id IS NULL))",
checkparams={},
)
class SynonymTest(QueryTest, AssertsCompiledSQL):
__dialect__ = "default"
@classmethod
def setup_mappers(cls):
(
users,
Keyword,
items,
order_items,
orders,
Item,
User,
Address,
keywords,
Order,
item_keywords,
addresses,
) = (
cls.tables.users,
cls.classes.Keyword,
cls.tables.items,
cls.tables.order_items,
cls.tables.orders,
cls.classes.Item,
cls.classes.User,
cls.classes.Address,
cls.tables.keywords,
cls.classes.Order,
cls.tables.item_keywords,
cls.tables.addresses,
)
mapper(
User,
users,
properties={
"name_syn": synonym("name"),
"addresses": relationship(Address),
"orders": relationship(
Order, backref="user", order_by=orders.c.id
), # o2m, m2o
"orders_syn": synonym("orders"),
"orders_syn_2": synonym("orders_syn"),
},
)
mapper(Address, addresses)
mapper(
Order,
orders,
properties={
"items": relationship(Item, secondary=order_items), # m2m
"address": relationship(Address), # m2o
"items_syn": synonym("items"),
},
)
mapper(
Item,
items,
properties={
"keywords": relationship(
Keyword, secondary=item_keywords
) # m2m
},
)
mapper(Keyword, keywords)
def test_options(self):
User, Order = self.classes.User, self.classes.Order
s = create_session()
def go():
result = (
s.query(User)
.filter_by(name="jack")
.options(joinedload(User.orders_syn))
.all()
)
eq_(
result,
[
User(
id=7,
name="jack",
orders=[
Order(description="order 1"),
Order(description="order 3"),
Order(description="order 5"),
],
)
],
)
self.assert_sql_count(testing.db, go, 1)
def test_options_syn_of_syn(self):
User, Order = self.classes.User, self.classes.Order
s = create_session()
def go():
result = (
s.query(User)
.filter_by(name="jack")
.options(joinedload(User.orders_syn_2))
.all()
)
eq_(
result,
[
User(
id=7,
name="jack",
orders=[
Order(description="order 1"),
Order(description="order 3"),
Order(description="order 5"),
],
)
],
)
self.assert_sql_count(testing.db, go, 1)
def test_options_syn_of_syn_string(self):
User, Order = self.classes.User, self.classes.Order
s = create_session()
def go():
result = (
s.query(User)
.filter_by(name="jack")
.options(joinedload("orders_syn_2"))
.all()
)
eq_(
result,
[
User(
id=7,
name="jack",
orders=[
Order(description="order 1"),
Order(description="order 3"),
Order(description="order 5"),
],
)
],
)
self.assert_sql_count(testing.db, go, 1)
def test_joins(self):
User, Order = self.classes.User, self.classes.Order
for j in (
["orders", "items"],
["orders_syn", "items"],
[User.orders_syn, Order.items],
["orders_syn_2", "items"],
[User.orders_syn_2, "items"],
["orders", "items_syn"],
["orders_syn", "items_syn"],
["orders_syn_2", "items_syn"],
):
result = (
create_session().query(User).join(*j).filter_by(id=3).all()
)
assert [User(id=7, name="jack"), User(id=9, name="fred")] == result
def test_with_parent(self):
Order, User = self.classes.Order, self.classes.User
for nameprop, orderprop in (
("name", "orders"),
("name_syn", "orders"),
("name", "orders_syn"),
("name", "orders_syn_2"),
("name_syn", "orders_syn"),
("name_syn", "orders_syn_2"),
):
sess = create_session()
q = sess.query(User)
u1 = q.filter_by(**{nameprop: "jack"}).one()
o = sess.query(Order).with_parent(u1, property=orderprop).all()
assert [
Order(description="order 1"),
Order(description="order 3"),
Order(description="order 5"),
] == o
def test_froms_aliased_col(self):
Address, User = self.classes.Address, self.classes.User
sess = create_session()
ua = aliased(User)
q = sess.query(ua.name_syn).join(Address, ua.id == Address.user_id)
self.assert_compile(
q,
"SELECT users_1.name AS users_1_name FROM "
"users AS users_1 JOIN addresses "
"ON users_1.id = addresses.user_id",
)
class ImmediateTest(_fixtures.FixtureTest):
run_inserts = "once"
run_deletes = None
@classmethod
def setup_mappers(cls):
Address, addresses, users, User = (
cls.classes.Address,
cls.tables.addresses,
cls.tables.users,
cls.classes.User,
)
mapper(Address, addresses)
mapper(User, users, properties=dict(addresses=relationship(Address)))
def test_one(self):
User, Address = self.classes.User, self.classes.Address
sess = create_session()
assert_raises_message(
sa.orm.exc.NoResultFound,
r"No row was found for one\(\)",
sess.query(User).filter(User.id == 99).one,
)
eq_(sess.query(User).filter(User.id == 7).one().id, 7)
assert_raises_message(
sa.orm.exc.MultipleResultsFound,
r"Multiple rows were found for one\(\)",
sess.query(User).one,
)
assert_raises(
sa.orm.exc.NoResultFound,
sess.query(User.id, User.name).filter(User.id == 99).one,
)
eq_(
sess.query(User.id, User.name).filter(User.id == 7).one(),
(7, "jack"),
)
assert_raises(
sa.orm.exc.MultipleResultsFound, sess.query(User.id, User.name).one
)
assert_raises(
sa.orm.exc.NoResultFound,
(
sess.query(User, Address)
.join(User.addresses)
.filter(Address.id == 99)
).one,
)
eq_(
(
sess.query(User, Address)
.join(User.addresses)
.filter(Address.id == 4)
).one(),
(User(id=8), Address(id=4)),
)
assert_raises(
sa.orm.exc.MultipleResultsFound,
sess.query(User, Address).join(User.addresses).one,
)
# this result returns multiple rows, the first
# two rows being the same. but uniquing is
# not applied for a column based result.
assert_raises(
sa.orm.exc.MultipleResultsFound,
sess.query(User.id)
.join(User.addresses)
.filter(User.id.in_([8, 9]))
.order_by(User.id)
.one,
)
# test that a join which ultimately returns
# multiple identities across many rows still
# raises, even though the first two rows are of
# the same identity and unique filtering
# is applied ([ticket:1688])
assert_raises(
sa.orm.exc.MultipleResultsFound,
sess.query(User)
.join(User.addresses)
.filter(User.id.in_([8, 9]))
.order_by(User.id)
.one,
)
def test_one_or_none(self):
User, Address = self.classes.User, self.classes.Address
sess = create_session()
eq_(sess.query(User).filter(User.id == 99).one_or_none(), None)
eq_(sess.query(User).filter(User.id == 7).one_or_none().id, 7)
assert_raises_message(
sa.orm.exc.MultipleResultsFound,
r"Multiple rows were found for one_or_none\(\)",
sess.query(User).one_or_none,
)
eq_(
sess.query(User.id, User.name).filter(User.id == 99).one_or_none(),
None,
)
eq_(
sess.query(User.id, User.name).filter(User.id == 7).one_or_none(),
(7, "jack"),
)
assert_raises(
sa.orm.exc.MultipleResultsFound,
sess.query(User.id, User.name).one_or_none,
)
eq_(
(
sess.query(User, Address)
.join(User.addresses)
.filter(Address.id == 99)
).one_or_none(),
None,
)
eq_(
(
sess.query(User, Address)
.join(User.addresses)
.filter(Address.id == 4)
).one_or_none(),
(User(id=8), Address(id=4)),
)
assert_raises(
sa.orm.exc.MultipleResultsFound,
sess.query(User, Address).join(User.addresses).one_or_none,
)
# this result returns multiple rows, the first
# two rows being the same. but uniquing is
# not applied for a column based result.
assert_raises(
sa.orm.exc.MultipleResultsFound,
sess.query(User.id)
.join(User.addresses)
.filter(User.id.in_([8, 9]))
.order_by(User.id)
.one_or_none,
)
# test that a join which ultimately returns
# multiple identities across many rows still
# raises, even though the first two rows are of
# the same identity and unique filtering
# is applied ([ticket:1688])
assert_raises(
sa.orm.exc.MultipleResultsFound,
sess.query(User)
.join(User.addresses)
.filter(User.id.in_([8, 9]))
.order_by(User.id)
.one_or_none,
)
@testing.future
def test_getslice(self):
assert False
def test_scalar(self):
User = self.classes.User
sess = create_session()
eq_(sess.query(User.id).filter_by(id=7).scalar(), 7)
eq_(sess.query(User.id, User.name).filter_by(id=7).scalar(), 7)
eq_(sess.query(User.id).filter_by(id=0).scalar(), None)
eq_(
sess.query(User).filter_by(id=7).scalar(),
sess.query(User).filter_by(id=7).one(),
)
assert_raises(sa.orm.exc.MultipleResultsFound, sess.query(User).scalar)
assert_raises(
sa.orm.exc.MultipleResultsFound,
sess.query(User.id, User.name).scalar,
)
def test_value(self):
User = self.classes.User
sess = create_session()
eq_(sess.query(User).filter_by(id=7).value(User.id), 7)
eq_(sess.query(User.id, User.name).filter_by(id=7).value(User.id), 7)
eq_(sess.query(User).filter_by(id=0).value(User.id), None)
sess.bind = testing.db
eq_(sess.query().value(sa.literal_column("1").label("x")), 1)
class ExecutionOptionsTest(QueryTest):
def test_option_building(self):
User = self.classes.User
sess = create_session(bind=testing.db, autocommit=False)
q1 = sess.query(User)
eq_(q1._execution_options, dict())
q2 = q1.execution_options(foo="bar", stream_results=True)
# q1's options should be unchanged.
eq_(q1._execution_options, dict())
# q2 should have them set.
eq_(q2._execution_options, dict(foo="bar", stream_results=True))
q3 = q2.execution_options(foo="not bar", answer=42)
eq_(q2._execution_options, dict(foo="bar", stream_results=True))
q3_options = dict(foo="not bar", stream_results=True, answer=42)
eq_(q3._execution_options, q3_options)
def test_get_options(self):
User = self.classes.User
sess = create_session(bind=testing.db, autocommit=False)
q = sess.query(User).execution_options(foo="bar", stream_results=True)
eq_(q.get_execution_options(), dict(foo="bar", stream_results=True))
def test_options_in_connection(self):
User = self.classes.User
execution_options = dict(foo="bar", stream_results=True)
class TQuery(Query):
def instances(self, result, ctx):
try:
eq_(
result.connection._execution_options, execution_options
)
finally:
result.close()
return iter([])
sess = create_session(
bind=testing.db, autocommit=False, query_cls=TQuery
)
q1 = sess.query(User).execution_options(**execution_options)
q1.all()
class BooleanEvalTest(fixtures.TestBase, testing.AssertsCompiledSQL):
"""test standalone booleans being wrapped in an AsBoolean, as well
as true/false compilation."""
def _dialect(self, native_boolean):
d = default.DefaultDialect()
d.supports_native_boolean = native_boolean
return d
def test_one(self):
s = Session()
c = column("x", Boolean)
self.assert_compile(
s.query(c).filter(c),
"SELECT x WHERE x",
dialect=self._dialect(True),
)
def test_two(self):
s = Session()
c = column("x", Boolean)
self.assert_compile(
s.query(c).filter(c),
"SELECT x WHERE x = 1",
dialect=self._dialect(False),
)
def test_three(self):
s = Session()
c = column("x", Boolean)
self.assert_compile(
s.query(c).filter(~c),
"SELECT x WHERE x = 0",
dialect=self._dialect(False),
)
def test_four(self):
s = Session()
c = column("x", Boolean)
self.assert_compile(
s.query(c).filter(~c),
"SELECT x WHERE NOT x",
dialect=self._dialect(True),
)
def test_five(self):
s = Session()
c = column("x", Boolean)
self.assert_compile(
s.query(c).having(c),
"SELECT x HAVING x = 1",
dialect=self._dialect(False),
)
class SessionBindTest(QueryTest):
@contextlib.contextmanager
def _assert_bind_args(self, session):
get_bind = mock.Mock(side_effect=session.get_bind)
with mock.patch.object(session, "get_bind", get_bind):
yield
for call_ in get_bind.mock_calls:
is_(call_[1][0], inspect(self.classes.User))
is_not_(call_[2]["clause"], None)
def test_single_entity_q(self):
User = self.classes.User
session = Session()
with self._assert_bind_args(session):
session.query(User).all()
def test_sql_expr_entity_q(self):
User = self.classes.User
session = Session()
with self._assert_bind_args(session):
session.query(User.id).all()
def test_count(self):
User = self.classes.User
session = Session()
with self._assert_bind_args(session):
session.query(User).count()
def test_aggregate_fn(self):
User = self.classes.User
session = Session()
with self._assert_bind_args(session):
session.query(func.max(User.name)).all()
def test_bulk_update_no_sync(self):
User = self.classes.User
session = Session()
with self._assert_bind_args(session):
session.query(User).filter(User.id == 15).update(
{"name": "foob"}, synchronize_session=False
)
def test_bulk_delete_no_sync(self):
User = self.classes.User
session = Session()
with self._assert_bind_args(session):
session.query(User).filter(User.id == 15).delete(
synchronize_session=False
)
def test_bulk_update_fetch_sync(self):
User = self.classes.User
session = Session()
with self._assert_bind_args(session):
session.query(User).filter(User.id == 15).update(
{"name": "foob"}, synchronize_session="fetch"
)
def test_bulk_delete_fetch_sync(self):
User = self.classes.User
session = Session()
with self._assert_bind_args(session):
session.query(User).filter(User.id == 15).delete(
synchronize_session="fetch"
)
def test_column_property(self):
User = self.classes.User
mapper = inspect(User)
mapper.add_property(
"score",
column_property(func.coalesce(self.tables.users.c.name, None)),
)
session = Session()
with self._assert_bind_args(session):
session.query(func.max(User.score)).scalar()
@testing.requires.nested_aggregates
def test_column_property_select(self):
User = self.classes.User
Address = self.classes.Address
mapper = inspect(User)
mapper.add_property(
"score",
column_property(
select([func.sum(Address.id)])
.where(Address.user_id == User.id)
.scalar_subquery()
),
)
session = Session()
with self._assert_bind_args(session):
session.query(func.max(User.score)).scalar()
class QueryClsTest(QueryTest):
def _fn_fixture(self):
def query(*arg, **kw):
return Query(*arg, **kw)
return query
def _subclass_fixture(self):
class MyQuery(Query):
pass
return MyQuery
def _callable_fixture(self):
class MyQueryFactory(object):
def __call__(self, *arg, **kw):
return Query(*arg, **kw)
return MyQueryFactory()
def _plain_fixture(self):
return Query
def _test_get(self, fixture):
User = self.classes.User
s = Session(query_cls=fixture())
assert s.query(User).get(19) is None
u = s.query(User).get(7)
u2 = s.query(User).get(7)
assert u is u2
def _test_o2m_lazyload(self, fixture):
User, Address = self.classes("User", "Address")
s = Session(query_cls=fixture())
u1 = s.query(User).filter(User.id == 7).first()
eq_(u1.addresses, [Address(id=1)])
def _test_m2o_lazyload(self, fixture):
User, Address = self.classes("User", "Address")
s = Session(query_cls=fixture())
a1 = s.query(Address).filter(Address.id == 1).first()
eq_(a1.user, User(id=7))
def _test_expr(self, fixture):
User, Address = self.classes("User", "Address")
s = Session(query_cls=fixture())
q = s.query(func.max(User.id).label("max"))
eq_(q.scalar(), 10)
def _test_expr_undocumented_query_constructor(self, fixture):
# see #4269. not documented but already out there.
User, Address = self.classes("User", "Address")
s = Session(query_cls=fixture())
q = Query(func.max(User.id).label("max")).with_session(s)
eq_(q.scalar(), 10)
def test_plain_get(self):
self._test_get(self._plain_fixture)
def test_callable_get(self):
self._test_get(self._callable_fixture)
def test_subclass_get(self):
self._test_get(self._subclass_fixture)
def test_fn_get(self):
self._test_get(self._fn_fixture)
def test_plain_expr(self):
self._test_expr(self._plain_fixture)
def test_callable_expr(self):
self._test_expr(self._callable_fixture)
def test_subclass_expr(self):
self._test_expr(self._subclass_fixture)
def test_fn_expr(self):
self._test_expr(self._fn_fixture)
def test_plain_expr_undocumented_query_constructor(self):
self._test_expr_undocumented_query_constructor(self._plain_fixture)
def test_callable_expr_undocumented_query_constructor(self):
self._test_expr_undocumented_query_constructor(self._callable_fixture)
def test_subclass_expr_undocumented_query_constructor(self):
self._test_expr_undocumented_query_constructor(self._subclass_fixture)
def test_fn_expr_undocumented_query_constructor(self):
self._test_expr_undocumented_query_constructor(self._fn_fixture)
def test_callable_o2m_lazyload(self):
self._test_o2m_lazyload(self._callable_fixture)
def test_subclass_o2m_lazyload(self):
self._test_o2m_lazyload(self._subclass_fixture)
def test_fn_o2m_lazyload(self):
self._test_o2m_lazyload(self._fn_fixture)
def test_callable_m2o_lazyload(self):
self._test_m2o_lazyload(self._callable_fixture)
def test_subclass_m2o_lazyload(self):
self._test_m2o_lazyload(self._subclass_fixture)
def test_fn_m2o_lazyload(self):
self._test_m2o_lazyload(self._fn_fixture)
|
wujuguang/sqlalchemy
|
test/orm/test_query.py
|
Python
|
mit
| 183,302 | 0.000005 |
# -*- coding: utf-8 -*-
"""
MiniTwit
~~~~~~~~
A microblogging application written with Flask and sqlite3.
:copyright: (c) 2010 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
from __future__ import with_statement
import time
from sqlite3 import dbapi2 as sqlite3
from hashlib import md5
from datetime import datetime
from contextlib import closing
from flask import Flask, request, session, url_for, redirect, \
render_template, abort, g, flash
from werkzeug import check_password_hash, generate_password_hash
# configuration
DATABASE = '/tmp/minitwit.db'
PER_PAGE = 30
DEBUG = True
SECRET_KEY = 'development key'
# create our little application :)
app = Flask(__name__)
app.config.from_object(__name__)
app.config.from_envvar('MINITWIT_SETTINGS', silent=True)
def connect_db():
"""Returns a new connection to the database."""
return sqlite3.connect(app.config['DATABASE'])
def init_db():
"""Creates the database tables."""
with closing(connect_db()) as db:
with app.open_resource('schema.sql') as f:
db.cursor().executescript(f.read())
db.commit()
def query_db(query, args=(), one=False):
"""Queries the database and returns a list of dictionaries."""
cur = g.db.execute(query, args)
rv = [dict((cur.description[idx][0], value)
for idx, value in enumerate(row)) for row in cur.fetchall()]
return (rv[0] if rv else None) if one else rv
def get_user_id(username):
"""Convenience method to look up the id for a username."""
rv = g.db.execute('select user_id from user where username = ?',
[username]).fetchone()
return rv[0] if rv else None
def format_datetime(timestamp):
"""Format a timestamp for display."""
return datetime.utcfromtimestamp(timestamp).strftime('%Y-%m-%d @ %H:%M')
def gravatar_url(email, size=80):
"""Return the gravatar image for the given email address."""
return 'http://www.gravatar.com/avatar/%s?d=identicon&s=%d' % \
(md5(email.strip().lower().encode('utf-8')).hexdigest(), size)
@app.before_request
def before_request():
"""Make sure we are connected to the database each request and look
up the current user so that we know he's there.
"""
g.db = connect_db()
g.user = None
if 'user_id' in session:
g.user = query_db('select * from user where user_id = ?',
[session['user_id']], one=True)
@app.teardown_request
def teardown_request(exception):
"""Closes the database again at the end of the request."""
if hasattr(g, 'db'):
g.db.close()
@app.route('/')
def timeline():
"""Shows a users timeline or if no user is logged in it will
redirect to the public timeline. This timeline shows the user's
messages as well as all the messages of followed users.
"""
if not g.user:
return redirect(url_for('public_timeline'))
return render_template('timeline.html', messages=query_db('''
select message.*, user.* from message, user
where message.author_id = user.user_id and (
user.user_id = ? or
user.user_id in (select whom_id from follower
where who_id = ?))
order by message.pub_date desc limit ?''',
[session['user_id'], session['user_id'], PER_PAGE]))
@app.route('/public')
def public_timeline():
"""Displays the latest messages of all users."""
return render_template('timeline.html', messages=query_db('''
select message.*, user.* from message, user
where message.author_id = user.user_id
order by message.pub_date desc limit ?''', [PER_PAGE]))
@app.route('/<username>')
def user_timeline(username):
"""Display's a users tweets."""
profile_user = query_db('select * from user where username = ?',
[username], one=True)
if profile_user is None:
abort(404)
followed = False
if g.user:
followed = query_db('''select 1 from follower where
follower.who_id = ? and follower.whom_id = ?''',
[session['user_id'], profile_user['user_id']],
one=True) is not None
return render_template('timeline.html', messages=query_db('''
select message.*, user.* from message, user where
user.user_id = message.author_id and user.user_id = ?
order by message.pub_date desc limit ?''',
[profile_user['user_id'], PER_PAGE]), followed=followed,
profile_user=profile_user)
@app.route('/<username>/follow')
def follow_user(username):
"""Adds the current user as follower of the given user."""
if not g.user:
abort(401)
whom_id = get_user_id(username)
if whom_id is None:
abort(404)
g.db.execute('insert into follower (who_id, whom_id) values (?, ?)',
[session['user_id'], whom_id])
g.db.commit()
flash('You are now following "%s"' % username)
return redirect(url_for('user_timeline', username=username))
@app.route('/<username>/unfollow')
def unfollow_user(username):
"""Removes the current user as follower of the given user."""
if not g.user:
abort(401)
whom_id = get_user_id(username)
if whom_id is None:
abort(404)
g.db.execute('delete from follower where who_id=? and whom_id=?',
[session['user_id'], whom_id])
g.db.commit()
flash('You are no longer following "%s"' % username)
return redirect(url_for('user_timeline', username=username))
@app.route('/add_message', methods=['POST'])
def add_message():
"""Registers a new message for the user."""
if 'user_id' not in session:
abort(401)
if request.form['text']:
g.db.execute('''insert into message (author_id, text, pub_date)
values (?, ?, ?)''', (session['user_id'], request.form['text'],
int(time.time())))
g.db.commit()
flash('Your message was recorded')
return redirect(url_for('timeline'))
@app.route('/login', methods=['GET', 'POST'])
def login():
"""Logs the user in."""
if g.user:
return redirect(url_for('timeline'))
error = None
if request.method == 'POST':
user = query_db('''select * from user where
username = ?''', [request.form['username']], one=True)
if user is None:
error = 'Invalid username'
elif not check_password_hash(user['pw_hash'],
request.form['password']):
error = 'Invalid password'
else:
flash('You were logged in')
session['user_id'] = user['user_id']
return redirect(url_for('timeline'))
return render_template('login.html', error=error)
@app.route('/register', methods=['GET', 'POST'])
def register():
"""Registers the user."""
if g.user:
return redirect(url_for('timeline'))
error = None
if request.method == 'POST':
if not request.form['username']:
error = 'You have to enter a username'
elif not request.form['email'] or \
'@' not in request.form['email']:
error = 'You have to enter a valid email address'
elif not request.form['password']:
error = 'You have to enter a password'
elif request.form['password'] != request.form['password2']:
error = 'The two passwords do not match'
elif get_user_id(request.form['username']) is not None:
error = 'The username is already taken'
else:
g.db.execute('''insert into user (
username, email, pw_hash) values (?, ?, ?)''',
[request.form['username'], request.form['email'],
generate_password_hash(request.form['password'])])
g.db.commit()
flash('You were successfully registered and can login now')
return redirect(url_for('login'))
return render_template('register.html', error=error)
@app.route('/logout')
def logout():
"""Logs the user out."""
flash('You were logged out')
session.pop('user_id', None)
return redirect(url_for('public_timeline'))
# add some filters to jinja
app.jinja_env.filters['datetimeformat'] = format_datetime
app.jinja_env.filters['gravatar'] = gravatar_url
if __name__ == '__main__':
app.run()
|
HackingHabits/PersonalPasswordManager
|
packages/Flask/examples/minitwit/minitwit.py
|
Python
|
mit
| 8,424 | 0.00095 |
import string
import random
import json
from collections import defaultdict
from django.http import HttpResponse
from django.shortcuts import render_to_response
from django.template.context import RequestContext
from catmaid.fields import Double3D
from catmaid.models import Log, NeuronSearch, CELL_BODY_CHOICES, \
SORT_ORDERS_DICT, Relation, Class, ClassInstance, \
ClassInstanceClassInstance
def _create_relation(user, project_id, relation_id, instance_a_id, instance_b_id):
relation = ClassInstanceClassInstance()
relation.user = user
relation.project_id = project_id
relation.relation_id = relation_id
relation.class_instance_a_id = instance_a_id
relation.class_instance_b_id = instance_b_id
relation.save()
return relation
def insert_into_log(project_id, user_id, op_type, location=None, freetext=None):
""" Inserts a new entry into the log table. If the location parameter is
passed, it is expected to be an iteratable (list, tuple).
"""
# valid operation types
operation_type_array = [
"rename_root",
"create_neuron",
"rename_neuron",
"remove_neuron",
"move_neuron",
"create_group",
"rename_group",
"remove_group",
"move_group",
"create_skeleton",
"rename_skeleton",
"remove_skeleton",
"move_skeleton",
"split_skeleton",
"join_skeleton",
"reroot_skeleton",
"change_confidence"
]
if not op_type in operation_type_array:
return {'error': 'Operation type {0} not valid'.format(op_type)}
new_log = Log()
new_log.user_id = user_id
new_log.project_id = project_id
new_log.operation_type = op_type
if not location is None:
new_log.location = Double3D(*location)
if not freetext is None:
new_log.freetext = freetext
new_log.save()
# $q = $db->insertIntoId('log', $data );
# echo json_encode( array ( 'error' => "Failed to insert operation $op_type for user $uid in project %pid." ) );
# Tip from: http://lincolnloop.com/blog/2008/may/10/getting-requestcontext-your-templates/
# Required because we need a RequestContext, not just a Context - the
# former looks at TEMPLATE_CONTEXT_PROCESSORS, while the latter doesn't.
def my_render_to_response(req, *args, **kwargs):
kwargs['context_instance'] = RequestContext(req)
return render_to_response(*args, **kwargs)
def json_error_response(message):
"""
When an operation fails we should return a JSON dictionary
with the key 'error' set to an error message. This is a
helper method to return such a structure:
"""
return HttpResponse(json.dumps({'error': message}),
content_type='text/json')
def order_neurons(neurons, order_by=None):
column, reverse = 'name', False
if order_by and (order_by in SORT_ORDERS_DICT):
column, reverse, _ = SORT_ORDERS_DICT[order_by]
if column == 'name':
neurons.sort(key=lambda x: x.name)
elif column == 'gal4':
neurons.sort(key=lambda x: x.cached_sorted_lines_str)
elif column == 'cell_body':
neurons.sort(key=lambda x: x.cached_cell_body)
else:
raise Exception("Unknown column (%s) in order_neurons" % (column,))
if reverse:
neurons.reverse()
return neurons
# Both index and visual_index take a request and kwargs and then
# return a list of neurons and a NeuronSearch form:
def get_form_and_neurons(request, project_id, kwargs):
# If we've been passed parameters in a REST-style GET request,
# create a form from them. Otherwise, if it's a POST request,
# create the form from the POST parameters. Otherwise, it's a
# plain request, so create the default search form.
rest_keys = ('search', 'cell_body_location', 'order_by')
if any((x in kwargs) for x in rest_keys):
kw_search = kwargs.get('search', None) or ""
kw_cell_body_choice = kwargs.get('cell_body_location', None) or "a"
kw_order_by = kwargs.get('order_by', None) or 'name'
search_form = NeuronSearch({'search': kw_search,
'cell_body_location': kw_cell_body_choice,
'order_by': kw_order_by})
elif request.method == 'POST':
search_form = NeuronSearch(request.POST)
else:
search_form = NeuronSearch({'search': '',
'cell_body_location': 'a',
'order_by': 'name'})
if search_form.is_valid():
search = search_form.cleaned_data['search']
cell_body_location = search_form.cleaned_data['cell_body_location']
order_by = search_form.cleaned_data['order_by']
else:
search = ''
cell_body_location = 'a'
order_by = 'name'
cell_body_choices_dict = dict(CELL_BODY_CHOICES)
all_neurons = ClassInstance.objects.filter(
project__id=project_id,
class_column__class_name='neuron',
name__icontains=search).exclude(name='orphaned pre').exclude(name='orphaned post')
if cell_body_location != 'a':
location = cell_body_choices_dict[cell_body_location]
all_neurons = all_neurons.filter(
project__id=project_id,
cici_via_a__relation__relation_name='has_cell_body',
cici_via_a__class_instance_b__name=location)
cici_qs = ClassInstanceClassInstance.objects.filter(
project__id=project_id,
relation__relation_name='has_cell_body',
class_instance_a__class_column__class_name='neuron',
class_instance_b__class_column__class_name='cell_body_location')
neuron_id_to_cell_body_location = dict(
(x.class_instance_a.id, x.class_instance_b.name) for x in cici_qs)
neuron_id_to_driver_lines = defaultdict(list)
for cici in ClassInstanceClassInstance.objects.filter(
project__id=project_id,
relation__relation_name='expresses_in',
class_instance_a__class_column__class_name='driver_line',
class_instance_b__class_column__class_name='neuron'):
neuron_id_to_driver_lines[cici.class_instance_b.id].append(cici.class_instance_a)
all_neurons = list(all_neurons)
for n in all_neurons:
n.cached_sorted_lines = sorted(
neuron_id_to_driver_lines[n.id], key=lambda x: x.name)
n.cached_sorted_lines_str = ", ".join(x.name for x in n.cached_sorted_lines)
n.cached_cell_body = neuron_id_to_cell_body_location.get(n.id, 'Unknown')
all_neurons = order_neurons(all_neurons, order_by)
return (all_neurons, search_form)
# TODO After all PHP functions have been replaced and all occurrences of
# this odd behavior have been found, change callers to not depend on this
# legacy functionality.
def makeJSON_legacy_list(objects):
'''
The PHP function makeJSON, when operating on a list of rows as
results, will output a JSON list of key-values, with keys being
integers from 0 and upwards. We return a dict with the same
structure so that it looks the same when used with json.dumps.
'''
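    # Sketch of the resulting shape described above:
    #   makeJSON_legacy_list(['a', 'b']) -> {0: 'a', 1: 'b'}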
i = 0
res = {}
for o in objects:
res[i] = o
i += 1
return res
def cursor_fetch_dictionary(cursor):
"Returns all rows from a cursor as a dict"
desc = cursor.description
return [
dict(zip([col[0] for col in desc], row))
for row in cursor.fetchall()
]
def get_relation_to_id_map(project_id):
return {rname: ID for rname, ID in Relation.objects.filter(project=project_id).values_list("relation_name", "id")}
def get_class_to_id_map(project_id):
return {cname: ID for cname, ID in Class.objects.filter(project=project_id).values_list("class_name", "id")}
def urljoin(a, b):
""" Joins to URL parts a and b while making sure this
exactly one slash inbetween.
"""
if a[-1] != '/':
a = a + '/'
if b[0] == '/':
b = b[1:]
return a + b
def id_generator(size=6, chars=string.ascii_lowercase + string.digits):
""" Creates a random string of the specified length.
"""
return ''.join(random.choice(chars) for x in range(size))
|
htem/CATMAID
|
django/applications/catmaid/control/common.py
|
Python
|
agpl-3.0
| 8,243 | 0.002669 |
""" Buildbot inplace config
(C) Copyright 2015 HicknHack Software GmbH
The original code can be found at:
https://github.com/hicknhack-software/buildbot-inplace-config
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from buildbot.config import BuilderConfig
from twisted.python import log
from buildbot.process.factory import BuildFactory
from buildbot.schedulers.forcesched import ForceScheduler
from buildbot.schedulers.triggerable import Triggerable
from inplace_build import InplaceBuildFactory
from project import Project
from setup_build import SetupBuildFactory
from worker import Worker
from pprint import pformat
class NamedList(list):
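    """List of named objects, addressed (set/get/delete) by their .name attribute."""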
def named_set(self, elem):
self.named_del(elem.name)
self.append(elem)
def named_del(self, name):
for elem in self:
if elem.name == name:
self.remove(elem)
def named_get(self, name):
for elem in self:
if elem.name == name:
return elem
def clear(self):
del self[:]
@property
def names(self):
return map(lambda elem: elem.name, self)
class Wrapper(dict):
""" Wrapper for the configuration dictionary """
def __init__(self, **kwargs):
super(Wrapper, self).__init__(**kwargs)
self._inplace_workers = NamedList()
self._projects = NamedList()
@property
def builders(self):
return self.named_list('builders')
@property
def schedulers(self):
return self.named_list('schedulers')
@property
def change_source(self):
return self.named_list('change_source')
@property
def workers(self):
return self.named_list('workers')
@property
def inplace_workers(self):
return self._inplace_workers
@property
def projects(self):
return self._projects
def named_list(self, key):
if key not in self:
self[key] = NamedList()
return self[key]
def load_workers(self, path):
Worker.load(path, self.inplace_workers, self.workers)
def load_projects(self, path):
Project.load(path, self.projects)
DUMMY_NAME = "Dummy"
DUMMY_TRIGGER = "Trigger_Dummy"
def setup_inplace(self):
self.builders.clear()
self.schedulers.clear()
builder_name = self.DUMMY_NAME
trigger_name = self.DUMMY_TRIGGER
worker_names = self.inplace_workers.names
self.builders.named_set(BuilderConfig(name=builder_name, workernames=worker_names, factory=BuildFactory()))
self.schedulers.named_set(ForceScheduler(name=trigger_name, builderNames=[builder_name]))
for project in self.projects:
builder_name = "%s_Builder" % project.name
trigger_name = "Force_%s_Build" % project.name
builder_factory = InplaceBuildFactory(self, project)
self.builders.named_set(BuilderConfig(name=builder_name, workernames=worker_names, factory=builder_factory))
self.schedulers.named_set(ForceScheduler(name=trigger_name, builderNames=[builder_name]))
def project_profile_worker_names(self, profile):
return [worker.name
for worker in self.inplace_workers
if set(profile.setups).issubset(set(worker.setups))
and profile.platform in worker.platforms]
def setup_project_inplace(self, project):
self.setup_inplace()
for worker in self.inplace_workers:
log.msg("Got worker '%s' for platform %s and setups %s" %
(worker.name, pformat(worker.platforms), pformat(worker.setups)),
system='Inplace Config')
for profile in project.inplace.profiles:
worker_names = self.project_profile_worker_names(profile)
if not worker_names:
log.msg("Failed to find worker for platform '%s' and setups '%s' (project '%s')" %
(profile.platform, pformat(profile.setups), project.name),
system='Inplace Config')
continue # profile not executable
builder_name = "_".join([project.name, profile.platform, profile.name])
trigger_name = _project_profile_trigger_name(project.name, profile)
build_factory = SetupBuildFactory(self, project, profile)
self.builders.named_set(BuilderConfig(name=builder_name, workernames=worker_names, factory=build_factory))
self.schedulers.named_set(Triggerable(name=trigger_name, builderNames=[builder_name]))
def project_trigger_names(self, project):
return [
_project_profile_trigger_name(project.name, profile)
for profile in project.inplace.profiles
if self.project_profile_worker_names(profile)]
def _project_profile_trigger_name(project_name, profile):
return "_".join([project_name, profile.platform, profile.name, "Trigger"])
|
hicknhack-software/buildbot-inplace-config
|
buildbot_inplace/config.py
|
Python
|
apache-2.0
| 5,400 | 0.002222 |
class constant():
folder_name = 'results'
MAX_HELP_POSITION = 27
CURRENT_VERSION = '0.9.1'
output = None
file_logger = None
# jitsi options
jitsi_masterpass = None
# mozilla options
manually = None
path = None
bruteforce = None
specific_path = None
mozilla_software = ''
# ie options
ie_historic = None
# total password found
nbPasswordFound = 0
passwordFound = []
|
theoneandonly-vector/LaZagne
|
Windows/src/LaZagne/config/constant.py
|
Python
|
lgpl-3.0
| 416 | 0.057692 |
from sklearn2sql_heroku.tests.regression import generic as reg_gen
reg_gen.test_model("SVR_rbf" , "freidman1" , "db2")
|
antoinecarme/sklearn2sql_heroku
|
tests/regression/freidman1/ws_freidman1_SVR_rbf_db2_code_gen.py
|
Python
|
bsd-3-clause
| 121 | 0.016529 |
# -*- coding: utf-8 -*-
from helper.resource import YuzukiResource
|
Perlmint/Yuzuki
|
resource/util.py
|
Python
|
mit
| 66 | 0.015152 |
from __future__ import absolute_import
import os
import re
import numpy as np
import tensorflow as tf
stop_words=set(["a","an","the"])
def load_candidates(data_dir, task_id):
assert task_id > 0 and task_id < 6
candidates=[]
candidates_f=None
candid_dic={}
#candidates_f='candidates.txt'
candidates_f='candidates' + str(task_id) + '.txt'
with open(os.path.join(data_dir,candidates_f)) as f:
for i,line in enumerate(f):
candid_dic[line.strip().split(' ',1)[1]] = i
line=tokenize(line.strip())[1:]
candidates.append(line)
# return candidates,dict((' '.join(cand),i) for i,cand in enumerate(candidates))
return candidates,candid_dic
def load_test_candidates(data_dir, task_id, test_id):
assert task_id > 0 and task_id < 6
candidates=[]
candidates_f=None
candid_dic={}
'''
if test_id == 1 or test_id == 2:
candidates_f='candidates.txt'
else:
candidates_f='candidates-ext.txt'
'''
if test_id == 1 or test_id == 2:
candidates_f='candidates' + str(task_id) + '.txt'
else:
candidates_f='candidates' + str(task_id) + '_tst'+ str(test_id) + '.txt'
with open(os.path.join(data_dir,candidates_f)) as f:
for i,line in enumerate(f):
candid_dic[line.strip().split(' ',1)[1]] = i
line=tokenize(line.strip())[1:]
candidates.append(line)
# return candidates,dict((' '.join(cand),i) for i,cand in enumerate(candidates))
return candidates,candid_dic
def load_dialog_task(data_dir, task_id, candid_dic, isOOV):
    '''Load the nth dialog task. There are 5 tasks in total.
    Returns a tuple containing the training, testing and validation data for the task.
    '''
assert task_id > 0 and task_id < 6
files = os.listdir(data_dir)
files = [os.path.join(data_dir, f) for f in files]
s = '-dialog-task{}'.format(task_id)
train_file = [f for f in files if s in f and 'train' in f][0]
test_file = [f for f in files if s in f and 'dev' in f][0]
val_file = [f for f in files if s in f and 'dev' in f][0]
train_data = get_dialogs(train_file,candid_dic)
test_data = get_dialogs(test_file,candid_dic)
val_data = get_dialogs(val_file,candid_dic)
return train_data, test_data, val_data
def tokenize(sent):
    '''Return the lowercased tokens of a sentence, dropping stop words
    and any trailing punctuation mark.
    >>> tokenize('Bob dropped the apple. Where is the apple?')
    ['bob', 'dropped', 'apple', '.', 'where', 'is', 'apple']
    '''
    sent=sent.lower()
    if sent=='<silence>':
        return [sent]
    result=[x.strip() for x in re.split(r'(\W+)', sent) if x.strip() and x.strip() not in stop_words]
    if not result:
        result=['<silence>']
    if result[-1]=='.' or result[-1]=='?' or result[-1]=='!':
        result=result[:-1]
    return result
def load_dialog_test_data(data_dir, task_id, test_id):
assert task_id > 0 and task_id < 6
files = os.listdir(data_dir)
files = [os.path.join(data_dir, f) for f in files]
s = '-dialog-task{}'.format(task_id)
t = 'tst_' + str(test_id)
test_file = [f for f in files if s in f and t in f][0]
test_data = get_test_dialogs(test_file)
return test_data
def get_test_dialogs(f):
    '''Given a file name, read the file, retrieve the dialogs, and convert the sentences into a single dialog.
    '''
with open(f) as f:
return parse_test_dialogs(f.readlines())
def parse_test_dialogs(lines):
'''
Parse dialogs provided in the babi tasks format
'''
data=[]
context=[]
u=None
r=None
a=-1
dialog_id=0
for line in lines:
line=line.strip()
if line:
nid, line = line.split(' ', 1)
nid = int(nid)
if '\t' in line:
u, r = line.split('\t')
u = tokenize(u)
r = tokenize(r)
# temporal encoding, and utterance/response encoding
# data.append((context[:],u[:],candid_dic[' '.join(r)]))
# data.append((context[:],u[:],a,dialog_id))
u.append('$u')
u.append('#'+str(nid))
r.append('$r')
r.append('#'+str(nid))
context.append(u)
context.append(r)
else:
r=tokenize(line)
r.append('$r')
r.append('#'+str(nid))
context.append(r)
else:
data.append((context[:-2],u[:],a,dialog_id))
# clear context
u=None
r=None
a=None
context=[]
dialog_id=dialog_id+1
return data
def parse_dialogs_per_response(lines,candid_dic):
'''
Parse dialogs provided in the babi tasks format
'''
data=[]
context=[]
u=None
r=None
dialog_id=0
for line in lines:
line=line.strip()
if line:
nid, line = line.split(' ', 1)
nid = int(nid)
if '\t' in line:
u, r = line.split('\t')
a = candid_dic[r]
u = tokenize(u)
r = tokenize(r)
# temporal encoding, and utterance/response encoding
# data.append((context[:],u[:],candid_dic[' '.join(r)]))
data.append((context[:],u[:],a,dialog_id))
u.append('$u')
u.append('#'+str(nid))
r.append('$r')
r.append('#'+str(nid))
context.append(u)
context.append(r)
else:
r=tokenize(line)
r.append('$r')
r.append('#'+str(nid))
context.append(r)
else:
dialog_id=dialog_id+1
# clear context
context=[]
return data
def get_dialogs(f,candid_dic):
    '''Given a file name, read the file, retrieve the dialogs, and convert the sentences into a single dialog.
    '''
with open(f) as f:
return parse_dialogs_per_response(f.readlines(),candid_dic)
def vectorize_candidates_sparse(candidates,word_idx):
shape=(len(candidates),len(word_idx)+1)
indices=[]
values=[]
for i,candidate in enumerate(candidates):
for w in candidate:
indices.append([i,word_idx[w]])
values.append(1.0)
return tf.SparseTensor(indices,values,shape)
def vectorize_candidates(candidates,word_idx,sentence_size):
shape=(len(candidates),sentence_size)
C=[]
for i,candidate in enumerate(candidates):
lc=max(0,sentence_size-len(candidate))
C.append([word_idx[w] if w in word_idx else 0 for w in candidate] + [0] * lc)
return tf.constant(C,shape=shape)
def vectorize_data(data, word_idx, sentence_size, batch_size, candidates_size, max_memory_size, candidates, match_feature_flag):
"""
Vectorize stories and queries.
If a sentence length < sentence_size, the sentence will be padded with 0's.
If a story length < memory_size, the story will be padded with empty memories.
Empty memories are 1-D arrays of length sentence_size filled with 0's.
    Each answer is returned as the index of the correct candidate.
"""
atmosphere_restriction_set={'casual','romantic','business','glutenfree','vegan','vegetarian'}
S = []
Q = []
A = []
C = []
data.sort(key=lambda x:len(x[0]),reverse=True)
for i, (story, query, answer, start) in enumerate(data):
if i%batch_size==0:
memory_size=max(1,min(max_memory_size,len(story)))
ss = []
story_query_vocab = set()
for i, sentence in enumerate(story, 1):
ls = max(0, sentence_size - len(sentence))
ss.append([word_idx[w] if w in word_idx else 0 for w in sentence] + [0] * ls)
for w in sentence:
story_query_vocab.add(w)
# take only the most recent sentences that fit in memory
ss = ss[::-1][:memory_size][::-1]
# pad to memory_size
lm = max(0, memory_size - len(ss))
for _ in range(lm):
ss.append([0] * sentence_size)
lq = max(0, sentence_size - len(query))
q = [word_idx[w] if w in word_idx else 0 for w in query] + [0] * lq
for w in query:
story_query_vocab.add(w)
story_query_vocab = story_query_vocab.intersection(atmosphere_restriction_set)
c = []
for j,candidate in enumerate(candidates):
candidate_vocab = set()
for w in candidate:
candidate_vocab.add(w)
candidate_vocab = candidate_vocab.intersection(atmosphere_restriction_set)
extra_feature_len=0
match_feature=[]
if candidate_vocab <= story_query_vocab and len(candidate_vocab) > 0 and match_feature_flag:
extra_feature_len=1
match_feature.append(word_idx['MATCH_ATMOSPHERE_RESTRICTION'])
lc=max(0,sentence_size-len(candidate)-extra_feature_len)
c.append([word_idx[w] if w in word_idx else 0 for w in candidate] + [0] * lc + match_feature)
S.append(np.array(ss))
Q.append(np.array(q))
A.append(np.array(answer))
C.append(np.array(c))
return S, Q, A, C
def vectorize_data_with_surface_form(data, word_idx, sentence_size, batch_size, candidates_size, max_memory_size, candidates, match_feature_flag):
"""
Vectorize stories and queries.
If a sentence length < sentence_size, the sentence will be padded with 0's.
If a story length < memory_size, the story will be padded with empty memories.
Empty memories are 1-D arrays of length sentence_size filled with 0's.
    Each answer is returned as the index of the correct candidate.
"""
atmosphere_restriction_set={'casual','romantic','business','glutenfree','vegan','vegetarian'}
S = []
Q = []
A = []
C = []
S_in_readable_form = []
Q_in_readable_form = []
dialogIDs = []
last_db_results = []
data.sort(key=lambda x:len(x[0]),reverse=True)
for i, (story, query, answer, dialog_id) in enumerate(data):
if i%batch_size==0:
memory_size=max(1,min(max_memory_size,len(story)))
ss = []
story_string = []
story_query_vocab = set()
dbentries =set([])
dbEntriesRead=False
last_db_result=""
for i, sentence in enumerate(story, 1):
ls = max(0, sentence_size - len(sentence))
ss.append([word_idx[w] if w in word_idx else 0 for w in sentence] + [0] * ls)
for w in sentence:
story_query_vocab.add(w)
story_element = ' '.join([str(x) for x in sentence[:-2]])
# if the story element is a database response/result
if 'r_' in story_element and 'api_call' not in story_element:
dbEntriesRead = True
if 'r_rating' in story_element:
dbentries.add( sentence[0] + '(' + sentence[2] + ')')
else:
if dbEntriesRead:
#story_string.append('$db : ' + ' '.join([str(x) for x in dbentries]))
last_db_result = '$db : ' + ' '.join([str(x) for x in dbentries])
dbentries =set([])
dbEntriesRead = False
#story_string.append(' '.join([str(x) for x in sentence[-2:]]) + ' : ' + story_element)
story_string.append(' '.join([str(x) for x in sentence[-2:]]) + ' : ' + story_element)
# take only the most recent sentences that fit in memory
ss = ss[::-1][:memory_size][::-1]
# pad to memory_size
lm = max(0, memory_size - len(ss))
for _ in range(lm):
ss.append([0] * sentence_size)
lq = max(0, sentence_size - len(query))
q = [word_idx[w] if w in word_idx else 0 for w in query] + [0] * lq
for w in query:
story_query_vocab.add(w)
story_query_vocab = story_query_vocab.intersection(atmosphere_restriction_set)
c = []
for j,candidate in enumerate(candidates):
candidate_vocab = set()
for w in candidate:
candidate_vocab.add(w)
candidate_vocab = candidate_vocab.intersection(atmosphere_restriction_set)
extra_feature_len=0
match_feature=[]
if candidate_vocab == story_query_vocab and len(candidate_vocab) > 0 and match_feature_flag:
extra_feature_len=1
match_feature.append(word_idx['MATCH_ATMOSPHERE_RESTRICTION'])
lc=max(0,sentence_size-len(candidate)-extra_feature_len)
c.append([word_idx[w] if w in word_idx else 0 for w in candidate] + [0] * lc + match_feature)
S.append(np.array(ss))
Q.append(np.array(q))
A.append(np.array(answer))
C.append(np.array(c))
S_in_readable_form.append(story_string)
Q_in_readable_form.append(' '.join([str(x) for x in query]))
last_db_results.append(last_db_result)
dialogIDs.append(dialog_id)
return S, Q, A, C, S_in_readable_form, Q_in_readable_form, last_db_results, dialogIDs
def restaurant_reco_evluation(test_preds, testA, indx2candid):
total = 0
match = 0
for idx, val in enumerate(test_preds):
answer = indx2candid[testA[idx].item(0)]
prediction = indx2candid[val]
if "what do you think of this option:" in prediction:
total = total+1
if prediction == answer:
match=match+1
print('Restaurant Recommendation Accuracy : ' + str(match/float(total)) + " (" + str(match) + "/" + str(total) + ")")
if __name__ == '__main__':
u = tokenize('The phone number of taj_tandoori is taj_tandoori_phone')
print(u)
|
DineshRaghu/dstc6-track1
|
src/data_utils.py
|
Python
|
gpl-3.0
| 14,089 | 0.013273 |
from pathlib import Path
def source_dir():
src = Path("@CMAKE_CURRENT_SOURCE_DIR@/../..")
if src.is_dir():
return src.relative_to(Path.cwd())
# If the file was not correctly configured by cmake, look for the source
# folder, assuming the build folder is inside the source folder.
current_path = Path(__file__)
while current_path != Path("/"):
if (current_path / ".git").is_dir():
return current_path
current_path = current_path.parent
raise RuntimeError("Cannot find the source folder")
SOURCE_DIR = source_dir()
|
joakim-hove/ert
|
tests/utils.py
|
Python
|
gpl-3.0
| 582 | 0 |
"""Triton Daemon - Communication server for Oxford Triton system
The Triton fridge already has the communication capability to directly control and read the temperatures as well as other elements
of the fridge (pressure sensors, valves, compressor, ...). However, the Triton logging uses binary format files that can
only be opened in their (very flaky) proprietary program. This Daemon acts as an intermediary between the Triton system and
the measurement computer, allowing the measurement computer to send commands to the system while it maintains an ASCII log
of the system parameters. These logfiles can then be opened using cryoLogger. Note that the binary logs are still
kept in any case.
Run as::
python TritonDaemon.py
This Daemon is intended to be run on the Triton control computer but can actually be run from any system that has network access
to the triton control computer. The address will be requested when the script is started as well as the folder where the logs
should be saved. The defaults can be adjusted in the variables :code:`TRITONADDR` and :code:`LOGFOLDER` at the top of the script.
"""
from stlab.devices.Oxford_Triton import Oxford_Triton as Triton
from queue import Queue
from threading import Thread
import time
from stlab.devices.TritonDaemon.TritonLogger import TritonLogger as logger
import socket
from stlab.utils.MySocket import MySocket
import sys
import datetime
TRITONADDR = 'TCPIP::127.0.0.1::33576::SOCKET'
LOGFOLDER = 'C:/RemoteLogging/'
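# command_handler serializes all access to the single Triton connection: consumers
# put (reply_queue, method, args) tuples on qin (e.g. (resultq, Triton.query, (word,))),
# the method is called on the shared Triton instance, and the result is posted back
# on the per-request reply queue. A bare 0 on the queue shuts the handler down.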
def command_handler(qin,addr='TCPIP::127.0.0.1::33576::SOCKET'):
mytriton = Triton(addr=addr)
while True:
nextcomm = qin.get()
if nextcomm == 0:
break
qout = nextcomm[0]
comm = nextcomm[1]
args = nextcomm[2]
ret = comm(mytriton, *args)
qin.task_done()
qout.put(ret)
if __name__ == '__main__':
print("StLab Temperature server for Triton. Initializing...")
'''
if len(sys.argv) >= 2:
filename = sys.argv[1]
ff = open(filename,'a')
ff.write('\n')
else:
t0 = datetime.datetime.now()
filename = 'log_' + t0.strftime('%y_%m_%d__%H_%M_%S') + '.dat'
varline = ['Time (s)'] + ['PT2 Head (K)','PT2 Plate (K)', 'Still Plate (K)','Cold Plate (K)','MC Cernox (K)','PT1 Head (K)','PT1 Plate (K)','MC Plate (K)'] + ['P%d (mbar)' % i for i in range(1,7)]
#varline = ['Time (s)'] + ['T%d (K)' % i for i in range(1,10)] + ['P%d (mbar)' % i for i in range(1,7)]
print(varline)
ff = open(filename,'w')
ff.write('#' + ', '.join(varline)+'\n')
'''
logfolder = input('Enter BF log folder location (default "{}"):\n'.format(LOGFOLDER))
if logfolder == '':
logfolder = LOGFOLDER
tritonaddr = input('Enter address of Triton instrument (default "{}"):\n'.format(TRITONADDR))
if tritonaddr == '':
tritonaddr = TRITONADDR
commandq = Queue(maxsize=0)
myhandler = Thread(target=command_handler, args=(commandq,tritonaddr))
myhandler.daemon = True
myhandler.start()
loggerthread = Thread(target=logger, args=(commandq,logfolder))
loggerthread.start()
# create an INET, STREAMing socket
serversocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# bind the socket to a public host, and a well-known port
#serversocket.bind((socket.gethostname(), 8001))
#addr = socket.gethostbyname(socket.gethostname())
addr = '0.0.0.0' #listen on all network interfaces
port = 8472
serversocket.bind((addr, port))
# become a server socket
serversocket.listen(5)
myip = socket.gethostbyname(socket.gethostname())
print("Ready. Listening on port %d and address %s" % (port,myip))
def RunCommand(sock,resultq):
ss = MySocket(sock)
word = ss.myreceive()
word = word.decode('utf_8')
commandq.put( (resultq, Triton.query, (word,)) )
xx = resultq.get()
resultq.task_done()
ss.mysend(xx.encode('utf_8'))
ss.sock.close()
return word
resultq = Queue(maxsize=0)
while True:
clientsocket = None
try:
# accept connections from outside
(clientsocket, address) = serversocket.accept()
RunCommand(clientsocket,resultq)
print("Listening on port %d and address %s" % (port,myip))
except KeyboardInterrupt:
print('Shutting down temperature server')
serversocket.close()
break
commandq.put(0)
loggerthread.join()
|
yausern/stlab
|
devices/TritonDaemon/TritonDaemon.py
|
Python
|
gpl-3.0
| 4,577 | 0.007428 |
#!/usr/bin/env python
# ESP32 efuse get/set utility
# https://github.com/themadinventor/esptool
#
# Copyright (C) 2016 Espressif Systems (Shanghai) PTE LTD
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 2 of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51 Franklin
# Street, Fifth Floor, Boston, MA 02110-1301 USA.
from __future__ import division, print_function
import argparse
import esptool
import io
import json
import os
import struct
import sys
import time
# Table of efuse values - (category, block, word in block, mask, write disable bit, read disable bit, type, description)
# Match values in efuse_reg.h & Efuse technical reference chapter
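# For example, the FLASH_CRYPT_CNT row below reads as: category "security", block 0,
# word 0, mask 0x07F00000, write-disable bit 2, no read-disable bit, shown as a bit count.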
EFUSES = [
('WR_DIS', "efuse", 0, 0, 0x0000FFFF, 1, None, "int", "Efuse write disable mask"),
    ('RD_DIS', "efuse", 0, 0, 0x000F0000, 0, None, "int", "Efuse read disable mask"),
('FLASH_CRYPT_CNT', "security", 0, 0, 0x07F00000, 2, None, "bitcount", "Flash encryption mode counter"),
('MAC', "identity", 0, 1, 0xFFFFFFFF, 3, None, "mac", "Factory MAC Address"),
('XPD_SDIO_FORCE', "config", 0, 4, 1 << 16, 5, None, "flag", "Ignore MTDI pin (GPIO12) for VDD_SDIO on reset"),
('XPD_SDIO_REG', "config", 0, 4, 1 << 14, 5, None, "flag", "If XPD_SDIO_FORCE, enable VDD_SDIO reg on reset"),
('XPD_SDIO_TIEH', "config", 0, 4, 1 << 15, 5, None, "flag", "If XPD_SDIO_FORCE & XPD_SDIO_REG, 1=3.3V 0=1.8V"),
('CLK8M_FREQ', "config", 0, 4, 0xFF, None, None, "int", "8MHz clock freq override"),
('SPI_PAD_CONFIG_CLK', "config", 0, 5, 0x1F << 0, 6, None, "spipin", "Override SD_CLK pad (GPIO6/SPICLK)"),
('SPI_PAD_CONFIG_Q', "config", 0, 5, 0x1F << 5, 6, None, "spipin", "Override SD_DATA_0 pad (GPIO7/SPIQ)"),
('SPI_PAD_CONFIG_D', "config", 0, 5, 0x1F << 10, 6, None, "spipin", "Override SD_DATA_1 pad (GPIO8/SPID)"),
('SPI_PAD_CONFIG_HD', "config", 0, 3, 0x1F << 4, 6, None, "spipin", "Override SD_DATA_2 pad (GPIO9/SPIHD)"),
('SPI_PAD_CONFIG_CS0', "config", 0, 5, 0x1F << 15, 6, None, "spipin", "Override SD_CMD pad (GPIO11/SPICS0)"),
('FLASH_CRYPT_CONFIG', "security", 0, 5, 0x0F << 28, 10, 3, "int", "Flash encryption config (key tweak bits)"),
('CHIP_VER_REV1', "identity", 0, 3, 1 << 15, 3, None, "flag", "Silicon Revision 1"),
('CHIP_VER_REV2', "identity", 0, 5, 1 << 20, 6, None, "flag", "Silicon Revision 2"),
('BLK3_PART_RESERVE', "calibration", 0, 3, 1 << 14, 10, 3, "flag", "BLOCK3 partially served for ADC calibration data"),
('CHIP_VERSION', "identity", 0, 3, 0x03 << 12, 3, None, "int", "Reserved for future chip versions"),
('CHIP_PACKAGE', "identity", 0, 3, 0x07 << 9, 3, None, "int", "Chip package identifier"),
('CODING_SCHEME', "efuse", 0, 6, 0x3, 10, 3, "int", "Efuse variable block length scheme"),
('CONSOLE_DEBUG_DISABLE',"security", 0, 6, 1 << 2, 15, None, "flag", "Disable ROM BASIC interpreter fallback"),
('DISABLE_SDIO_HOST', "config", 0, 6, 1 << 3, None, None, "flag", "Disable SDIO host"),
('ABS_DONE_0', "security", 0, 6, 1 << 4, 12, None, "flag", "secure boot enabled for bootloader"),
('ABS_DONE_1', "security", 0, 6, 1 << 5, 13, None, "flag", "secure boot abstract 1 locked"),
('JTAG_DISABLE', "security", 0, 6, 1 << 6, 14, None, "flag", "Disable JTAG"),
('DISABLE_DL_ENCRYPT', "security", 0, 6, 1 << 7, 15, None, "flag", "Disable flash encryption in UART bootloader"),
('DISABLE_DL_DECRYPT', "security", 0, 6, 1 << 8, 15, None, "flag", "Disable flash decryption in UART bootloader"),
('DISABLE_DL_CACHE', "security", 0, 6, 1 << 9, 15, None, "flag", "Disable flash cache in UART bootloader"),
('KEY_STATUS', "efuse", 0, 6, 1 << 10, 10, 3, "flag", "Usage of efuse block 3 (reserved)"),
    ('ADC_VREF', "calibration", 0, 4, 0x1F << 8, 0, None, "vref", "Voltage reference calibration"),
('BLK1', "security", 1, 0, 0xFFFFFFFF, 7, 0, "keyblock", "Flash encryption key"),
('BLK2', "security", 2, 0, 0xFFFFFFFF, 8, 1, "keyblock", "Secure boot key"),
('BLK3', "security", 3, 0, 0xFFFFFFFF, 9, 2, "keyblock", "Variable Block 3"),
]
# if BLK3_PART_RESERVE is set, these efuse fields are in BLK3:
BLK3_PART_EFUSES = [
('ADC1_TP_LOW', "calibration", 3, 3, 0x7F << 0, 9, 2, "adc_tp", "ADC1 150mV reading"),
('ADC1_TP_HIGH', "calibration", 3, 3, 0x1FF << 7, 9, 2, "adc_tp", "ADC1 850mV reading"),
('ADC2_TP_LOW', "calibration", 3, 3, 0x7F << 16, 9, 2, "adc_tp", "ADC2 150mV reading"),
('ADC2_TP_HIGH', "calibration", 3, 3, 0x1FF << 23, 9, 2, "adc_tp", "ADC2 850mV reading"),
]
# Offsets and lengths of each of the 4 efuse blocks in register space
#
# These offsets/lens are for esptool.read_efuse(X) which takes
# a word offset (into registers) not a byte offset.
EFUSE_BLOCK_OFFS = [0, 14, 22, 30]
EFUSE_BLOCK_LEN = [7, 8, 8, 8]
# EFUSE registers & command/conf values
EFUSE_REG_CONF = 0x3FF5A0FC
EFUSE_CONF_WRITE = 0x5A5A
EFUSE_CONF_READ = 0x5AA5
EFUSE_REG_CMD = 0x3FF5A104
EFUSE_CMD_WRITE = 0x2
EFUSE_CMD_READ = 0x1
# address of first word of write registers for each efuse
EFUSE_REG_WRITE = [0x3FF5A01C, 0x3FF5A098, 0x3FF5A0B8, 0x3FF5A0D8]
# 3/4 Coding scheme warnings registers
EFUSE_REG_DEC_STATUS = 0x3FF5A11C
EFUSE_REG_DEC_STATUS_MASK = 0xFFF
# Efuse clock control
EFUSE_DAC_CONF_REG = 0x3FF5A118
EFUSE_CLK_REG = 0x3FF5A0F8
EFUSE_DAC_CLK_DIV_MASK = 0xFF
EFUSE_CLK_SEL0_MASK = 0x00FF
EFUSE_CLK_SEL1_MASK = 0xFF00
EFUSE_CLK_SETTINGS = {
# APB freq: clk_sel0, clk_sel1, dac_clk_div
# Taken from TRM chapter "eFuse Controller": Timing Configuration
26: (250, 255, 52),
40: (160, 255, 80),
80: (80, 128, 100), # this is here for completeness only as esptool never sets an 80MHz APB clock
}
EFUSE_BURN_TIMEOUT = 0.250 # seconds
# Coding Scheme values
CODING_SCHEME_NONE = 0
CODING_SCHEME_34 = 1
def confirm(action, args):
print("%s%sThis is an irreversible operation." % (action, "" if action.endswith("\n") else ". "))
if not args.do_not_confirm:
print("Type 'BURN' (all capitals) to continue.")
sys.stdout.flush() # required for Pythons which disable line buffering, ie mingw in mintty
try:
yes = raw_input() # raw_input renamed to input in Python 3
except NameError:
yes = input()
if yes != "BURN":
print("Aborting.")
sys.exit(0)
def efuse_write_reg_addr(block, word):
"""
Return the physical address of the efuse write data register
block X word X.
"""
return EFUSE_REG_WRITE[block] + (4 * word)
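# Illustrative note (added for clarity, not part of the original tool): for block 2,
# word 1 the helper above resolves to EFUSE_REG_WRITE[2] + 4 * 1 = 0x3FF5A0B8 + 4, i.e.
#   efuse_write_reg_addr(2, 1) == 0x3FF5A0BC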
class EspEfuses(object):
"""
Wrapper object to manage the efuse fields in a connected ESP bootloader
"""
def __init__(self, esp):
self._esp = esp
self._efuses = [EfuseField.from_tuple(self, efuse) for efuse in EFUSES]
if self["BLK3_PART_RESERVE"].get():
# add these BLK3 efuses, if the BLK3_PART_RESERVE flag is set...
self._efuses += [EfuseField.from_tuple(self, efuse) for efuse in BLK3_PART_EFUSES]
self.coding_scheme = self["CODING_SCHEME"].get()
def __getitem__(self, efuse_name):
""" Return the efuse field with the given name """
for e in self._efuses:
if efuse_name == e.register_name:
return e
raise KeyError
def __iter__(self):
return self._efuses.__iter__()
def write_efuses(self):
""" Write the values in the efuse write registers to
the efuse hardware, then refresh the efuse read registers.
"""
# Configure clock
apb_freq = self._esp.get_crystal_freq()
clk_sel0, clk_sel1, dac_clk_div = EFUSE_CLK_SETTINGS[apb_freq]
self.update_reg(EFUSE_DAC_CONF_REG, EFUSE_DAC_CLK_DIV_MASK, dac_clk_div)
self.update_reg(EFUSE_CLK_REG, EFUSE_CLK_SEL0_MASK, clk_sel0)
self.update_reg(EFUSE_CLK_REG, EFUSE_CLK_SEL1_MASK, clk_sel1)
self.write_reg(EFUSE_REG_CONF, EFUSE_CONF_WRITE)
self.write_reg(EFUSE_REG_CMD, EFUSE_CMD_WRITE)
def wait_idle():
deadline = time.time() + EFUSE_BURN_TIMEOUT
while time.time() < deadline:
if self._esp.read_reg(EFUSE_REG_CMD) == 0:
return
raise esptool.FatalError("Timed out waiting for Efuse controller command to complete")
wait_idle()
self.write_reg(EFUSE_REG_CONF, EFUSE_CONF_READ)
self.write_reg(EFUSE_REG_CMD, EFUSE_CMD_READ)
wait_idle()
def read_efuse(self, addr):
return self._esp.read_efuse(addr)
def read_reg(self, addr):
return self._esp.read_reg(addr)
def write_reg(self, addr, value):
return self._esp.write_reg(addr, value)
def update_reg(self, addr, mask, new_val):
return self._esp.update_reg(addr, mask, new_val)
def get_coding_scheme_warnings(self):
""" Check if the coding scheme has detected any errors.
Meaningless for default coding scheme (0)
"""
return self.read_reg(EFUSE_REG_DEC_STATUS) & EFUSE_REG_DEC_STATUS_MASK
def get_block_len(self):
""" Return the length of BLK1, BLK2, BLK3 in bytes """
return 24 if self.coding_scheme == CODING_SCHEME_34 else 32
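# Illustrative usage sketch (added for clarity, not part of the tool): assuming 'esp'
# is an already-connected esptool.ESP32ROM instance, the wrapper can be iterated or
# indexed by efuse name:
#
#   efuses = EspEfuses(esp)
#   for e in efuses:
#       print(e.register_name, e.get())
#   jtag = efuses["JTAG_DISABLE"]          # look up a single field
#   print(jtag.is_readable(), jtag.is_writeable())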
class EfuseField(object):
@staticmethod
def from_tuple(parent, efuse_tuple):
category = efuse_tuple[7]
return {
"mac": EfuseMacField,
"keyblock": EfuseKeyblockField,
"spipin": EfuseSpiPinField,
"vref": EfuseVRefField,
"adc_tp": EfuseAdcPointCalibration,
}.get(category, EfuseField)(parent, *efuse_tuple)
def __init__(self, parent, register_name, category, block, word, mask, write_disable_bit, read_disable_bit, efuse_type, description):
self.category = category
self.parent = parent
self.block = block
self.word = word
self.data_reg_offs = EFUSE_BLOCK_OFFS[self.block] + self.word
self.mask = mask
self.shift = esptool._mask_to_shift(mask)
self.write_disable_bit = write_disable_bit
self.read_disable_bit = read_disable_bit
self.register_name = register_name
self.efuse_type = efuse_type
self.description = description
def get_raw(self):
""" Return the raw (unformatted) numeric value of the efuse bits
Returns a simple integer or (for some subclasses) a bitstring.
"""
value = self.parent.read_efuse(self.data_reg_offs)
return (value & self.mask) >> self.shift
def get(self):
""" Get a formatted version of the efuse value, suitable for display """
return self.get_raw()
def is_readable(self):
""" Return true if the efuse is readable by software """
if self.read_disable_bit is None:
return True # read cannot be disabled
value = (self.parent.read_efuse(0) >> 16) & 0xF # RD_DIS values
return (value & (1 << self.read_disable_bit)) == 0
def disable_read(self):
if self.read_disable_bit is None:
raise esptool.FatalError("This efuse cannot be read-disabled")
rddis_reg_addr = efuse_write_reg_addr(0, 0)
self.parent.write_reg(rddis_reg_addr, 1 << (16 + self.read_disable_bit))
self.parent.write_efuses()
return self.get()
def is_writeable(self):
if self.write_disable_bit is None:
return True # write cannot be disabled
value = self.parent.read_efuse(0) & 0xFFFF # WR_DIS values
return (value & (1 << self.write_disable_bit)) == 0
def disable_write(self):
wrdis_reg_addr = efuse_write_reg_addr(0, 0)
self.parent.write_reg(wrdis_reg_addr, 1 << self.write_disable_bit)
self.parent.write_efuses()
return self.get()
def burn(self, new_value):
raw_value = (new_value << self.shift) & self.mask
        # don't bother reading the old value as we can only set bits 0->1
write_reg_addr = efuse_write_reg_addr(self.block, self.word)
self.parent.write_reg(write_reg_addr, raw_value)
self.parent.write_efuses()
return self.get()
class EfuseMacField(EfuseField):
def get_raw(self):
# MAC values are high half of second efuse word, then first efuse word
words = [self.parent.read_efuse(self.data_reg_offs + word) for word in [1,0]]
# endian-swap into a bitstring
bitstring = struct.pack(">II", *words)
return bitstring[2:] # trim 2 byte CRC from the beginning
@staticmethod
def get_and_check(raw_mac, stored_crc):
computed_crc = EfuseMacField.calc_crc(raw_mac)
if computed_crc == stored_crc:
valid_msg = "(CRC 0x%02x OK)" % stored_crc
else:
valid_msg = "(CRC 0x%02x invalid - calculated 0x%02x)" % (stored_crc, computed_crc)
return "%s %s" % (hexify(raw_mac, ":"), valid_msg)
def get(self):
stored_crc = self.get_stored_crc()
return EfuseMacField.get_and_check(self.get_raw(), stored_crc)
def burn(self, new_value):
# Writing the BLK0 default MAC is not sensible, as it's written in the factory.
raise esptool.FatalError("Writing Factory MAC address is not supported")
def get_stored_crc(self):
return (self.parent.read_efuse(self.data_reg_offs + 1) >> 16) & 0xFF
@staticmethod
def calc_crc(raw_mac):
"""
This algorithm is the equivalent of esp_crc8() in ESP32 ROM code
This is CRC-8 w/ inverted polynomial value 0x8C & initial value 0x00.
"""
result = 0x00
for b in struct.unpack("B" * 6, raw_mac):
result ^= b
for _ in range(8):
lsb = result & 1
result >>= 1
if lsb != 0:
result ^= 0x8c
return result
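# Illustrative check (assumption: 'efuses' is an EspEfuses instance) showing how the
# pieces above fit together to verify the factory MAC against its stored CRC:
#
#   mac_field = efuses["MAC"]
#   raw_mac = mac_field.get_raw()                        # 6 bytes
#   ok = EfuseMacField.calc_crc(raw_mac) == mac_field.get_stored_crc()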
class EfuseKeyblockField(EfuseField):
def get_raw(self):
words = self.get_words()
return struct.pack("<" + ("I" * len(words)), *words)
def get_key(self):
# Keys are stored in reverse byte order
result = self.get_raw()
result = result[::-1]
return result
def get_words(self):
num_words = self.parent.get_block_len() // 4
return [self.parent.read_efuse(self.data_reg_offs + word) for word in range(num_words)]
def get(self):
return hexify(self.get_raw(), " ")
def apply_34_encoding(self, inbits):
""" Takes 24 byte sequence to be represented in 3/4 encoding,
returns 8 words suitable for writing "encoded" to an efuse block
"""
def popcnt(b):
""" Return number of "1" bits set in 'b' """
return len([x for x in bin(b) if x == "1"])
outbits = b""
while len(inbits) > 0: # process in chunks of 6 bytes
bits = inbits[0:6]
inbits = inbits[6:]
xor_res = 0
mul_res = 0
index = 1
for b in struct.unpack("B" * 6, bits):
xor_res ^= b
mul_res += index * popcnt(b)
index += 1
outbits += bits
outbits += struct.pack("BB", xor_res, mul_res)
return struct.unpack("<" + "I" * (len(outbits) // 4), outbits)
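    # Illustrative note (added for clarity): each 6-byte chunk gains a 2-byte checksum
    # (an XOR byte plus a position-weighted popcount byte), so the 24-byte input grows
    # to 32 bytes == 8 words. E.g. for a keyblock field 'blk' (name assumed):
    #   blk.apply_34_encoding(b'\x00' * 24)   # -> tuple of 8 32-bit words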
def burn_key(self, new_value):
new_value = new_value[::-1] # AES keys are stored in reverse order in efuse
return self.burn(new_value)
def burn(self, new_value):
key_len = self.parent.get_block_len()
if len(new_value) != key_len:
raise RuntimeError("Invalid new value length for key block (%d), %d is required" % len(new_value), key_len)
if self.parent.coding_scheme == CODING_SCHEME_34:
words = self.apply_34_encoding(new_value)
else:
words = struct.unpack("<" + ("I" * 8), new_value)
return self.burn_words(words)
def burn_words(self, words, word_offset=0):
write_reg_addr = efuse_write_reg_addr(self.block, self.word + word_offset)
for word in words:
self.parent.write_reg(write_reg_addr, word)
write_reg_addr += 4
warnings_before = self.parent.get_coding_scheme_warnings()
self.parent.write_efuses()
warnings_after = self.parent.get_coding_scheme_warnings()
if warnings_after & ~warnings_before != 0:
print("WARNING: Burning efuse block added coding scheme warnings 0x%x -> 0x%x. Encoding bug?" % (warnings_before, warnings_after))
return self.get()
class EfuseSpiPinField(EfuseField):
def get(self):
val = self.get_raw()
if val >= 30:
val += 2 # values 30,31 map to 32, 33
return val
def burn(self, new_value):
if new_value in [30, 31]:
raise esptool.FatalError("IO pins 30 & 31 cannot be set for SPI flash. 0-29, 32 & 33 only.")
if new_value > 33:
raise esptool.FatalError("IO pin %d cannot be set for SPI flash. 0-29, 32 & 33 only." % new_value)
if new_value > 30:
new_value -= 2 # values 32,33 map to 30, 31
return super(EfuseSpiPinField, self).burn(new_value)
class EfuseVRefField(EfuseField):
VREF_OFFSET = 1100 # ideal efuse value in mV
VREF_STEP_SIZE = 7 # 1 count in efuse == 7mV
VREF_SIGN_BIT = 0x10
VREF_MAG_BITS = 0x0F
def get(self):
val = self.get_raw()
# sign-magnitude format
if (val & self.VREF_SIGN_BIT):
val = -(val & self.VREF_MAG_BITS)
else:
val = (val & self.VREF_MAG_BITS)
val *= self.VREF_STEP_SIZE
return self.VREF_OFFSET + val
def burn(self, new_value):
raise RuntimeError("Writing to VRef is not supported.")
class EfuseAdcPointCalibration(EfuseField):
TP_OFFSET = { # See TP_xxxx_OFFSET in esp_adc_cal.c in ESP-IDF
"ADC1_TP_LOW": 278,
"ADC2_TP_LOW": 421,
"ADC1_TP_HIGH": 3265,
"ADC2_TP_HIGH": 3406,
}
SIGN_BIT = (0x40, 0x100) # LOW, HIGH (2s complement format)
STEP_SIZE = 4
def get(self):
idx = 0 if self.register_name.endswith("LOW") else 1
sign_bit = self.SIGN_BIT[idx]
offset = self.TP_OFFSET[self.register_name]
raw = self.get_raw()
delta = (raw & (sign_bit - 1)) - (raw & sign_bit)
return offset + (delta * self.STEP_SIZE)
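# Worked example (illustrative only): for ADC1_TP_LOW with a raw value of 0x45 the
# sign bit (0x40) is set, so delta = (0x45 & 0x3F) - (0x45 & 0x40) = 5 - 64 = -59 and
# the decoded reading is 278 + (-59 * 4) = 42.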
def dump(esp, _efuses, args):
""" Dump raw efuse data registers """
for block in range(len(EFUSE_BLOCK_OFFS)):
print("EFUSE block %d:" % block)
offsets = [x + EFUSE_BLOCK_OFFS[block] for x in range(EFUSE_BLOCK_LEN[block])]
print(" ".join(["%08x" % esp.read_efuse(offs) for offs in offsets]))
def summary(esp, efuses, args):
""" Print a human-readable summary of efuse contents """
ROW_FORMAT = "%-22s %-50s%s= %s %s %s"
human_output = (args.format == 'summary')
json_efuse = {}
if args.file != sys.stdout:
print("Saving efuse values to " + args.file.name)
if human_output:
print(ROW_FORMAT.replace("-50", "-12") % ("EFUSE_NAME", "Description", "", "[Meaningful Value]", "[Readable/Writeable]", "(Hex Value)"),file=args.file)
print("-" * 88,file=args.file)
for category in set(e.category for e in efuses):
if human_output:
print("%s fuses:" % category.title(),file=args.file)
for e in (e for e in efuses if e.category == category):
raw = e.get_raw()
try:
raw = "(0x%x)" % raw
except TypeError:
raw = ""
(readable, writeable) = (e.is_readable(), e.is_writeable())
if readable and writeable:
perms = "R/W"
elif readable:
perms = "R/-"
elif writeable:
perms = "-/W"
else:
perms = "-/-"
base_value = e.get()
value = str(base_value)
if not readable:
value = value.replace("0", "?")
if human_output:
print(ROW_FORMAT % (e.register_name, e.description, "\n " if len(value) > 20 else "", value, perms, raw),file=args.file)
if args.format == 'json':
json_efuse[e.register_name] = {
'value': base_value if readable else value,
'readable':readable,
'writeable':writeable}
if human_output:
print("",file=args.file)
if human_output:
sdio_force = efuses["XPD_SDIO_FORCE"]
sdio_tieh = efuses["XPD_SDIO_TIEH"]
sdio_reg = efuses["XPD_SDIO_REG"]
if sdio_force.get() == 0:
print("Flash voltage (VDD_SDIO) determined by GPIO12 on reset (High for 1.8V, Low/NC for 3.3V).",file=args.file)
elif sdio_reg.get() == 0:
print("Flash voltage (VDD_SDIO) internal regulator disabled by efuse.",file=args.file)
elif sdio_tieh.get() == 0:
print("Flash voltage (VDD_SDIO) set to 1.8V by efuse.",file=args.file)
else:
print("Flash voltage (VDD_SDIO) set to 3.3V by efuse.",file=args.file)
warnings = efuses.get_coding_scheme_warnings()
if warnings:
print("WARNING: Coding scheme has encoding bit error warnings (0x%x)" % warnings,file=args.file)
if args.file != sys.stdout:
args.file.close()
print("Done")
if args.format == 'json':
json.dump(json_efuse,args.file,sort_keys=True,indent=4)
print("")
def burn_efuse(esp, efuses, args):
efuse = efuses[args.efuse_name]
old_value = efuse.get()
if efuse.efuse_type == "flag":
if args.new_value not in [None, 1]:
raise esptool.FatalError("Efuse %s is type 'flag'. New value is not accepted for this efuse (will always burn 0->1)" % efuse.register_name)
args.new_value = 1
if old_value:
print("Efuse %s is already burned." % efuse.register_name)
return
elif efuse.efuse_type == "int":
if args.new_value is None:
raise esptool.FatalError("New value required for efuse %s" % efuse.register_name)
elif efuse.efuse_type == "spipin":
if args.new_value is None or args.new_value == 0:
raise esptool.FatalError("New value required for efuse %s" % efuse.register_name)
elif efuse.efuse_type == "bitcount":
if args.new_value is None: # find the first unset bit and set it
args.new_value = old_value
bit = 1
while args.new_value == old_value:
args.new_value = bit | old_value
bit <<= 1
if args.new_value & (efuse.mask >> efuse.shift) != args.new_value:
raise esptool.FatalError("Value mask for efuse %s is 0x%x. Value 0x%x is too large." % (efuse.register_name, efuse.mask >> efuse.shift, args.new_value))
if args.new_value | old_value != args.new_value:
print("WARNING: New value contains some bits that cannot be cleared (value will be 0x%x)" % (old_value | args.new_value))
confirm("Burning efuse %s (%s) 0x%x -> 0x%x" % (efuse.register_name, efuse.description, old_value, args.new_value | old_value), args)
burned_value = efuse.burn(args.new_value)
if burned_value == old_value:
raise esptool.FatalError("Efuse %s failed to burn. Protected?" % efuse.register_name)
def read_protect_efuse(esp, efuses, args):
efuse = efuses[args.efuse_name]
if not efuse.is_readable():
print("Efuse %s is already read protected" % efuse.register_name)
else:
# make full list of which efuses will be disabled (ie share a read disable bit)
all_disabling = [e for e in efuses if e.read_disable_bit == efuse.read_disable_bit]
names = ", ".join(e.register_name for e in all_disabling)
confirm("Permanently read-disabling efuse%s %s" % ("s" if len(all_disabling) > 1 else "",names), args)
efuse.disable_read()
def write_protect_efuse(esp, efuses, args):
efuse = efuses[args.efuse_name]
if not efuse.is_writeable():
print("]fuse %s is already write protected" % efuse.register_name)
else:
# make full list of which efuses will be disabled (ie share a write disable bit)
all_disabling = [e for e in efuses if e.write_disable_bit == efuse.write_disable_bit]
names = ", ".join(e.register_name for e in all_disabling)
confirm("Permanently write-disabling efuse%s %s" % ("s" if len(all_disabling) > 1 else "",names), args)
efuse.disable_write()
def burn_key(esp, efuses, args):
# check block choice
if args.block in ["flash_encryption", "BLK1"]:
block_num = 1
elif args.block in ["secure_boot", "BLK2"]:
block_num = 2
elif args.block == "BLK3":
block_num = 3
else:
raise RuntimeError("args.block argument not in list!")
num_bytes = efuses.get_block_len()
# check keyfile
keyfile = args.keyfile
    keyfile.seek(0, 2)  # seek to end
size = keyfile.tell()
keyfile.seek(0)
if size != num_bytes:
raise esptool.FatalError("Incorrect key file size %d. Key file must be %d bytes (%d bits) of raw binary key data." %
(size, num_bytes, num_bytes * 8))
# check existing data
efuse = [e for e in efuses if e.register_name == "BLK%d" % block_num][0]
original = efuse.get_raw()
EMPTY_KEY = b'\x00' * num_bytes
if original != EMPTY_KEY:
if not args.force_write_always:
raise esptool.FatalError("Key block already has value %s." % efuse.get())
else:
print("WARNING: Key appears to have a value already. Trying anyhow, due to --force-write-always (result will be bitwise OR of new and old values.)")
if not efuse.is_writeable():
if not args.force_write_always:
raise esptool.FatalError("The efuse block has already been write protected.")
else:
print("WARNING: Key appears to be write protected. Trying anyhow, due to --force-write-always")
msg = "Write key in efuse block %d. " % block_num
if args.no_protect_key:
msg += "The key block will left readable and writeable (due to --no-protect-key)"
else:
msg += "The key block will be read and write protected (no further changes or readback)"
confirm(msg, args)
new_value = keyfile.read(num_bytes)
new = efuse.burn_key(new_value)
print("Burned key data. New value: %s" % (new,))
if not args.no_protect_key:
print("Disabling read/write to key efuse block...")
efuse.disable_write()
efuse.disable_read()
if efuse.is_readable():
print("WARNING: Key does not appear to have been read protected. Perhaps read disable efuse is write protected?")
if efuse.is_writeable():
print("WARNING: Key does not appear to have been write protected. Perhaps write disable efuse is write protected?")
else:
print("Key is left unprotected as per --no-protect-key argument.")
def burn_block_data(esp, efuses, args):
num_bytes = efuses.get_block_len()
offset = args.offset
data = args.datafile.read()
if offset >= num_bytes:
raise RuntimeError("Invalid offset: Key block only holds %d bytes." % num_bytes)
if len(data) > num_bytes - offset:
raise RuntimeError("Data will not fit: Key block size %d bytes, data file is %d bytes" % (num_bytes, len(data)))
if efuses.coding_scheme == CODING_SCHEME_34:
if offset % 6 != 0:
raise RuntimeError("Device has 3/4 Coding Scheme. Can only write at offsets which are a multiple of 6.")
if len(data) % 6 != 0:
raise RuntimeError("Device has 3/4 Coding Scheme. Can only write data lengths which are a multiple of 6 (data is %d bytes)" % len(data))
efuse = [e for e in efuses if e.register_name == args.block.upper()][0]
if not args.force_write_always and \
efuse.get_raw() != b'\x00' * num_bytes:
raise esptool.FatalError("Efuse block already has values written.")
if efuses.coding_scheme == CODING_SCHEME_NONE:
pad = offset % 4
if pad != 0: # left-pad to a word boundary
data = (b'\x00' * pad) + data
offset -= pad
pad = len(data) % 4
if pad != 0: # right-pad to a word boundary
data += (b'\x00' * (4 - pad))
words = struct.unpack("<" + "I" * (len(data) // 4), data)
word_offset = offset // 4
else: # CODING_SCHEME_34
words = efuse.apply_34_encoding(data)
word_offset = (offset // 6) * 2
confirm("Burning efuse %s (%s) with %d bytes of data at offset %d in the block" % (efuse.register_name, efuse.description, len(data), offset), args)
efuse.burn_words(words, word_offset)
def set_flash_voltage(esp, efuses, args):
sdio_force = efuses["XPD_SDIO_FORCE"]
sdio_tieh = efuses["XPD_SDIO_TIEH"]
sdio_reg = efuses["XPD_SDIO_REG"]
# check efuses aren't burned in a way which makes this impossible
if args.voltage == 'OFF' and sdio_reg.get() != 0:
raise esptool.FatalError("Can't set flash regulator to OFF as XPD_SDIO_REG efuse is already burned")
if args.voltage == '1.8V' and sdio_tieh.get() != 0:
raise esptool.FatalError("Can't set regulator to 1.8V is XPD_SDIO_TIEH efuse is already burned")
if args.voltage == 'OFF':
msg = """
Disable internal flash voltage regulator (VDD_SDIO). SPI flash will need to be powered from an external source.
The following efuse is burned: XPD_SDIO_FORCE.
It is possible to later re-enable the internal regulator (%s) by burning an additional efuse
""" % ("to 3.3V" if sdio_tieh.get() != 0 else "to 1.8V or 3.3V")
elif args.voltage == '1.8V':
msg = """
Set internal flash voltage regulator (VDD_SDIO) to 1.8V.
The following efuses are burned: XPD_SDIO_FORCE, XPD_SDIO_REG.
It is possible to later increase the voltage to 3.3V (permanently) by burning additional efuse XPD_SDIO_TIEH
"""
elif args.voltage == '3.3V':
msg = """
Enable internal flash voltage regulator (VDD_SDIO) to 3.3V.
The following efuses are burned: XPD_SDIO_FORCE, XPD_SDIO_REG, XPD_SDIO_TIEH.
"""
confirm(msg, args)
sdio_force.burn(1) # Disable GPIO12
if args.voltage != 'OFF':
sdio_reg.burn(1) # Enable internal regulator
if args.voltage == '3.3V':
sdio_tieh.burn(1)
print("VDD_SDIO setting complete.")
def adc_info(esp, efuses, args):
adc_vref = efuses["ADC_VREF"]
blk3_reserve = efuses["BLK3_PART_RESERVE"]
vref_raw = adc_vref.get_raw()
if vref_raw == 0:
print("ADC VRef calibration: None (1100mV nominal)")
else:
print("ADC VRef calibration: %dmV" % adc_vref.get())
if blk3_reserve.get():
print("ADC readings stored in efuse BLK3:")
print(" ADC1 Low reading (150mV): %d" % efuses["ADC1_TP_LOW"].get())
print(" ADC1 High reading (850mV): %d" % efuses["ADC1_TP_HIGH"].get())
print(" ADC2 Low reading (150mV): %d" % efuses["ADC2_TP_LOW"].get())
print(" ADC2 High reading (850mV): %d" % efuses["ADC2_TP_HIGH"].get())
class CustomMacAddressField(object):
"""
The custom MAC field uses the formatting according to the specification for version 1
"""
def __init__(self, efuses):
self.efuse = [e for e in efuses if e.register_name == 'BLK3'][0]
self.parent = self.efuse.parent
def get_raw(self):
words = [self.parent.read_efuse(self.efuse.data_reg_offs + word) for word in [0, 1]]
bitstring = struct.pack("<II", *words)
        return bitstring[1:-1]  # trim the CRC byte from the beginning and the trailing byte from the end (the MAC occupies bytes 1-6)
def get_stored_crc(self):
return self.parent.read_efuse(self.efuse.data_reg_offs) & 0xFF
@staticmethod
def calc_crc(raw_mac):
return EfuseMacField.calc_crc(raw_mac)
def get(self):
return EfuseMacField.get_and_check(self.get_raw(), self.get_stored_crc())
def get_version(self):
"""
Returns the version of the MAC field
        The version is stored in the block at bit positions [191:184], i.e. in the most
        significant byte of the word at index 5 (hence the shift of 3 * 8 = 24 bits)
"""
return (self.parent.read_efuse(self.efuse.data_reg_offs + 5) >> 24) & 0xFF
def get_block(self, new_mac, new_version):
"""
Returns a byte array which can be written directly to BLK3
"""
num_words = self.parent.get_block_len() // 4
words = [self.parent.read_efuse(self.efuse.data_reg_offs + word) for word in range(num_words)]
B = sum([x << (i * 32) for i, x in enumerate(words)]) # integer representation of the whole BLK content
new_mac_b = struct.pack(">Q", new_mac)[2:] # Q has 8-bytes. Removing two MSB bytes to get a 6-byte MAC
new_mac_rev = struct.unpack("<Q", new_mac_b + b'\x00\x00')[0] # bytes in reversed order
crc = self.calc_crc(new_mac_b)
# MAC fields according to esp_efuse_table.c:
# - CRC - offset 0 bits, length 8 bits
# - MAC - offset 8 bits, length 48 bits
# - MAC version - offset 184 bits, length 8 bits
B |= (crc & ((1 << 8) - 1)) << 0
B |= (new_mac_rev & ((1 << 48) - 1)) << 8
B |= (new_version & ((1 << 8) - 1)) << 184
return bytearray([(B >> i * 8) & 0xFF for i in range(self.parent.get_block_len())])
def burn_custom_mac(esp, efuses, args):
write_always = args.force_write_always
c = CustomMacAddressField(efuses)
old_version = c.get_version()
    new_version = old_version | 1  # Only version 1 MAC addresses are supported at present
if (not write_always and old_version != 0) or (write_always and old_version not in [0, new_version]):
raise esptool.FatalError("The version of the custom MAC Address is already burned ({})!".format(old_version))
old_mac_b = c.get_raw()
old_mac = struct.unpack(">Q", b'\x00\x00' + old_mac_b)[0]
new_mac_b = struct.pack(">Q", args.mac)[2:] # Q has 8-bytes. Removing two MSB bytes to get a 6-byte MAC
new_mac = args.mac
if (not write_always and old_mac != 0) or (write_always and new_mac | old_mac != new_mac):
raise esptool.FatalError("Custom MAC Address was previously burned ({})!".format(hexify(old_mac_b, ":")))
old_crc = c.get_stored_crc()
new_crc = c.calc_crc(new_mac_b)
if (not write_always and old_crc != 0) or (write_always and new_crc | old_crc != new_crc):
raise esptool.FatalError("The CRC of the custom MAC Address was previously burned ({})!".format(old_crc))
confirm("Burning efuse for custom MAC address {} (version {}, CRC 0x{:x}) -> {} (version {}, CRC 0x{:x})"
"".format(hexify(old_mac_b, ":"), old_version, old_crc, hexify(new_mac_b, ":"), new_version, new_crc), args)
with io.BytesIO(c.get_block(new_mac, new_version)) as buf:
args.do_not_confirm = True # Custom MAC burning was already confirmed. No need to ask twice.
        # behaviour of burn_block_data() for args.force_write_always is compatible
args.offset = 0
args.datafile = buf
args.block = 'BLK3'
burn_block_data(esp, efuses, args)
def get_custom_mac(esp, efuses, args):
c = CustomMacAddressField(efuses)
version = c.get_version()
if version > 0:
print("Custom MAC Address version {}: {}".format(version, c.get()))
else:
print("Custom MAC Address is not set in the device.")
def hexify(bitstring, separator=""):
try:
as_bytes = tuple(ord(b) for b in bitstring)
except TypeError: # python 3, items in bitstring already ints
as_bytes = tuple(b for b in bitstring)
return separator.join(("%02x" % b) for b in as_bytes)
def arg_auto_int(x):
return int(x, 0)
def mac_int(string):
if string.count(":") != 5:
raise argparse.ArgumentTypeError("MAC Address needs to be a 6-byte hexadecimal format separated by colons (:)!")
hexad = string.replace(":", "")
if len(hexad) != 12:
raise argparse.ArgumentTypeError("MAC Address needs to be a 6-byte hexadecimal number (12 hexadecimal characters)!")
return int(hexad, 16)
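# Example (illustrative): mac_int("AB:CD:EF:01:02:03") returns 0xABCDEF010203.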
def main():
parser = argparse.ArgumentParser(description='espefuse.py v%s - ESP32 efuse get/set tool' % esptool.__version__, prog='espefuse')
parser.add_argument(
'--baud', '-b',
help='Serial port baud rate used when flashing/reading',
type=arg_auto_int,
default=os.environ.get('ESPTOOL_BAUD', esptool.ESPLoader.ESP_ROM_BAUD))
parser.add_argument(
'--port', '-p',
help='Serial port device',
default=os.environ.get('ESPTOOL_PORT', esptool.ESPLoader.DEFAULT_PORT))
parser.add_argument(
'--before',
help='What to do before connecting to the chip',
choices=['default_reset', 'no_reset', 'esp32r1', 'no_reset_no_sync'],
default='default_reset')
parser.add_argument('--do-not-confirm',
help='Do not pause for confirmation before permanently writing efuses. Use with caution.', action='store_true')
def add_force_write_always(p):
p.add_argument('--force-write-always', help="Write the efuse even if it looks like it's already been written, or is write protected. " +
"Note that this option can't disable write protection, or clear any bit which has already been set.", action='store_true')
subparsers = parser.add_subparsers(
dest='operation',
help='Run espefuse.py {command} -h for additional help')
subparsers.add_parser('dump', help='Dump raw hex values of all efuses')
p = subparsers.add_parser('summary',
help='Print human-readable summary of efuse values')
p.add_argument('--format', help='Select the summary format',choices=['summary','json'],default='summary')
p.add_argument('--file', help='File to save the efuse summary',type=argparse.FileType('w'),default=sys.stdout)
p = subparsers.add_parser('burn_efuse',
help='Burn the efuse with the specified name')
p.add_argument('efuse_name', help='Name of efuse register to burn',
choices=[efuse[0] for efuse in EFUSES])
    p.add_argument('new_value', help='New value to burn (not needed for flag-type efuses)', nargs='?', type=esptool.arg_auto_int)
p = subparsers.add_parser('read_protect_efuse',
help='Disable readback for the efuse with the specified name')
p.add_argument('efuse_name', help='Name of efuse register to burn',
choices=[efuse[0] for efuse in EFUSES if efuse[6] is not None]) # only allow if read_disable_bit is not None
p = subparsers.add_parser('write_protect_efuse',
help='Disable writing to the efuse with the specified name')
p.add_argument('efuse_name', help='Name of efuse register to burn',
choices=[efuse[0] for efuse in EFUSES])
p = subparsers.add_parser('burn_key',
help='Burn a 256-bit AES key to EFUSE BLK1,BLK2 or BLK3 (flash_encryption, secure_boot).')
p.add_argument('--no-protect-key', help='Disable default read- and write-protecting of the key. ' +
'If this option is not set, once the key is flashed it cannot be read back or changed.', action='store_true')
add_force_write_always(p)
p.add_argument('block', help='Key block to burn. "flash_encryption" is an alias for BLK1, ' +
'"secure_boot" is an alias for BLK2.', choices=["secure_boot", "flash_encryption","BLK1","BLK2","BLK3"])
p.add_argument('keyfile', help='File containing 256 bits of binary key data', type=argparse.FileType('rb'))
p = subparsers.add_parser('burn_block_data',
help="Burn non-key data to EFUSE BLK1, BLK2 or BLK3. " +
" Don't use this command to burn key data for Flash Encryption or Secure Boot, " +
"as the byte order of keys is swapped (use burn_key).")
p.add_argument('--offset', '-o', help='Byte offset in the efuse block', type=int, default=0)
add_force_write_always(p)
p.add_argument('block', help='Efuse block to burn.', choices=["BLK1","BLK2","BLK3"])
p.add_argument('datafile', help='File containing data to burn into the efuse block', type=argparse.FileType('rb'))
p = subparsers.add_parser('set_flash_voltage',
help='Permanently set the internal flash voltage regulator to either 1.8V, 3.3V or OFF. ' +
'This means GPIO12 can be high or low at reset without changing the flash voltage.')
p.add_argument('voltage', help='Voltage selection',
choices=['1.8V', '3.3V', 'OFF'])
p = subparsers.add_parser('adc_info',
help='Display information about ADC calibration data stored in efuse.')
p = subparsers.add_parser('burn_custom_mac',
help='Burn a 48-bit Custom MAC Address to EFUSE BLK3.')
p.add_argument('mac', help='Custom MAC Address to burn given in hexadecimal format with bytes separated by colons' +
' (e.g. AB:CD:EF:01:02:03).', type=mac_int)
add_force_write_always(p)
p = subparsers.add_parser('get_custom_mac',
help='Prints the Custom MAC Address.')
args = parser.parse_args()
print('espefuse.py v%s' % esptool.__version__)
if args.operation is None:
parser.print_help()
parser.exit(1)
# each 'operation' is a module-level function of the same name
operation_func = globals()[args.operation]
esp = esptool.ESP32ROM(args.port, baud=args.baud)
esp.connect(args.before)
# dict mapping register name to its efuse object
efuses = EspEfuses(esp)
operation_func(esp, efuses, args)
def _main():
try:
main()
except esptool.FatalError as e:
print('\nA fatal error occurred: %s' % e)
sys.exit(2)
if __name__ == '__main__':
_main()
|
themadinventor/esptool
|
espefuse.py
|
Python
|
gpl-2.0
| 42,922 | 0.004497 |
import abc
class PluginTypeBase(object):
""" Baseclass for plugin types.
This needs to be derived from in order for plugin types to
be accepted by plugz.
"""
__metaclass__ = abc.ABCMeta
plugintype = None
@staticmethod
def is_valid_file(file):
""" Accept or reject files as valid plugins. """
return file.endswith('.py')
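# Minimal sketch (illustrative, all names assumed) of a concrete plugin type built on
# the base class above:
#
#   class TextPluginType(PluginTypeBase):
#       plugintype = 'text'
#
#       @staticmethod
#       def is_valid_file(file):
#           return file.endswith('.txt')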
|
mistermatti/plugz
|
plugz/plugz.py
|
Python
|
bsd-3-clause
| 373 | 0.002681 |
import pycrs
import mpl_toolkits.basemap.pyproj as pyproj # Import the pyproj module
import shapefile as shp
import matplotlib.pyplot as plt
shpFilePath = r"taxi_zones\taxi_zones"
sf = shp.Reader(shpFilePath)
records = sf.records()
plt.figure()
for shape in sf.shapeRecords():
x = [i[0] for i in shape.shape.points[:]]
y = [i[1] for i in shape.shape.points[:]]
plt.plot(x,y)
projobj = pycrs.loader.from_file(r'taxi_zones\taxi_zones.prj')
proj4string = projobj.to_proj4()
print(proj4string)
isn2004=pyproj.Proj(proj4string, preserve_units=True)
wgs84=pyproj.Proj("+init=EPSG:4326")
i = 0
lat = []
lon = []
#1 foot = 0.3048 meters
conv = 0.3048
with open("2013000005_sampled.traj") as f:
next(f)
for line in f:
i += 1
# print line
strings = line.split(",")
co1 = float(strings[0])
co2 = float(strings[1])
x2,y2 = pyproj.transform(wgs84,isn2004 ,co1,co2)
lat.append(x2)
lon.append(y2)
# if i == 14450:
# break
if i == 1169120:
break
x1 = lat
y1 = lon
plt.plot(x1, y1, 'o', color='blue', markersize=7, markeredgewidth=0.0)
plt.show()
|
arg-hya/taxiCab
|
Plots/TrajectoryPlot/TrajectoryPlot.py
|
Python
|
gpl-3.0
| 1,224 | 0.013072 |
"""
This module contains all the Data Access Objects for models which are persisted to Elasticsearch
at some point in their lifecycle.
Each DAO is an extension of the octopus ESDAO utility class which provides all of the ES-level heavy lifting,
so these DAOs mostly just provide information on where to persist the data, and some additional storage-layer
query methods as required
"""
from octopus.modules.es import dao
class ContentLogDAO(dao.ESDAO):
__type__ = 'contentlog'
class UnroutedNotificationDAO(dao.ESDAO):
"""
DAO for UnroutedNotifications
"""
__type__ = 'unrouted'
""" The index type to use to store these objects """
@classmethod
def example(cls):
"""
request a document which acts as an example for this type
"""
from service.tests import fixtures
return cls(fixtures.NotificationFactory.unrouted_notification())
class RoutedNotificationDAO(dao.TimeBoxedTypeESDAO):
"""
DAO for RoutedNotification
    This is an extension of the TimeBoxedTypeESDAO object, which means that a new type is created every
    period (e.g. monthly) for new content.  This enables rapid dropping of old index types without affecting
    Elasticsearch performance, and works here because RoutedNotifications only persist for a limited time
"""
__type__ = 'routed'
""" The base index type to use to store these objects - this will be appended by the time-boxing features of the DAO with the creation timestamp """
@classmethod
def example(cls):
"""
request a document which acts as an example for this type
"""
from service.tests import fixtures
return cls(fixtures.NotificationFactory.routed_notification())
class FailedNotificationDAO(dao.ESDAO):
"""
DAO for FailedNotifications
"""
__type__ = "failed"
""" The index type to use to store these objects """
class RepositoryConfigDAO(dao.ESDAO):
"""
DAO for RepositoryConfig
"""
__type__ = 'repo_config'
""" The index type to use to store these objects """
class MatchProvenanceDAO(dao.ESDAO):
"""
DAO for MatchProvenance
"""
__type__ = "match_prov"
""" The index type to use to store these objects """
@classmethod
def pull_by_notification(cls, notification_id, size=10):
"""
List all of the match provenance information for the requested notification
:param notification_id: the id of the notification for which to retrieve match provenance
:param size: the maximum number to return (defaults to 10)
"""
q = MatchProvNotificationQuery(notification_id, size=size)
return cls.object_query(q=q.query())
class MatchProvNotificationQuery(object):
"""
Query wrapper which generates an ES query for retrieving match provenance objects
based on the notification to which they are attached
"""
def __init__(self, notification_id, size=10):
"""
Set the parameters of the query
:param notification_id: the id of the notification for which to retrieve match provenance
:param size: the maximum number to return (defaults to 10)
"""
self.notification_id = notification_id
self.size = size
def query(self):
"""
generate the query as a python dictionary object
:return: a python dictionary containing the ES query, ready for JSON serialisation
"""
return {
"query" : {
"term" : {"notification.exact" : self.notification_id}
},
"size" : self.size
}
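# Illustrative usage (assumed, not part of this module): fetch provenance for one
# notification via the DAO, which wraps the query above:
#
#   provs = MatchProvenanceDAO.pull_by_notification("abcd1234", size=25)
#   # executes {"query": {"term": {"notification.exact": "abcd1234"}}, "size": 25}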
class RetrievalRecordDAO(dao.ESDAO):
"""
DAO for RetrievalRecord
"""
__type__ = "retrieval"
""" The index type to use to store these objects """
class AccountDAO(dao.ESDAO):
"""
DAO for Account
"""
__type__ = "account"
""" The index type to use to store these objects """
|
JiscPER/jper
|
service/dao.py
|
Python
|
apache-2.0
| 3,957 | 0.006318 |
import math
import fpformat
import os
from pydrone.utils.data_structures import Graph
def world_generator(size, x_end, y_end, knowledge):
# Controllo se si richiede un mondo con il knowledge degli stati o meno
if knowledge:
world = Graph(x_end, y_end)
for i in range(size):
for j in range(size):
world.add_node_coord((i, j))
world.change_weight((i, j), float(fpformat.fix(math.sqrt(math.fabs(pow((x_end - i), 2) + pow((y_end - j), 2))), 3)))
world.change_weight((x_end, y_end), -1)
return world
else:
matrix = [[0 for i in range(size)] for j in range(size)]
for i in range(size):
for j in range(size):
matrix[i][j] = float(fpformat.fix(math.sqrt(math.fabs(pow((x_end - i), 2) + pow((y_end - j), 2))), 3))
matrix[x_end][y_end] = -1
return matrix
def matrix_generator(size):
matrix = [[0 for i in range(size)] for j in range(size)]
return matrix
def print_matrix(matrix):
os.system("clear")
size = len(matrix[0])
for j in range(size):
for i in range(size):
value = matrix[i][j]
if value > 0:
print "", "*",
else:
print "", "-",
print
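# Illustrative usage (added for clarity): build a 10x10 world whose weights are the
# distances to the goal cell (5, 5), without graph knowledge, and print it:
#
#   m = world_generator(10, 5, 5, False)
#   print_matrix(m)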
|
DMIAlumni/pydrone-game
|
pydrone/utils/matrix_generator.py
|
Python
|
bsd-2-clause
| 1,291 | 0.001549 |
from setuptools import setup
setup(name='decision_tree',
version='0.04',
description='Practice implementation of a classification decision tree',
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2.7',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
],
keywords='classification decision tree machine learning random forest',
url='https://github.com/metjush/decision_tree',
author='metjush',
author_email='metjush@gmail.com',
license='MIT',
packages=['decision_tree'],
install_requires=[
'numpy',
'sklearn'
],
include_package_data=True,
zip_safe=False)
|
metjush/decision_tree
|
setup.py
|
Python
|
mit
| 734 | 0.042234 |
# -*- coding: utf-8 -*-
import pytest
import re
import requests
try:
# Faster, C-ext
from cStringIO import StringIO
except ImportError:
# Slower, pure python
from StringIO import StringIO
from pdfminer.converter import TextConverter
from pdfminer.layout import LAParams
from pdfminer.pdfpage import PDFPage
from pdfminer.pdfinterp import PDFPageInterpreter, PDFResourceManager
from cfme.configure.about import product_assistance as about
from utils import version
def pdf_get_text(file_obj, page_nums):
output = StringIO()
manager = PDFResourceManager()
laparams = LAParams(all_texts=True, detect_vertical=True)
converter = TextConverter(manager, output, laparams=laparams)
interpreter = PDFPageInterpreter(manager, converter)
for page in PDFPage.get_pages(file_obj, page_nums):
interpreter.process_page(page)
converter.close()
text = output.getvalue().replace('\n', ' ')
output.close()
return text
@pytest.fixture(scope="module")
def guides():
return [
loc
for loc
in about.locators.iterkeys()
if loc.endswith("_guide")
and (
version.pick(about.locators[loc])
if isinstance(about.locators[loc], dict)
else about.locators[loc]
) is not None]
@pytest.mark.tier(3)
@pytest.fixture(scope="session")
def docs_info():
if version.current_version() < "5.4.0.1":
return [
'Control',
'Lifecycle and Automation',
'Quick Start',
'Settings And Operations',
'Insight',
'Integration Services'
]
elif version.current_version() < "5.5.0.12":
return [
'Insight',
'Control',
'Lifecycle and Automation',
'REST API',
'SOAP API',
'User',
'Settings and Operations'
]
elif version.appliance_is_downstream():
return [
'Monitoring Alerts Reporting',
'General Configuration',
'Virtual Machines Hosts',
'Methods For Automation',
'Infrastructure Inventory',
'Providers',
'Scripting Actions',
'Defining Policies Profiles'
]
else:
# Upstream version has no docs
return []
@pytest.mark.tier(2)
@pytest.mark.meta(blockers=[1272618])
@pytest.mark.sauce
def test_links(guides, soft_assert):
"""Test whether the PDF documents are present."""
pytest.sel.force_navigate("about")
for link in guides:
locator = getattr(about, link)
url = pytest.sel.get_attribute(locator, "href")
soft_assert(
requests.head(url, verify=False).status_code == 200,
"'{}' is not accessible".format(pytest.sel.text(locator).encode("utf-8").strip())
)
@pytest.mark.tier(3)
@pytest.mark.meta(blockers=[1272618])
def test_contents(guides, soft_assert):
"""Test contents of each document."""
pytest.sel.force_navigate("about")
precomp_noguide = re.compile("(.*) Guide")
cur_ver = version.current_version()
for link in guides:
locator = getattr(about, link)
url = pytest.sel.get_attribute(locator, "href")
data = requests.get(url, verify=False)
pdf_titlepage_text_low = pdf_get_text(StringIO(data.content), [0]).lower()
# don't include the word 'guide'
title_text_low = precomp_noguide.search(pytest.sel.text(locator)).group(1).lower()
expected = [title_text_low]
if cur_ver == version.LATEST:
expected.append('manageiq')
else:
expected.append('cloudforms')
maj_min = '{}.{}'.format(cur_ver.version[0], cur_ver.version[1])
expected.append(version.get_product_version(maj_min))
for exp_str in expected:
soft_assert(exp_str in pdf_titlepage_text_low,
"{} not in {}".format(exp_str, pdf_titlepage_text_low))
@pytest.mark.tier(3)
@pytest.mark.sauce
@pytest.mark.meta(blockers=[1232434, 1272618])
def test_info(guides, soft_assert):
pytest.sel.force_navigate("about")
for link in guides:
l_a = getattr(about, link)
# l_icon also implicitly checks for the icon url == text url
l_icon = lambda: pytest.sel.element(
"../a[contains(@href, '{}')]/img".format(
pytest.sel.get_attribute(l_a, "href").rsplit("/", 1)[-1]
),
root=l_a
)
l_icon_a = lambda: pytest.sel.element("..", root=l_icon)
soft_assert(
pytest.sel.get_attribute(l_icon, "alt") == pytest.sel.get_attribute(l_icon_a, "title"),
"Icon alt attr should match icon title attr ({})".format(pytest.sel.text(l_a))
)
soft_assert(
pytest.sel.get_attribute(l_icon_a, "href") == pytest.sel.get_attribute(l_a, "href"),
"Icon url should match text url ({})".format(pytest.sel.text(l_a))
)
@pytest.mark.tier(2)
@pytest.mark.ignore_stream("upstream")
@pytest.mark.meta(blockers=[1272618])
def test_all_docs_present(guides, docs_info):
pytest.sel.force_navigate("about")
docs_list = list(docs_info)
for link in guides:
for doc in docs_list:
if doc.lower() in pytest.sel.text(getattr(about, link)).lower():
break
else:
continue
docs_list.remove(doc)
assert len(docs_list) == 0, "All documents should be available ({} are missing)".format(
", ".join(docs_list)
)
|
lehinevych/cfme_tests
|
cfme/tests/configure/test_docs.py
|
Python
|
gpl-2.0
| 5,548 | 0.001802 |
import pyak
import yikbot
import time
# Latitude and Longitude of location where bot should be localized
yLocation = pyak.Location("42.270340", "-83.742224")
yb = yikbot.YikBot("yikBot", yLocation)
print "DEBUG: Registered yikBot with handle %s and id %s" % (yb.handle, yb.id)
print "DEBUG: Going to sleep, new yakkers must wait ~90 seconds before they can act"
time.sleep(90)
print "DEBUG: yikBot instance 90 seconds after initialization"
print vars(yb)
yb.boot()
|
congrieb/yikBot
|
start.py
|
Python
|
mit
| 469 | 0.004264 |
import unittest
import warnings
import datetime
from django.core.urlresolvers import reverse
from django.test import TestCase
from incuna.utils import find
from articles.models import Article
class ArticleAccessTests(TestCase):
fixtures = ['articles_data.json',]
def test_article_index(self):
response = self.client.get(reverse('article_index'))
for article in Article.objects.active():
self.assertContains(response, article.title)
def test_article_detail(self):
response = self.client.get(reverse('article_detail', args=['test-article',]))
article = Article.objects.active().get(slug='test-article')
self.assertContains(response, article.title)
class ArticleActiveTests(TestCase):
fixtures = ['articles_data.json',]
def test_article_active(self):
response = self.client.get(reverse('article_index'))
inactive_articles = Article.objects.exclude(pk__in=[a[0] for a in Article.objects.active().values_list('pk')])
assert(inactive_articles)
for article in inactive_articles:
self.assertNotContains(response, article.title)
def test_article_views_404(self):
response = self.client.get(reverse('article_detail', args=['inactive-article',]))
self.assertEquals(response.status_code, 404)
# extension related tests
class ArticleDatePublisherTests(TestCase):
fixtures = ['articles_datepublisher_data.json',]
def setUp(self, *args, **kwargs):
if bool(find(lambda f: f.name == 'publication_date', Article._meta.local_fields)) \
and bool(find(lambda f: f.name == 'publication_end_date', Article._meta.local_fields)):
self.skip = False
else:
warnings.warn("Skipping datepublisher tests. Extension not registered")
self.skip = True
def test_publication_date(self):
if self.skip:
return
article = Article.objects.active().get(slug='publication-date-test')
article.publication_date = datetime.datetime.now() + datetime.timedelta(1)
article.save()
response = self.client.get(reverse('article_detail', args=['publication-date-test',]))
self.assertEquals(response.status_code, 404)
article.publication_date = datetime.datetime.now() + datetime.timedelta(-1)
article.publication_end_date = datetime.datetime.now() + datetime.timedelta(-1)
article.save()
response = self.client.get(reverse('article_detail', args=['publication-date-test',]))
self.assertEquals(response.status_code, 404)
class ArticleTagsTests(TestCase):
fixtures = ['articles_tags_data.json',]
def setUp(self, *args, **kwargs):
if bool(find(lambda f: f.name == 'tags', Article._meta.many_to_many)):
self.skip = False
else:
warnings.warn("Skipping tags tests. Extension not registered")
self.skip = True
def test_tags(self):
if self.skip:
return
article = Article.objects.active().get(slug='tag-test')
article.tags.add("test", "testing")
response = self.client.get(reverse('article_tagged_list', args=['test',]))
self.assertContains(response, article.title)
def test_tags_404(self):
response = self.client.get(reverse('article_tagged_list', args=['tag_does_not_exist',]))
self.assertEquals(response.status_code, 404)
|
viswimmer1/PythonGenerator
|
data/python_files/29179833/tests.py
|
Python
|
gpl-2.0
| 3,436 | 0.009604 |
#
# ----------------------------------------------------------------------------------------------------
#
# Copyright (c) 2007, 2021, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 2 only, as
# published by the Free Software Foundation.
#
# This code is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# version 2 for more details (a copy is included in the LICENSE file that
# accompanied this code).
#
# You should have received a copy of the GNU General Public License version
# 2 along with this work; if not, write to the Free Software Foundation,
# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
# or visit www.oracle.com if you need additional information or have any
# questions.
#
# ----------------------------------------------------------------------------------------------------
#
r"""
mx is a command line tool for managing the development of Java code organized as suites of projects.
"""
from __future__ import print_function
import sys
if sys.version_info < (2, 7):
major, minor, micro, _, _ = sys.version_info
raise SystemExit('mx requires python 2.7+, not {0}.{1}.{2}'.format(major, minor, micro))
from abc import ABCMeta, abstractmethod, abstractproperty
if __name__ == '__main__':
# Rename this module as 'mx' so it is not re-executed when imported by other modules.
sys.modules['mx'] = sys.modules.pop('__main__')
try:
import defusedxml #pylint: disable=unused-import
from defusedxml.ElementTree import parse as etreeParse
except ImportError:
from xml.etree.ElementTree import parse as etreeParse
import os, errno, time, subprocess, shlex, zipfile, signal, tempfile, platform
import textwrap
import socket
import tarfile, gzip
import hashlib
import itertools
from functools import cmp_to_key
# TODO use defusedexpat?
import xml.parsers.expat, xml.sax.saxutils, xml.dom.minidom
from xml.dom.minidom import parseString as minidomParseString
import shutil, re
import pipes
import difflib
import glob
import filecmp
import json
from collections import OrderedDict, namedtuple, deque
from datetime import datetime, timedelta
from threading import Thread
from argparse import ArgumentParser, PARSER, REMAINDER, Namespace, HelpFormatter, ArgumentTypeError, RawTextHelpFormatter, FileType
from os.path import join, basename, dirname, exists, lexists, isabs, expandvars, isdir, islink, normpath, realpath, relpath, splitext
from tempfile import mkdtemp, mkstemp
from io import BytesIO
import fnmatch
import operator
import calendar
import random
from stat import S_IWRITE
from mx_commands import MxCommands, MxCommand
from copy import copy, deepcopy
import posixpath
_mx_commands = MxCommands("mx")
# Temporary imports and (re)definitions while porting mx from Python 2 to Python 3
if sys.version_info[0] < 3:
filter = itertools.ifilter # pylint: disable=redefined-builtin,invalid-name
def input(prompt=None): # pylint: disable=redefined-builtin
return raw_input(prompt) # pylint: disable=undefined-variable
import __builtin__ as builtins
import urllib2 # pylint: disable=unused-import
_urllib_request = urllib2
_urllib_error = urllib2
del urllib2
import urlparse as _urllib_parse
def _decode(x):
return x
def _encode(x):
return x
_unicode = unicode # pylint: disable=undefined-variable
import multiprocessing
else:
import builtins # pylint: disable=unused-import,no-name-in-module
import urllib.request as _urllib_request # pylint: disable=unused-import,no-name-in-module
import urllib.error as _urllib_error # pylint: disable=unused-import,no-name-in-module
import urllib.parse as _urllib_parse # pylint: disable=unused-import,no-name-in-module
def _decode(x):
return x.decode()
def _encode(x):
return x.encode()
_unicode = str
import multiprocessing.dummy as multiprocessing
class _DummyProcess(multiprocessing.DummyProcess):
def run(self):
try:
super(_DummyProcess, self).run()
except:
self._exitcode = 1
raise
@property
def exitcode(self):
return getattr(self, '_exitcode', super(_DummyProcess, self).exitcode)
multiprocessing.Process = _DummyProcess
### ~~~~~~~~~~~~~ _private
def _hashFromUrl(url):
logvv('Retrieving SHA1 from {}'.format(url))
hashFile = _urllib_request.urlopen(url)
try:
return hashFile.read()
except _urllib_error.URLError as e:
_suggest_http_proxy_error(e)
abort('Error while retrieving sha1 {}: {}'.format(url, str(e)))
finally:
if hashFile:
hashFile.close()
def _merge_file_contents(input_files, output_file):
for file_name in input_files:
with open(file_name, 'r') as input_file:
shutil.copyfileobj(input_file, output_file)
output_file.flush()
def _make_absolute(path, prefix):
"""
If 'path' is not absolute prefix it with 'prefix'
"""
return join(prefix, path)
def _cache_dir():
return _cygpathW2U(get_env('MX_CACHE_DIR', join(dot_mx_dir(), 'cache')))
def _global_env_file():
return _cygpathW2U(get_env('MX_GLOBAL_ENV', join(dot_mx_dir(), 'env')))
def _get_path_in_cache(name, sha1, urls, ext=None, sources=False, oldPath=False):
"""
Gets the path an artifact has (or would have) in the download cache.
"""
assert sha1 != 'NOCHECK', 'artifact for ' + name + ' cannot be cached since its sha1 is NOCHECK'
if ext is None:
for url in urls:
# Use extension of first URL whose path component ends with a non-empty extension
o = _urllib_parse.urlparse(url)
if o.path == "/remotecontent" and o.query.startswith("filepath"):
path = o.query
else:
path = o.path
ext = get_file_extension(path)
if ext:
ext = '.' + ext
break
if not ext:
abort('Could not determine a file extension from URL(s):\n ' + '\n '.join(urls))
assert os.sep not in name, name + ' cannot contain ' + os.sep
assert os.pathsep not in name, name + ' cannot contain ' + os.pathsep
if oldPath:
return join(_cache_dir(), name + ('.sources' if sources else '') + '_' + sha1 + ext) # mx < 5.176.0
filename = _map_to_maven_dist_name(name) + ('.sources' if sources else '') + ext
return join(_cache_dir(), name + '_' + sha1 + ('.dir' if not ext else ''), filename)
def _urlopen(*args, **kwargs):
timeout_attempts = [0]
timeout_retries = kwargs.pop('timeout_retries', 3)
def on_timeout():
if timeout_attempts[0] <= timeout_retries:
timeout_attempts[0] += 1
kwargs['timeout'] = kwargs.get('timeout', 5) * 2
warn("urlopen() timed out! Retrying with timeout of {}s.".format(kwargs['timeout']))
return True
return False
error500_attempts = 0
error500_limit = 5
while True:
try:
return _urllib_request.urlopen(*args, **kwargs)
except (_urllib_error.HTTPError) as e:
if e.code == 500:
if error500_attempts < error500_limit:
error500_attempts += 1
url = '?' if len(args) == 0 else args[0]
warn("Retrying after error reading from " + url + ": " + str(e))
time.sleep(0.2)
continue
raise
except _urllib_error.URLError as e:
if isinstance(e.reason, socket.error):
if e.reason.errno == errno.EINTR and 'timeout' in kwargs and is_interactive():
warn("urlopen() failed with EINTR. Retrying without timeout.")
del kwargs['timeout']
return _urllib_request.urlopen(*args, **kwargs)
if e.reason.errno == errno.EINPROGRESS:
if on_timeout():
continue
if isinstance(e.reason, socket.timeout):
if on_timeout():
continue
raise
except socket.timeout:
if on_timeout():
continue
raise
abort("should not reach here")
def _check_file_with_sha1(path, urls, sha1, sha1path, mustExist=True, newFile=False, logErrors=False):
"""
Checks if a file exists and is up to date according to the sha1.
Returns False if the file is not there or does not have the right checksum.
"""
sha1Check = sha1 and sha1 != 'NOCHECK'
def _sha1CachedValid():
if not exists(sha1path):
return False
if TimeStampFile(path, followSymlinks=True).isNewerThan(sha1path):
return False
return True
def _sha1Cached():
with open(sha1path, 'r') as f:
return f.read()[0:40]
def _writeSha1Cached(value=None):
with SafeFileCreation(sha1path) as sfc, open(sfc.tmpPath, 'w') as f:
f.write(value or sha1OfFile(path))
if exists(path):
if sha1Check and sha1:
if not _sha1CachedValid() or (newFile and sha1 != _sha1Cached()):
logv('Create/update SHA1 cache file ' + sha1path)
_writeSha1Cached()
if sha1 != _sha1Cached():
computedSha1 = sha1OfFile(path)
if sha1 == computedSha1:
warn('Fixing corrupt SHA1 cache file ' + sha1path)
_writeSha1Cached(computedSha1)
return True
if logErrors:
size = os.path.getsize(path)
log_error('SHA1 of {} [size: {}] ({}) does not match expected value ({})'.format(TimeStampFile(path), size, computedSha1, sha1))
return False
elif mustExist:
if logErrors:
log_error("'{}' does not exist".format(path))
return False
return True
def _needsUpdate(newestInput, path):
"""
Determines if the file denoted by `path` does not exist or `newestInput` is not None
and `path`'s latest modification time is older than the `newestInput` TimeStampFile.
Returns a string describing why `path` needs updating or None if it does not need updating.
"""
if not exists(path):
return path + ' does not exist'
if newestInput:
ts = TimeStampFile(path, followSymlinks=False)
if ts.isOlderThan(newestInput):
return '{} is older than {}'.format(ts, newestInput)
return None
def _function_code(f):
if hasattr(f, 'func_code'):
# Python 2
return f.func_code
# Python 3
return f.__code__
def _check_output_str(*args, **kwargs):
try:
return _decode(subprocess.check_output(*args, **kwargs))
except subprocess.CalledProcessError as e:
if e.output:
e.output = _decode(e.output)
if hasattr(e, 'stderr') and e.stderr:
e.stderr = _decode(e.stderr)
raise e
def _with_metaclass(meta, *bases):
"""Create a base class with a metaclass."""
# Copyright (c) 2010-2018 Benjamin Peterson
# Taken from https://github.com/benjaminp/six/blob/8da94b8a153ceb0d6417d76729ba75e80eaa75c1/six.py#L820
# MIT license
# This requires a bit of explanation: the basic idea is to make a dummy
# metaclass for one level of class instantiation that replaces itself with
# the actual metaclass.
class MetaClass(type):
def __new__(mcs, name, this_bases, d):
return meta(name, bases, d)
@classmethod
def __prepare__(mcs, name, this_bases):
return meta.__prepare__(name, bases)
return type.__new__(MetaClass, '_with_metaclass({}, {})'.format(meta, bases), (), {}) #pylint: disable=unused-variable
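# Illustrative usage of _with_metaclass (this is the pattern used by SuiteConstituent below
# to combine ABCMeta with the Comparable base class; the class name here is hypothetical):
#
#   class _AbstractExample(_with_metaclass(ABCMeta, object)):
#       pass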
def _validate_abolute_url(urlstr, acceptNone=False):
if urlstr is None:
return acceptNone
url = _urllib_parse.urlsplit(urlstr)
return url.scheme and (url.netloc or url.path)
def _safe_path(path):
"""
If not on Windows, this function returns `path`.
Otherwise, it returns a potentially transformed path that is safe for file operations.
This works around the MAX_PATH limit on Windows:
https://msdn.microsoft.com/en-us/library/windows/desktop/aa365247(v=vs.85).aspx#maxpath
"""
if is_windows():
if _opts.verbose and '/' in path:
warn("Forward slash in path on windows: {}".format(path))
import traceback
traceback.print_stack()
path = normpath(path)
MAX_PATH = 260 # pylint: disable=invalid-name
path_len = len(path) + 1 # account for trailing NUL
if isabs(path) and path_len >= MAX_PATH:
if path.startswith('\\\\'):
if path[2:].startswith('?\\'):
# if it already has a \\?\ don't do the prefix
pass
else:
# Only a UNC path has a double slash prefix.
# Replace it with `\\?\UNC\'. For example:
#
# \\Mac\Home\mydir
#
# becomes:
#
# \\?\UNC\Mac\Home\mydir
#
path = '\\\\?\\UNC' + path[1:]
else:
path = '\\\\?\\' + path
path = _unicode(path)
return path
def atomic_file_move_with_fallback(source_path, destination_path):
is_directory = isdir(source_path) and not islink(source_path)
copy_function = copytree if is_directory else shutil.copyfile
remove_function = rmtree if is_directory else os.remove
temp_function = mkdtemp if is_directory else mkstemp
try:
# This can fail if we move across file systems.
os.rename(source_path, destination_path)
except:
destination_temp_path = temp_function(prefix=basename(destination_path), dir=dirname(destination_path))
# We are only interested in a path, not a file itself. For directories, using copytree on an existing directory can fail.
remove_function(destination_temp_path)
# This can get interrupted mid-copy. Since we cannot guarantee the atomicity of copytree,
# we copy to a .tmp folder first and then atomically rename.
copy_function(source_path, destination_temp_path)
os.rename(destination_temp_path, destination_path)
remove_function(source_path)
def _tarfile_chown(tf, tarinfo, targetpath):
if sys.version_info < (3, 5):
tf.chown(tarinfo, targetpath)
else:
tf.chown(tarinfo, targetpath, False) # extra argument in Python 3.5, False gives previous behavior
### ~~~~~~~~~~~~~ command
def command_function(name, fatalIfMissing=True):
"""
Return the function for the (possibly overridden) command named `name`.
If there is no such command, abort if `fatalIfMissing` is True, else return None.
"""
return _mx_commands.command_function(name, fatalIfMissing)
def update_commands(suite, new_commands):
"""
Using the decorator mx_command is preferred over this function.
:param suite: the suite for which the commands are added.
:param new_commands: keys are command names, values are lists: [<function>, <usage msg>, <format doc function>].
If any of the format args are callable, they are called with an 'env' argument before being
used in the call to str.format().
"""
suite_name = suite if isinstance(suite, str) else suite.name
_length_of_command = 4
for command_name, command_list in new_commands.items():
assert len(command_list) > 0 and command_list[0] is not None
args = [suite_name, command_name] + command_list[1:_length_of_command]
command_decorator = command(*args)
# apply the decorator so all functions are tracked
command_list[0] = command_decorator(command_list[0])
def command(suite_name, command_name, usage_msg='', doc_function=None, props=None, auto_add=True):
"""
Decorator for making a function an mx shell command.
The annotated function should have a single parameter typed List[String].
:param suite_name: the suite to which the command belongs.
:param command_name: the command name as used in the shell.
:param usage_msg: the usage message to display.
:param doc_function: function that renders the documentation for this command.
:param props: a dictionary of properties attributed to this command.
:param auto_add: automatically add the command to the set of known commands.
:return: the decorator factory for the function.
"""
def mx_command_decorator_factory(command_func):
mx_command = MxCommand(_mx_commands, command_func, suite_name, command_name, usage_msg, doc_function, props)
if auto_add:
_mx_commands.add_commands([mx_command])
return mx_command
return mx_command_decorator_factory
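# Illustrative sketch of registering a command with the decorator above (the suite and
# command names are hypothetical):
#
#   @command('mysuite', 'hello', usage_msg='[args...]')
#   def hello(args):
#       """prints a greeting"""
#       print('hello ' + ' '.join(args))
#
# Once registered with _mx_commands, the function can be invoked from the mx command line
# as `mx hello ...`.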
### ~~~~~~~~~~~~~ Language support
# Support for comparing objects given removal of `cmp` function in Python 3.
# https://portingguide.readthedocs.io/en/latest/comparisons.html
def compare(a, b):
return (a > b) - (a < b)
class Comparable(object):
def _checked_cmp(self, other, f):
compar = self.__cmp__(other) #pylint: disable=assignment-from-no-return
return f(compar, 0) if compar is not NotImplemented else compare(id(self), id(other))
def __lt__(self, other):
return self._checked_cmp(other, lambda a, b: a < b)
def __gt__(self, other):
return self._checked_cmp(other, lambda a, b: a > b)
def __eq__(self, other):
return self._checked_cmp(other, lambda a, b: a == b)
def __le__(self, other):
return self._checked_cmp(other, lambda a, b: a <= b)
def __ge__(self, other):
return self._checked_cmp(other, lambda a, b: a >= b)
def __ne__(self, other):
return self._checked_cmp(other, lambda a, b: a != b)
def __cmp__(self, other): # to override
raise TypeError("No override for compare")
from mx_javacompliance import JavaCompliance
class DynamicVar(object):
def __init__(self, initial_value):
self.value = initial_value
def get(self):
return self.value
def set_scoped(self, newvalue):
return DynamicVarScope(self, newvalue)
class DynamicVarScope(object):
def __init__(self, dynvar, newvalue):
self.dynvar = dynvar
self.newvalue = newvalue
def __enter__(self):
assert not hasattr(self, "oldvalue")
self.oldvalue = self.dynvar.value
self.dynvar.value = self.newvalue
def __exit__(self, tpe, value, traceback):
self.dynvar.value = self.oldvalue
self.oldvalue = None
self.newvalue = None
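# Illustrative sketch of scoped overriding with DynamicVar (hypothetical variable):
#
#   flag = DynamicVar(False)
#   with flag.set_scoped(True):
#       assert flag.get() is True
#   assert flag.get() is False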
class ArgParser(ArgumentParser):
# Override parent to append the list of available commands
def format_help(self):
return ArgumentParser.format_help(self) + """
environment variables:
JAVA_HOME Default value for primary JDK directory. Can be overridden with --java-home option.
EXTRA_JAVA_HOMES Secondary JDK directories. Can be overridden with --extra-java-homes option.
MX_BUILD_EXPLODED Create and use jar distributions as extracted directories.
MX_ALT_OUTPUT_ROOT Alternate directory for generated content. Instead of <suite>/mxbuild, generated
content will be placed under $MX_ALT_OUTPUT_ROOT/<suite>. A suite can override
this with the suite level "outputRoot" attribute in suite.py.
MX_EXEC_LOG Specifies default value for --exec-log option.
MX_CACHE_DIR Override the default location of the mx download cache. Defaults to `~/.mx/cache`.
MX_GLOBAL_ENV Override the default location of the global env file that is always loaded at startup.
Defaults to `~/.mx/env`. Can be disabled by setting it to an empty string.
MX_GIT_CACHE Use a cache for git objects during clones.
* Setting it to `reference` will clone repositories using the cache and let them
reference the cache (if the cache gets deleted these repositories will be
incomplete).
* Setting it to `dissociated` will clone using the cache but then dissociate the
repository from the cache.
* Setting it to `refcache` will synchronize with server only if a branch is
requested or if a specific revision is requested which does not exist in the
local cache. Hence, remote references will be synchronized occasionally. This
allows cloning without even contacting the git server.
The cache is located at `~/.mx/git-cache`.
""" + _format_commands()
def __init__(self, parents=None):
self.parsed = False
if not parents:
parents = []
ArgumentParser.__init__(self, prog='mx', parents=parents, add_help=len(parents) != 0, formatter_class=lambda prog: HelpFormatter(prog, max_help_position=32, width=120))
if len(parents) != 0:
# Arguments are inherited from the parents
return
self.add_argument('-v', action='store_true', dest='verbose', help='enable verbose output')
self.add_argument('-V', action='store_true', dest='very_verbose', help='enable very verbose output')
self.add_argument('--no-warning', action='store_false', dest='warn', help='disable warning messages')
self.add_argument('--quiet', action='store_true', help='disable log messages')
self.add_argument('-y', action='store_const', const='y', dest='answer', help='answer \'y\' to all questions asked')
self.add_argument('-n', action='store_const', const='n', dest='answer', help='answer \'n\' to all questions asked')
self.add_argument('-p', '--primary-suite-path', help='set the primary suite directory', metavar='<path>')
self.add_argument('--dbg', dest='java_dbg_port', help='make Java processes wait on [<host>:]<port> for a debugger', metavar='<address>') # metavar=[<host>:]<port> https://bugs.python.org/issue11874
self.add_argument('-d', action='store_const', const=8000, dest='java_dbg_port', help='alias for "-dbg 8000"')
self.add_argument('--attach', dest='attach', help='Connect to existing server running at [<host>:]<port>', metavar='<address>') # metavar=[<host>:]<port> https://bugs.python.org/issue11874
self.add_argument('--backup-modified', action='store_true', help='backup generated files if they pre-existed and are modified')
self.add_argument('--exec-log', help='A file to which the environment and command line for each subprocess executed by mx is appended', metavar='<path>', default=get_env("MX_EXEC_LOG"))
self.add_argument('--cp-pfx', dest='cp_prefix', help='class path prefix', metavar='<arg>')
self.add_argument('--cp-sfx', dest='cp_suffix', help='class path suffix', metavar='<arg>')
jargs = self.add_mutually_exclusive_group()
jargs.add_argument('-J', dest='java_args', help='Java VM arguments (e.g. "-J-dsa")', metavar='<arg>')
jargs.add_argument('--J', dest='java_args_legacy', help='Java VM arguments (e.g. "--J @-dsa")', metavar='@<args>')
jpargs = self.add_mutually_exclusive_group()
jpargs.add_argument('-P', action='append', dest='java_args_pfx', help='prefix Java VM arguments (e.g. "-P-dsa")', metavar='<arg>', default=[])
jpargs.add_argument('--Jp', action='append', dest='java_args_pfx_legacy', help='prefix Java VM arguments (e.g. --Jp @-dsa)', metavar='@<args>', default=[])
jaargs = self.add_mutually_exclusive_group()
jaargs.add_argument('-A', action='append', dest='java_args_sfx', help='suffix Java VM arguments (e.g. "-A-dsa")', metavar='<arg>', default=[])
jaargs.add_argument('--Ja', action='append', dest='java_args_sfx_legacy', help='suffix Java VM arguments (e.g. --Ja @-dsa)', metavar='@<args>', default=[])
self.add_argument('--user-home', help="user's home directory", metavar='<path>', default=os.path.expanduser('~'))
self.add_argument('--java-home', help='primary JDK directory (must be JDK 7 or later)', metavar='<path>')
self.add_argument('--jacoco', help='instruments selected classes using JaCoCo', default='off', choices=['off', 'on', 'append'])
self.add_argument('--jacoco-whitelist-package', help='only include classes in the specified package', metavar='<package>', action='append', default=[])
self.add_argument('--jacoco-exclude-annotation', help='exclude classes with annotation from JaCoCo instrumentation', metavar='<annotation>', action='append', default=[])
self.add_argument('--jacoco-dest-file', help='path of the JaCoCo dest file, which contains the execution data', metavar='<path>', action='store', default='jacoco.exec')
self.add_argument('--extra-java-homes', help='secondary JDK directories separated by "' + os.pathsep + '"', metavar='<path>')
self.add_argument('--strict-compliance', action='store_true', dest='strict_compliance', help='use JDK matching a project\'s Java compliance when compiling (legacy - this is the only supported mode)', default=True)
self.add_argument('--ignore-project', action='append', dest='ignored_projects', help='name of project to ignore', metavar='<name>', default=[])
self.add_argument('--kill-with-sigquit', action='store_true', dest='killwithsigquit', help='send sigquit first before killing child processes')
self.add_argument('--suite', action='append', dest='specific_suites', help='limit command to the given suite', metavar='<name>', default=[])
self.add_argument('--suitemodel', help='mechanism for locating imported suites', metavar='<arg>')
self.add_argument('--primary', action='store_true', help='limit command to primary suite')
self.add_argument('--dynamicimports', action='append', dest='dynamic_imports', help='dynamically import suite <name>', metavar='<name>', default=[])
self.add_argument('--no-download-progress', action='store_true', help='disable download progress meter')
self.add_argument('--version', action='store_true', help='print version and exit')
self.add_argument('--mx-tests', action='store_true', help='load mxtests suite (mx debugging)')
self.add_argument('--jdk', action='store', help='JDK to use for the "java" command', metavar='<tag:compliance>')
self.add_argument('--jmods-dir', action='store', help='path to built jmods (default JAVA_HOME/jmods)', metavar='<path>')
self.add_argument('--version-conflict-resolution', dest='version_conflict_resolution', action='store', help='resolution mechanism used when a suite is imported with different versions', default='suite', choices=['suite', 'none', 'latest', 'latest_all', 'ignore'])
self.add_argument('-c', '--max-cpus', action='store', type=int, dest='cpu_count', help='the maximum number of cpus to use during build', metavar='<cpus>', default=None)
self.add_argument('--proguard-cp', action='store', help='class path containing ProGuard jars to be used instead of default versions')
self.add_argument('--strip-jars', action='store_true', help='produce and use stripped jars in all mx commands.')
self.add_argument('--env', dest='additional_env', help='load an additional env file in the mx dir of the primary suite', metavar='<name>')
self.add_argument('--trust-http', action='store_true', help='Suppress warning about downloading from non-https sources')
self.add_argument('--multiarch', action='store_true', help='enable all architectures of native multiarch projects (not just the host architecture)')
self.add_argument('--dump-task-stats', help='Dump CSV formatted start/end timestamps for each build task. If set to \'-\' it will print it to stdout, otherwise the CSV will be written to <path>', metavar='<path>', default=None)
self.add_argument('--compdb', action='store', metavar='<file>', help="generate a JSON compilation database for native "
"projects and store it in the given <file>. If <file> is 'default', the compilation database will "
"be stored in the parent directory of the repository containing the primary suite. This option "
"can also be configured using the MX_COMPDB environment variable. Use --compdb none to disable.")
self.add_argument('--arch', action='store', dest='arch', help='force use of the specified architecture')
if not is_windows():
# Time outs are (currently) implemented with Unix specific functionality
self.add_argument('--timeout', help='timeout (in seconds) for command', type=int, default=0, metavar='<secs>')
self.add_argument('--ptimeout', help='timeout (in seconds) for subprocesses', type=int, default=0, metavar='<secs>')
def _parse_cmd_line(self, opts, firstParse):
if firstParse:
parser = ArgParser(parents=[self])
parser.add_argument('initialCommandAndArgs', nargs=REMAINDER, metavar='command args...')
# Legacy support - these options are recognized during first parse and
# appended to the unknown options to be reparsed in the second parse
parser.add_argument('--vm', action='store', dest='vm', help='the VM type to build/run')
parser.add_argument('--vmbuild', action='store', dest='vmbuild', help='the VM build to build/run')
# Parse the known mx global options and preserve the unknown args, command and
# command args for the second parse.
_, self.unknown = parser.parse_known_args(namespace=opts)
for deferrable in _opts_parsed_deferrables:
deferrable()
if opts.version:
print('mx version ' + str(version))
sys.exit(0)
if opts.vm: self.unknown += ['--vm=' + opts.vm]
if opts.vmbuild: self.unknown += ['--vmbuild=' + opts.vmbuild]
self.initialCommandAndArgs = opts.__dict__.pop('initialCommandAndArgs')
# For some reason, argparse considers an unknown argument starting with '-'
# and containing a space as a positional argument instead of an optional
# argument. Subsequent arguments starting with '-' are also considered as
# positional. We need to treat all of these as unknown optional arguments.
while len(self.initialCommandAndArgs) > 0:
arg = self.initialCommandAndArgs[0]
if arg.startswith('-'):
self.unknown.append(arg)
del self.initialCommandAndArgs[0]
else:
break
# Give the timeout options a default value to avoid the need for hasattr() tests
opts.__dict__.setdefault('timeout', 0)
opts.__dict__.setdefault('ptimeout', 0)
if opts.java_args_legacy:
opts.java_args = opts.java_args_legacy.lstrip('@')
if opts.java_args_pfx_legacy:
opts.java_args_pfx = [s.lstrip('@') for s in opts.java_args_pfx_legacy]
if opts.java_args_sfx_legacy:
opts.java_args_sfx = [s.lstrip('@') for s in opts.java_args_sfx_legacy]
if opts.very_verbose:
opts.verbose = True
if opts.user_home is None or opts.user_home == '':
abort('Could not find user home. Use --user-home option or ensure HOME environment variable is set.')
if opts.primary and primary_suite():
opts.specific_suites.append(primary_suite().name)
if opts.java_home is not None:
os.environ['JAVA_HOME'] = opts.java_home
if opts.extra_java_homes is not None:
os.environ['EXTRA_JAVA_HOMES'] = opts.extra_java_homes
os.environ['HOME'] = opts.user_home
global _primary_suite_path
_primary_suite_path = opts.primary_suite_path or os.environ.get('MX_PRIMARY_SUITE_PATH')
if _primary_suite_path:
_primary_suite_path = os.path.abspath(_primary_suite_path)
global _suitemodel
_suitemodel = SuiteModel.create_suitemodel(opts)
# Communicate primary suite path to mx subprocesses
if _primary_suite_path:
os.environ['MX_PRIMARY_SUITE_PATH'] = _primary_suite_path
opts.ignored_projects += os.environ.get('IGNORED_PROJECTS', '').split(',')
mx_gate._jacoco = opts.jacoco
mx_gate._jacoco_whitelisted_packages.extend(opts.jacoco_whitelist_package)
mx_gate.add_jacoco_excluded_annotations(opts.jacoco_exclude_annotation)
mx_gate.Task.verbose = opts.verbose
if opts.exec_log:
try:
ensure_dir_exists(dirname(opts.exec_log))
with open(opts.exec_log, 'a'):
pass
except IOError as e:
abort('Error opening {} specified by --exec-log: {}'.format(opts.exec_log, e))
system_arch = platform.uname()[4]
if opts.arch and opts.arch != system_arch:
warn('overriding detected architecture ({}) with {}'.format(system_arch, opts.arch))
else:
parser = ArgParser(parents=[self])
parser.add_argument('commandAndArgs', nargs=REMAINDER, metavar='command args...')
args = self.unknown + self.initialCommandAndArgs
parser.parse_args(args=args, namespace=opts)
commandAndArgs = opts.__dict__.pop('commandAndArgs')
if self.initialCommandAndArgs != commandAndArgs:
abort('Suite specific global options must use name=value format: {0}={1}'.format(self.unknown[-1], self.initialCommandAndArgs[0]))
self.parsed = True
return commandAndArgs
def add_argument(*args, **kwargs):
"""
Defines a single command-line argument.
"""
assert _argParser is not None
_argParser.add_argument(*args, **kwargs)
def remove_doubledash(args):
if '--' in args:
args.remove('--')
def ask_question(question, options, default=None, answer=None):
""""""
assert not default or default in options
questionMark = '? ' + options + ': '
if default:
questionMark = questionMark.replace(default, default.upper())
if answer:
answer = str(answer)
print(question + questionMark + answer)
else:
if is_interactive():
answer = input(question + questionMark) or default
while not answer:
answer = input(question + questionMark)
else:
if default:
answer = default
else:
abort("Can not answer '" + question + "?' if stdin is not a tty")
return answer.lower()
def ask_yes_no(question, default=None):
""""""
return ask_question(question, '[yn]', default, _opts.answer).startswith('y')
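# Illustrative usage (hypothetical prompt and directory): the default, if given, is shown
# in upper case and the global -y/-n answer is honoured.
#
#   if ask_yes_no('Delete the build directory', default='n'):
#       rmtree(build_dir)  # build_dir is a hypothetical path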
def warn(msg, context=None):
if _opts.warn and not _opts.quiet:
if context is not None:
if callable(context):
contextMsg = context()
elif hasattr(context, '__abort_context__'):
contextMsg = context.__abort_context__()
else:
contextMsg = str(context)
msg = contextMsg + ":\n" + msg
print(colorize('WARNING: ' + msg, color='magenta', bright=True, stream=sys.stderr), file=sys.stderr)
class Timer():
"""
A simple timing facility.
Example 1:
with Timer('phase'):
phase()
will emit the following as soon as `phase()` returns:
"phase took 2.45 seconds"
Example 2:
times = []
with Timer('phase1', times):
phase1()
with Timer('phase2', times):
phase2()
will not emit anything but will leave `times` with something like:
[('phase1', 2.45), ('phase2', 1.75)]
See also _show_timestamp.
"""
def __init__(self, name, times=None):
self.name = name
self.times = times
def __enter__(self):
self.start = time.time()
return self
def __exit__(self, t, value, traceback):
elapsed = time.time() - self.start
if self.times is None:
print('{} took {} seconds'.format(self.name, elapsed))
else:
self.times.append((self.name, elapsed))
_last_timestamp = None
def _show_timestamp(label):
"""
Prints the current date and time followed by `label` followed by the
milliseconds elapsed since the last call to this method, if any.
"""
global _last_timestamp
now = datetime.utcnow()
if _last_timestamp is not None:
duration = (now - _last_timestamp).total_seconds() * 1000
print('{}: {} [{:.02f} ms]'.format(now, label, duration))
else:
print('{}: {}'.format(now, label))
_last_timestamp = now
def glob_match_any(patterns, path):
return any((glob_match(pattern, path) for pattern in patterns))
def glob_match(pattern, path):
"""
Matches a path against a pattern using glob's special rules. In particular, the pattern is checked against each part
of the path, and path components starting with `.` are not matched unless the corresponding pattern component also starts with `.`.
:param str pattern: The pattern to match with glob syntax
:param str path: The path to be checked against the pattern
:return: The part of the path that matches or None if the path does not match
"""
pattern_parts = pattern.replace(os.path.sep, '/').split('/')
path_parts = path.replace(os.path.sep, '/').split('/')
if len(path_parts) < len(pattern_parts):
return None
for pattern_part, path_part in zip(pattern_parts, path_parts):
if len(pattern_part) > 0 and pattern_part[0] != '.' and len(path_part) > 0 and path_part[0] == '.':
return None
if not fnmatch.fnmatch(path_part, pattern_part):
return None
return '/'.join(path_parts[:len(pattern_parts)])
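# Illustrative examples of glob_match semantics (hypothetical paths):
#
#   glob_match('src/*.py', 'src/mx.py')       # -> 'src/mx.py'
#   glob_match('src/*.py', 'src/.hidden.py')  # -> None (dot files need a dot pattern)
#   glob_match('src', 'src/mx.py')            # -> 'src' (only the matching prefix is returned)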
### ~~~~~~~~~~~~~ Suite
# Define this machinery early in case other modules want to use them
# Names of commands that don't need a primary suite.
# This cannot be used outside of mx because of implementation restrictions
currently_loading_suite = DynamicVar(None)
_suite_context_free = ['init', 'version', 'urlrewrite']
def _command_function_names(func):
"""
Generates a list of guesses for the command name based on its function name
"""
if isinstance(func, MxCommand):
func_name = func.command
else:
func_name = func.__name__
command_names = [func_name]
if func_name.endswith('_cli'):
command_names.append(func_name[0:-len('_cli')])
for command_name in command_names:
if '_' in command_name:
command_names.append(command_name.replace("_", "-"))
return command_names
def suite_context_free(func):
"""
Decorator for commands that don't need a primary suite.
"""
_suite_context_free.extend(_command_function_names(func))
return func
# Names of commands that don't need a primary suite but will use one if it can be found.
# This cannot be used outside of mx because of implementation restrictions
_optional_suite_context = ['help', 'paths']
def optional_suite_context(func):
"""
Decorator for commands that don't need a primary suite but will use one if it can be found.
"""
_optional_suite_context.extend(_command_function_names(func))
return func
# Names of commands that need a primary suite but don't need suites to be loaded.
# This cannot be used outside of mx because of implementation restrictions
_no_suite_loading = []
def no_suite_loading(func):
"""
Decorator for commands that need a primary suite but don't need suites to be loaded.
"""
_no_suite_loading.extend(_command_function_names(func))
return func
# Names of commands that need a primary suite but don't need suites to be discovered.
# This cannot be used outside of mx because of implementation restrictions
_no_suite_discovery = []
def no_suite_discovery(func):
"""
Decorator for commands that need a primary suite but don't need suites to be discovered.
"""
_no_suite_discovery.extend(_command_function_names(func))
return func
class SuiteModel:
"""
Defines how to locate a URL/path for a suite, including imported suites.
Conceptually a SuiteModel is defined by a primary suite URL/path,
and a map from suite name to URL/path for imported suites.
Subclasses define a specific implementation.
"""
def __init__(self):
self.primaryDir = None
self.suitenamemap = {}
def find_suite_dir(self, suite_import):
"""locates the URL/path for suite_import or None if not found"""
abort('find_suite_dir not implemented')
def set_primary_dir(self, d):
"""informs that d is the primary suite directory"""
self._primaryDir = d
def importee_dir(self, importer_dir, suite_import, check_alternate=True):
"""
returns the directory path for an import of suite_import.name, given importer_dir.
For a "src" suite model, if check_alternate == True and if suite_import specifies an alternate URL,
check whether path exists and if not, return the alternate.
"""
abort('importee_dir not implemented')
def nestedsuites_dirname(self):
"""Returns the dirname that contains any nested suites if the model supports that"""
return None
def _search_dir(self, searchDir, suite_import):
if suite_import.suite_dir:
sd = _is_suite_dir(suite_import.suite_dir, _mxDirName(suite_import.name))
assert sd
return sd
if not exists(searchDir):
return None
found = []
for dd in os.listdir(searchDir):
if suite_import.in_subdir:
candidate = join(searchDir, dd, suite_import.name)
else:
candidate = join(searchDir, dd)
sd = _is_suite_dir(candidate, _mxDirName(suite_import.name))
if sd is not None:
found.append(sd)
if len(found) == 0:
return None
elif len(found) == 1:
return found[0]
else:
abort("Multiple suites match the import {}:\n{}".format(suite_import.name, "\n".join(found)))
def verify_imports(self, suites, args):
"""Ensure that the imports are consistent."""
def _check_exists(self, suite_import, path, check_alternate=True):
if check_alternate and suite_import.urlinfos is not None and not exists(path):
return suite_import.urlinfos
return path
@staticmethod
def create_suitemodel(opts):
envKey = 'MX__SUITEMODEL'
default = os.environ.get(envKey, 'sibling')
name = getattr(opts, 'suitemodel') or default
# Communicate the suite model to mx subprocesses
os.environ[envKey] = name
if name.startswith('sibling'):
return SiblingSuiteModel(_primary_suite_path, name)
elif name.startswith('nested'):
return NestedImportsSuiteModel(_primary_suite_path, name)
else:
abort('unknown suitemodel type: ' + name)
@staticmethod
def siblings_dir(suite_dir):
if exists(suite_dir):
_, primary_vc_root = VC.get_vc_root(suite_dir, abortOnError=False)
if not primary_vc_root:
suite_parent = dirname(suite_dir)
# Use the heuristic of a 'ci.hocon' or '.mx_vcs_root' file being
# at the root of a repo that contains multiple suites.
hocon = join(suite_parent, 'ci.hocon')
mx_vcs_root = join(suite_parent, '.mx_vcs_root')
if exists(hocon) or exists(mx_vcs_root):
return dirname(suite_parent)
return suite_parent
else:
primary_vc_root = suite_dir
return dirname(primary_vc_root)
@staticmethod
def _checked_to_importee_tuple(checked, suite_import):
""" Converts the result of `_check_exists` to a tuple containing the result of `_check_exists` and
the directory in which the importee can be found.
If the result of `_check_exists` is the urlinfos list, the returned directory is relative to where the repository would be checked out.
"""
if isinstance(checked, list):
return checked, suite_import.name if suite_import.in_subdir else None
else:
return checked, join(checked, suite_import.name) if suite_import.in_subdir else checked
class SiblingSuiteModel(SuiteModel):
"""All suites are siblings in the same parent directory, recorded as _suiteRootDir"""
def __init__(self, suiteRootDir, option):
SuiteModel.__init__(self)
self._suiteRootDir = suiteRootDir
def find_suite_dir(self, suite_import):
logvv("find_suite_dir(SiblingSuiteModel({}), {})".format(self._suiteRootDir, suite_import))
return self._search_dir(self._suiteRootDir, suite_import)
def set_primary_dir(self, d):
logvv("set_primary_dir(SiblingSuiteModel({}), {})".format(self._suiteRootDir, d))
SuiteModel.set_primary_dir(self, d)
self._suiteRootDir = SuiteModel.siblings_dir(d)
logvv("self._suiteRootDir = {}".format(self._suiteRootDir))
def importee_dir(self, importer_dir, suite_import, check_alternate=True):
suitename = suite_import.name
if suitename in self.suitenamemap:
suitename = self.suitenamemap[suitename]
# Try to use the URL first so that a big repo is cloned to a local
# directory whose name is based on the repo instead of a suite
# nested in the big repo.
base = None
for urlinfo in suite_import.urlinfos:
if urlinfo.abs_kind() == 'source':
# 'https://github.com/graalvm/graal.git' -> 'graal'
base, _ = os.path.splitext(basename(_urllib_parse.urlparse(urlinfo.url).path))
if base: break
if base:
path = join(SiblingSuiteModel.siblings_dir(importer_dir), base)
else:
path = join(SiblingSuiteModel.siblings_dir(importer_dir), suitename)
checked = self._check_exists(suite_import, path, check_alternate)
return SuiteModel._checked_to_importee_tuple(checked, suite_import)
def verify_imports(self, suites, args):
if not args:
args = []
results = []
# Ensure that all suites in the same repo import the same version of other suites
dirs = {s.vc_dir for s in suites if s.dir != s.vc_dir}
for vc_dir in dirs:
imports = {}
for suite_dir in [_is_suite_dir(join(vc_dir, x)) for x in os.listdir(vc_dir) if _is_suite_dir(join(vc_dir, x))]:
suite = SourceSuite(suite_dir, load=False, primary=True)
for suite_import in suite.suite_imports:
current_import = imports.get(suite_import.name)
if not current_import:
imports[suite_import.name] = (suite, suite_import.version)
else:
importing_suite, version = current_import
if suite_import.version != version:
results.append((suite_import.name, importing_suite.dir, suite.dir))
# Parallel suite imports may mean that multiple suites import the
# same subsuite and if scheckimports isn't run in the right suite
# then it creates a mismatch.
if len(results) != 0:
mismatches = []
for name, suite1, suite2 in results:
log_error('\'%s\' and \'%s\' import different versions of the suite \'%s\'' % (suite1, suite2, name))
for s in suites:
if s.dir == suite1:
mismatches.append(suite2)
elif s.dir == suite2:
mismatches.append(suite1)
log_error('Please adjust the other imports using this command')
for mismatch in mismatches:
log_error('mx -p %s scheckimports %s' % (mismatch, ' '.join(args)))
abort('Aborting for import mismatch')
return results
class NestedImportsSuiteModel(SuiteModel):
"""Imported suites are all siblings in an 'mx.imports/source' directory of the primary suite"""
@staticmethod
def _imported_suites_dirname():
return join('mx.imports', 'source')
def __init__(self, primaryDir, option):
SuiteModel.__init__(self)
self._primaryDir = primaryDir
def find_suite_dir(self, suite_import):
return self._search_dir(join(self._primaryDir, NestedImportsSuiteModel._imported_suites_dirname()), suite_import)
def importee_dir(self, importer_dir, suite_import, check_alternate=True):
suitename = suite_import.name
if suitename in self.suitenamemap:
suitename = self.suitenamemap[suitename]
if basename(importer_dir) == basename(self._primaryDir):
# primary is importer
this_imported_suites_dirname = join(importer_dir, NestedImportsSuiteModel._imported_suites_dirname())
ensure_dir_exists(this_imported_suites_dirname)
path = join(this_imported_suites_dirname, suitename)
else:
path = join(SuiteModel.siblings_dir(importer_dir), suitename)
checked = self._check_exists(suite_import, path, check_alternate)
return SuiteModel._checked_to_importee_tuple(checked, suite_import)
def nestedsuites_dirname(self):
return NestedImportsSuiteModel._imported_suites_dirname()
class SuiteImportURLInfo:
"""
Captures the info in the {"url", "kind"} dict,
and adds a 'vc' field.
"""
def __init__(self, url, kind, vc):
self.url = url
self.kind = kind
self.vc = vc
def abs_kind(self):
""" Maps vc kinds to 'source'
"""
return self.kind if self.kind == 'binary' else 'source'
class SuiteImport:
def __init__(self, name, version, urlinfos, kind=None, dynamicImport=False, in_subdir=False, version_from=None, suite_dir=None):
self.name = name
self.urlinfos = [] if urlinfos is None else urlinfos
self.version = self._deprecated_resolve_git_branchref(version)
self.version_from = version_from
self.dynamicImport = dynamicImport
self.kind = kind
self.in_subdir = in_subdir
self.suite_dir = suite_dir
def __str__(self):
return self.name
def _deprecated_resolve_git_branchref(self, version):
prefix = 'git-bref:'
if not version or not version.startswith(prefix):
return version
if primary_suite() and not primary_suite().getMxCompatibility().supportSuiteImportGitBref():
abort("Invalid version: {}. Automatic translation of `git-bref:` is not supported anymore".format(version))
bref_name = version[len(prefix):]
git_urlinfos = [urlinfo for urlinfo in self.urlinfos if urlinfo.vc.kind == 'git']
if len(git_urlinfos) != 1:
abort('Using ' + version + ' requires exactly one git urlinfo')
git_url = git_urlinfos[0].url
return SuiteImport.resolve_git_branchref(git_url, bref_name)
@staticmethod
def resolve_git_branchref(repo_url, bref_name, abortOnError=True):
resolved_version = GitConfig.get_branch_remote(repo_url, bref_name)
if not resolved_version:
if abortOnError:
abort('Resolving ' + bref_name + ' against ' + repo_url + ' failed')
return None
logv('Resolved ' + bref_name + ' against ' + repo_url + ' to ' + resolved_version)
return resolved_version
@staticmethod
def parse_specification(import_dict, context, importer, dynamicImport=False):
name = import_dict.get('name')
if not name:
abort('suite import must have a "name" attribute', context=context)
urls = import_dict.get("urls")
in_subdir = import_dict.get("subdir", False)
version = import_dict.get("version")
suite_dir = None
version_from = import_dict.get("versionFrom")
if version_from and version:
abort("In import for '{}': 'version' and 'versionFrom' can not be both set".format(name), context=context)
if version is None and version_from is None:
if not (in_subdir and (importer.vc_dir != importer.dir or isinstance(importer, BinarySuite))):
abort("In import for '{}': No version given and not a 'subdir' suite of the same repository".format(name), context=context)
if importer.isSourceSuite():
suite_dir = join(importer.vc_dir, name)
version = importer.version()
if urls is None:
if not in_subdir:
if import_dict.get("subdir") is None and importer.vc_dir != importer.dir:
warn("In import for '{}': No urls given but 'subdir' is not set, assuming 'subdir=True'".format(name), context)
in_subdir = True
elif not import_dict.get('noUrl'):
abort("In import for '{}': No urls given and not a 'subdir' suite".format(name), context=context)
return SuiteImport(name, version, None, None, dynamicImport=dynamicImport, in_subdir=in_subdir, version_from=version_from, suite_dir=suite_dir)
# urls is a list of alternatives defined as dicts
if not isinstance(urls, list):
abort('suite import urls must be a list', context=context)
urlinfos = []
mainKind = None
for urlinfo in urls:
if isinstance(urlinfo, dict) and urlinfo.get('url') and urlinfo.get('kind'):
kind = urlinfo.get('kind')
if not VC.is_valid_kind(kind):
abort('suite import kind ' + kind + ' illegal', context=context)
else:
abort('suite import url must be a dict with "url" and "kind" attributes', context=context)
vc = vc_system(kind)
if kind != 'binary':
assert not mainKind or mainKind == kind, "Only expecting one non-binary kind"
mainKind = kind
url = mx_urlrewrites.rewriteurl(urlinfo.get('url'))
urlinfos.append(SuiteImportURLInfo(url, kind, vc))
vc_kind = None
if mainKind:
vc_kind = mainKind
elif urlinfos:
vc_kind = 'binary'
return SuiteImport(name, version, urlinfos, vc_kind, dynamicImport=dynamicImport, in_subdir=in_subdir, version_from=version_from, suite_dir=suite_dir)
@staticmethod
def get_source_urls(source, kind=None):
"""
Returns a list of SuiteImportURLInfo instances.
If `source` is a string (a directory), determine the kind; otherwise search the list of
urlinfos and return the entries for source repositories.
"""
if isinstance(source, str):
if kind:
vc = vc_system(kind)
else:
assert not source.startswith("http:")
vc = VC.get_vc(source)
return [SuiteImportURLInfo(mx_urlrewrites.rewriteurl(source), 'source', vc)]
elif isinstance(source, list):
result = [s for s in source if s.kind != 'binary']
return result
else:
abort('unexpected type in SuiteImport.get_source_urls')
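# Illustrative shape of a suite import specification as consumed by
# SuiteImport.parse_specification (all values are hypothetical):
#
#   {
#       "name": "sdk",
#       "subdir": True,
#       "version": "<commit hash>",
#       "urls": [
#           {"url": "https://github.com/oracle/graal.git", "kind": "git"},
#       ],
#   }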
_suites = dict()
_primary_suite_path = None
_primary_suite = None
_mx_suite = None
# List of functions to run when the primary suite is initialized
_primary_suite_deferrables = []
def _primary_suite_init(s):
global _primary_suite
assert not _primary_suite
_primary_suite = s
_primary_suite.primary = True
os.environ['MX_PRIMARY_SUITE_PATH'] = s.dir
for deferrable in _primary_suite_deferrables:
deferrable()
def primary_suite():
""":rtype: Suite"""
return _primary_suite
class SuiteConstituent(_with_metaclass(ABCMeta, Comparable)):
def __init__(self, suite, name, build_time=1): # pylint: disable=super-init-not-called
"""
:type name: str
:type suite: Suite
:type build_time: Expected build time in minutes (used to schedule parallel jobs efficiently)
"""
self.name = name
self.suite = suite
self.build_time = build_time
# Should this constituent be visible outside its suite
self.internal = False
def origin(self):
"""
Gets a 2-tuple (file, line) describing the source file where this constituent
is defined or None if the location cannot be determined.
"""
suitepy = self.suite.suite_py()
if exists(suitepy):
import tokenize
with open(suitepy) as fp:
candidate = None
for t in tokenize.generate_tokens(fp.readline):
_, tval, (srow, _), _, _ = t
if candidate is None:
if tval in ('"' + self.name + '"', "'" + self.name + "'"):
candidate = srow
else:
if tval == ':':
return (suitepy, srow)
else:
candidate = None
def __abort_context__(self):
"""
Gets a description of where this constituent was defined in terms of source file
and line number. If no such description can be generated, None is returned.
"""
loc = self.origin()
if loc:
path, lineNo = loc
return ' File "{}", line {} in definition of {}'.format(path, lineNo, self.name)
return ' {}'.format(self.name)
def _comparison_key(self):
return self.name, self.suite
def __cmp__(self, other):
if not isinstance(other, self.__class__):
return NotImplemented
return compare(self._comparison_key(), other._comparison_key())
def __eq__(self, other):
if not isinstance(other, self.__class__):
return NotImplemented
return self._comparison_key() == other._comparison_key()
def __ne__(self, other):
if not isinstance(other, self.__class__):
return NotImplemented
return self._comparison_key() != other._comparison_key()
def __hash__(self):
return hash(self._comparison_key())
def __str__(self):
return self.name
def __repr__(self):
return self.name
class License(SuiteConstituent):
def __init__(self, suite, name, fullname, url):
SuiteConstituent.__init__(self, suite, name)
self.fullname = fullname
self.url = url
def _comparison_key(self):
# Licenses are equal across suites
return self.name, self.url, self.fullname
class Dependency(SuiteConstituent):
"""
A dependency is a library, distribution or project specified in a suite.
The name must be unique across all Dependency instances.
"""
def __init__(self, suite, name, theLicense, **kwArgs):
SuiteConstituent.__init__(self, suite, name)
if isinstance(theLicense, str):
theLicense = [theLicense]
self.theLicense = theLicense
self.__dict__.update(kwArgs)
def isBaseLibrary(self):
return isinstance(self, BaseLibrary)
def isLibrary(self):
return isinstance(self, Library)
def isResourceLibrary(self):
return isinstance(self, ResourceLibrary)
def isPackedResourceLibrary(self):
return isinstance(self, PackedResourceLibrary)
def isJreLibrary(self):
return isinstance(self, JreLibrary)
def isJdkLibrary(self):
return isinstance(self, JdkLibrary)
def isProject(self):
return isinstance(self, Project)
def isJavaProject(self):
return isinstance(self, JavaProject)
def isNativeProject(self):
return isinstance(self, AbstractNativeProject)
def isArchivableProject(self):
return isinstance(self, ArchivableProject)
def isMavenProject(self):
return isinstance(self, MavenProject)
def isDistribution(self):
return isinstance(self, Distribution)
def isJARDistribution(self):
return isinstance(self, JARDistribution)
def isLayoutJARDistribution(self):
return isinstance(self, LayoutJARDistribution)
def isClasspathDependency(self):
return isinstance(self, ClasspathDependency)
def isTARDistribution(self):
return isinstance(self, AbstractTARDistribution)
def isZIPDistribution(self):
return isinstance(self, AbstractZIPDistribution)
def isLayoutDistribution(self):
return isinstance(self, LayoutDistribution)
def isProjectOrLibrary(self):
return self.isProject() or self.isLibrary()
def isPlatformDependent(self):
return False
def isJDKDependent(self):
return None
def getGlobalRegistry(self):
if self.isProject():
return _projects
if self.isLibrary():
return _libs
if self.isDistribution():
return _dists
if self.isJreLibrary():
return _jreLibs
assert self.isJdkLibrary()
return _jdkLibs
def getGlobalRemovedRegistry(self):
if self.isProject():
return _removed_projects
if self.isLibrary():
return _removed_libs
if self.isDistribution():
return _removed_dists
if self.isJreLibrary():
return _removed_jreLibs
assert self.isJdkLibrary()
return _removed_jdkLibs
def getSuiteRegistry(self):
if self.isProject():
return self.suite.projects
if self.isLibrary():
return self.suite.libs
if self.isDistribution():
return self.suite.dists
if self.isJreLibrary():
return self.suite.jreLibs
assert self.isJdkLibrary()
return self.suite.jdkLibs
def getSuiteRemovedRegistry(self):
if self.isProject():
return self.suite.removed_projects
if self.isLibrary():
return self.suite.removed_libs
if self.isDistribution():
return self.suite.removed_dists
if self.isJreLibrary():
return self.suite.removed_jreLibs
assert self.isJdkLibrary()
return self.suite.removed_jdkLibs
def get_output_base(self):
return self.suite.get_output_root(platformDependent=self.isPlatformDependent(), jdkDependent=self.isJDKDependent())
def getBuildTask(self, args):
"""
Return a BuildTask that can be used to build this dependency.
:rtype : BuildTask
"""
nyi('getBuildTask', self)
def abort(self, msg):
"""
Aborts with given message prefixed by the origin of this dependency.
"""
abort(msg, context=self)
def warn(self, msg):
"""
Warns with given message prefixed by the origin of this dependency.
"""
warn(msg, context=self)
def qualifiedName(self):
return '{}:{}'.format(self.suite.name, self.name)
def walk_deps(self, preVisit=None, visit=None, visited=None, ignoredEdges=None, visitEdge=None):
"""
Walk the dependency graph rooted at this object.
See documentation for mx.walk_deps for more info.
"""
if visited is not None:
if self in visited:
return
else:
visited = set()
if ignoredEdges is None:
# Default ignored edges
ignoredEdges = [DEP_ANNOTATION_PROCESSOR, DEP_EXCLUDED, DEP_BUILD]
self._walk_deps_helper(visited, None, preVisit, visit, ignoredEdges, visitEdge)
def _walk_deps_helper(self, visited, edge, preVisit=None, visit=None, ignoredEdges=None, visitEdge=None):
_debug_walk_deps_helper(self, edge, ignoredEdges)
assert self not in visited, self
if not preVisit or preVisit(self, edge):
visited.add(self)
self._walk_deps_visit_edges(visited, edge, preVisit, visit, ignoredEdges, visitEdge)
if visit:
visit(self, edge)
def _walk_deps_visit_edges(self, visited, edge, preVisit=None, visit=None, ignoredEdges=None, visitEdge=None):
nyi('_walk_deps_visit_edges', self)
def _walk_deps_visit_edges_helper(self, deps, visited, in_edge, preVisit=None, visit=None, ignoredEdges=None, visitEdge=None):
for dep_type, dep_list in deps:
if not _is_edge_ignored(dep_type, ignoredEdges):
for dst in dep_list:
out_edge = DepEdge(self, dep_type, in_edge)
if visitEdge:
visitEdge(self, dst, out_edge)
if dst not in visited:
dst._walk_deps_helper(visited, out_edge, preVisit, visit, ignoredEdges, visitEdge)
def getArchivableResults(self, use_relpath=True, single=False):
"""
Generates (file_path, archive_path) tuples for all the build results of this dependency.
:param use_relpath: When `False` flattens all the results to the root of the archive
:param single: When `True` expects a single result.
Might throw `ValueError` if that does not make sense for this dependency type.
:rtype: collections.Iterable[(str, str)]
"""
nyi('getArchivableResults', self)
def contains_dep(self, dep, includeAnnotationProcessors=False):
"""
Determines if the dependency graph rooted at this object contains 'dep'.
Returns the path from this object to 'dep' if so, otherwise returns None.
"""
if dep == self:
return [self]
class FoundPath(StopIteration):
def __init__(self, path):
StopIteration.__init__(self)
self.path = path
def visit(path, d, edge):
if dep is d:
raise FoundPath(path)
try:
ignoredEdges = [DEP_EXCLUDED] if includeAnnotationProcessors else None
self.walk_deps(visit=visit, ignoredEdges=ignoredEdges)
except FoundPath as e:
return e.path
return None
"""Only JavaProjects define Java packages"""
def defined_java_packages(self):
return []
def mismatched_imports(self):
return {}
def _extra_artifact_discriminant(self):
"""
An extra string to help identify the current build configuration. It will be used in the generated path for the
built artifacts and will avoid unnecessary rebuilds when frequently changing this build configuration.
:rtype : str
"""
return ''
def _resolveDepsHelper(self, deps, fatalIfMissing=True):
"""
Resolves any string entries in 'deps' to the Dependency objects named
by the strings. The 'deps' list is updated in place.
"""
if deps:
assert all((isinstance(d, (str, Dependency)) for d in deps))
if isinstance(deps[0], str):
assert all((isinstance(d, str) for d in deps))
resolvedDeps = []
for name in deps:
s, _ = splitqualname(name)
if s and s in _jdkProvidedSuites:
logvv('[{}: ignoring dependency {} as it is provided by the JDK]'.format(self, name))
continue
dep = dependency(name, context=self, fatalIfMissing=fatalIfMissing)
if not dep:
continue
if dep.isProject() and self.suite is not dep.suite:
abort('cannot have an inter-suite reference to a project: ' + dep.name, context=self)
if s is None and self.suite is not dep.suite:
current_suite_dep = self.suite.dependency(dep.name, fatalIfMissing=False)
if dep != current_suite_dep:
raise abort('inter-suite reference must use qualified form ' + dep.suite.name + ':' + dep.name, context=self)
dep = current_suite_dep # prefer our version
if self.suite is not dep.suite and dep.internal:
abort('cannot reference internal ' + dep.name + ' from ' + self.suite.name + ' suite', context=self)
selfJC = getattr(self, 'javaCompliance', None)
depJC = getattr(dep, 'javaCompliance', None)
if selfJC and depJC and selfJC.value < depJC.value:
if self.suite.getMxCompatibility().checkDependencyJavaCompliance():
abort('cannot depend on ' + name + ' as it has a higher Java compliance than ' + str(selfJC), context=self)
resolvedDeps.append(dep)
deps[:] = resolvedDeps
assert all((isinstance(d, Dependency) for d in deps))
def get_output_root(self):
"""
Gets the root of the directory hierarchy under which generated artifacts for this
dependency such as class files and annotation generated sources should be placed.
"""
if self.suite._output_root_includes_config():
return join(self.get_output_base(), self.name)
# Legacy code
assert self.isProject(), self
if not self.subDir:
return join(self.get_output_base(), self.name)
names = self.subDir.split(os.sep)
parents = len([n for n in names if n == os.pardir])
if parents != 0:
return os.sep.join([self.get_output_base(), '{}-parent-{}'.format(self.suite, parents)] + names[parents:] + [self.name])
return join(self.get_output_base(), self.subDir, self.name)
class Suite(object):
"""
Command state and methods for all suite subclasses.
:type dists: list[Distribution]
"""
def __init__(self, mxDir, primary, internal, importing_suite, load, vc, vc_dir, dynamicallyImported=False):
if primary is True and vc_dir is None:
abort("The primary suite must be in a vcs repository or under a directory containing a file called '.mx_vcs_root' or 'ci.hocon'")
self.imported_by = [] if primary else [importing_suite]
self.mxDir = mxDir
self.dir = dirname(mxDir)
self.name = _suitename(mxDir)
self.primary = primary
self.internal = internal
self.libs = []
self.jreLibs = []
self.jdkLibs = []
self.suite_imports = []
self.extensions = None
self.requiredMxVersion = None
self.dists = []
self._metadata_initialized = False
self.loading_imports = False
self.post_init = False
self.resolved_dependencies = False
self.distTemplates = []
self.licenseDefs = []
self.repositoryDefs = []
self.javacLintOverrides = []
self.versionConflictResolution = 'none' if importing_suite is None else importing_suite.versionConflictResolution
self.dynamicallyImported = dynamicallyImported
self.scm = None
self._outputRoot = None
self._preloaded_suite_dict = None
self.vc = vc
self.vc_dir = vc_dir
self._preload_suite_dict()
self._init_imports()
self.removed_dists = []
self.removed_libs = []
self.removed_jreLibs = []
self.removed_jdkLibs = []
if load:
self._load()
def __str__(self):
return self.name
def all_dists(self):
return self.dists + self.removed_dists
def all_projects(self):
return self.projects + self.removed_projects
def all_libs(self):
return self.libs + self.removed_libs
def _load(self):
"""
Calls _parse_env and _load_extensions
"""
logvv("Loading suite " + self.name)
self._load_suite_dict()
self._parse_env()
self._load_extensions()
def getMxCompatibility(self):
if not hasattr(self, ".mx_compat"):
setattr(self, '.mx_compat', mx_compat.getMxCompatibility(self.requiredMxVersion))
return getattr(self, '.mx_compat')
def dependency(self, name, fatalIfMissing=True, context=None):
"""
Find a dependency defined by this Suite.
"""
def _find_in_registry(reg):
for lib in reg:
if lib.name == name:
return lib
result = _find_in_registry(self.libs) or \
_find_in_registry(self.jreLibs) or \
_find_in_registry(self.jdkLibs) or \
_find_in_registry(self.dists)
if fatalIfMissing and result is None:
abort("Couldn't find '{}' in '{}'".format(name, self.name), context=context)
return result
def _parse_env(self):
nyi('_parse_env', self)
# Cache of config names keyed by a 2-tuple of booleans representing
# the `platformDependent` and `jdkDependent` parameters to `_make_config` respectively.
_output_root_config = {}
@staticmethod
def _make_config(platformDependent=False, jdkDependent=None):
assert Suite._output_root_config is not None
config_key = (platformDependent, jdkDependent)
config = Suite._output_root_config.get(config_key)
if config is None:
config = []
jdk_releases = []
if jdkDependent is True or (jdkDependent is None and platformDependent is False):
for jdk in _get_all_jdks():
release = str(jdk.javaCompliance.value)
if release not in jdk_releases:
jdk_releases.append(release)
if not jdk_releases:
logv('No JDK releases found while computing JDK dependent output root')
if platformDependent:
config.append(get_os() + '-' + get_arch())
if jdk_releases:
config.append('jdk' + '+'.join(jdk_releases))
config = '-'.join(config)
Suite._output_root_config[config_key] = config # pylint: disable=unsupported-assignment-operation
return config
def _output_root_includes_config(self):
"""
Returns whether mx output for this suite is in a directory whose path includes
the configuration (i.e. os, arch and jdk).
"""
res = getattr(self, '.output_root_includes_config', None)
if res is None:
res = os.getenv('MX_ALT_OUTPUT_ROOT') is None and os.getenv('MX_OUTPUT_ROOT_INCLUDES_CONFIG') != 'false'
setattr(self, '.output_root_includes_config', res)
return res
def get_output_root(self, platformDependent=False, jdkDependent=None):
"""
Gets the directory for artifacts generated by this suite.
The returned value will be:
``<self.dir>/mxbuild/jdk<release>[+<release>]*`` // platformDependent=False, jdkDependent=None|True
``<self.dir>/mxbuild/<os>-<arch>`` // platformDependent=True, jdkDependent=None|False
``<self.dir>/mxbuild/<os>-<arch>-jdk<release>[+<release>]*`` // platformDependent=True, jdkDependent=True
``<self.dir>/mxbuild`` // platformDependent=False, jdkDependent=False
where <release> is the same as for javac (e.g. 6, 8, 15 etc). The
<release> values are based on $JAVA_HOME and $EXTRA_JAVA_HOMES in that order.
:param platformDependent: specifies if `<os>-<arch>` should be part of the directory name
"""
if self._output_root_includes_config():
config = Suite._make_config(platformDependent, jdkDependent)
attr_name = '.output_root_{}'.format(config)
res = getattr(self, attr_name, None)
if res is None:
res = join(self.dir, 'mxbuild', config)
setattr(self, attr_name, res)
return res
if not self._outputRoot:
outputRoot = self._get_early_suite_dict_property('outputRoot')
if outputRoot:
self._outputRoot = realpath(_make_absolute(outputRoot.replace('/', os.sep), self.dir))
elif get_env('MX_ALT_OUTPUT_ROOT') is not None:
self._outputRoot = realpath(_make_absolute(join(get_env('MX_ALT_OUTPUT_ROOT'), self.name), self.dir))
else:
self._outputRoot = self.getMxCompatibility().getSuiteOutputRoot(self)
if platformDependent:
return os.path.join(self._outputRoot, get_os() + '-' + get_arch())
else:
return self._outputRoot
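# Illustrative sketch (not taken from this file) of the directories returned when
# the output root includes the configuration, assuming suite dir '/ws/mysuite',
# a linux/amd64 host and a JDK with compliance 17:
#
#   suite.get_output_root()                        # /ws/mysuite/mxbuild/jdk17
#   suite.get_output_root(platformDependent=True)  # /ws/mysuite/mxbuild/linux-amd64
#   suite.get_output_root(True, True)              # /ws/mysuite/mxbuild/linux-amd64-jdk17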
def get_mx_output_dir(self):
"""
Gets the directory into which mx bookkeeping artifacts should be placed.
"""
return join(self.get_output_root(), basename(self.mxDir))
def _preload_suite_dict(self):
dictName = 'suite'
moduleName = 'suite'
modulePath = self.suite_py()
assert modulePath.endswith(moduleName + ".py")
if not exists(modulePath):
abort('{} is missing'.format(modulePath))
savedModule = sys.modules.get(moduleName)
if savedModule:
warn(modulePath + ' conflicts with ' + savedModule.__file__)
# temporarily extend the Python path
sys.path.insert(0, self.mxDir)
snapshot = frozenset(sys.modules.keys())
module = __import__(moduleName)
if savedModule:
# restore the old module into the module name space
sys.modules[moduleName] = savedModule
else:
# remove moduleName from the module name space
sys.modules.pop(moduleName)
# For now fail fast if extra modules were loaded.
# This can later be relaxed to simply remove the extra modules
# from the sys.modules name space if necessary.
extraModules = frozenset(sys.modules.keys()) - snapshot
assert len(extraModules) == 0, 'loading ' + modulePath + ' caused extra modules to be loaded: ' + ', '.join(extraModules)
# revert the Python path
del sys.path[0]
def expand(value, context):
if isinstance(value, dict):
for n, v in value.items():
value[n] = expand(v, context + [n])
elif isinstance(value, list):
for i in range(len(value)):
value[i] = expand(value[i], context + [str(i)])
elif isinstance(value, str):
value = expandvars(value)
if '$' in value or '%' in value:
abort('value of ' + '.'.join(context) + ' contains an undefined environment variable: ' + value)
elif isinstance(value, bool):
pass
else:
abort('value of ' + '.'.join(context) + ' is of unexpected type ' + str(type(value)))
return value
if not hasattr(module, dictName):
abort(modulePath + ' must define a variable named "' + dictName + '"')
self._preloaded_suite_dict = expand(getattr(module, dictName), [dictName])
if self.name == 'mx':
self.requiredMxVersion = version
elif 'mxversion' in self._preloaded_suite_dict:
try:
self.requiredMxVersion = VersionSpec(self._preloaded_suite_dict['mxversion'])
except AssertionError as ae:
abort('Exception while parsing "mxversion" in suite file: ' + str(ae), context=self)
conflictResolution = self._preloaded_suite_dict.get('versionConflictResolution')
if conflictResolution:
self.versionConflictResolution = conflictResolution
_imports = self._preloaded_suite_dict.get('imports', {})
for _suite in _imports.get('suites', []):
context = "suite import '" + _suite.get('name', '<undefined>') + "'"
os_arch = Suite._pop_os_arch(_suite, context)
Suite._merge_os_arch_attrs(_suite, os_arch, context)
(jsonifiable, errorMessage) = self._is_jsonifiable(modulePath)
if not jsonifiable:
msg = "Cannot parse file {}. Please make sure that this file only contains dicts and arrays. {}".format(modulePath, errorMessage)
if self.getMxCompatibility().requireJsonifiableSuite():
abort(msg)
else:
warn(msg)
def _is_jsonifiable(self, suiteFile):
"""Other tools require the suite.py files to be parseable without running a python interpreter.
Therefore suite.py file must consist out of JSON like dict, array, string, integer and boolean
structures. Function calls, string concatenations and other python expressions are not allowed."""
with open(suiteFile, "r") as f:
suiteContents = f.read()
try:
result = re.match(".*?suite\\s*=\\s*(\\{.*)", suiteContents, re.DOTALL)
part = result.group(1)
stack = 0
endIdx = 0
for c in part:
if c == "{":
stack += 1
elif c == "}":
stack -= 1
endIdx += 1
if stack == 0:
break
part = part[:endIdx]
# convert python boolean constants to json boolean constants
part = re.sub("True", "true", part)
part = re.sub("False", "false", part)
# remove python comments
part = re.sub("(.*?)#.*", "\\1", part)
def python_to_json_string(m):
return "\"" + m.group(1).replace("\n", "\\n") + "\""
# remove trailing commas (and any whitespace) before ']' or '}'
part = re.sub(",\\s*(\\]|\\})", "\\1", part)
# convert python multiline strings to json strings with embedded newlines
part = re.sub("\"\"\"(.*?)\"\"\"", python_to_json_string, part, flags=re.DOTALL)
part = re.sub("'''(.*?)'''", python_to_json_string, part, flags=re.DOTALL)
# convert python single-quoted strings to json double-quoted strings
part = re.sub("'(.*?)'", python_to_json_string, part, flags=re.DOTALL)
json.loads(part)
return (True, None)
except:
return (False, sys.exc_info()[1])
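# A hedged example of what _is_jsonifiable accepts: the suite.py must reduce to
# JSON-like literals only (the names below are made up for illustration).
#
#   suite = {
#       "name": "example",      # OK: plain dict/str/list/bool/int literals
#       "mxversion": "5.0.0",
#       "libraries": {},
#   }
#
# whereas something like
#
#   suite = {"name": "ex" + "ample"}   # rejected: string concatenation
#
# fails the JSON check and triggers the warning/abort above.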
def _register_url_rewrites(self):
urlrewrites = self._get_early_suite_dict_property('urlrewrites')
if urlrewrites:
for urlrewrite in urlrewrites:
def _error(msg):
abort(msg, context=self)
mx_urlrewrites.register_urlrewrite(urlrewrite, onError=_error)
def _load_suite_dict(self):
supported = [
'imports',
'projects',
'libraries',
'jrelibraries',
'jdklibraries',
'distributions',
'name',
'outputRoot',
'mxversion',
'sourceinprojectwhitelist',
'versionConflictResolution',
'developer',
'url',
'licenses',
'licences',
'defaultLicense',
'defaultLicence',
'snippetsPattern',
'repositories',
'javac.lint.overrides',
'urlrewrites',
'scm',
'version',
'externalProjects',
'groupId',
'release',
]
if self._preloaded_suite_dict is None:
self._preload_suite_dict()
d = self._preloaded_suite_dict
if self.requiredMxVersion is None:
self.requiredMxVersion = mx_compat.minVersion()
warn("The {} suite does not express any required mx version. Assuming version {}. Consider adding 'mxversion=<version>' to your suite file ({}).".format(self.name, self.requiredMxVersion, self.suite_py()))
elif self.requiredMxVersion > version:
abort("The {} suite requires mx version {} while your current mx version is {}.\nPlease update mx by running \"{} update\"".format(self.name, self.requiredMxVersion, version, _mx_path))
if not self.getMxCompatibility():
abort("The {} suite requires mx version {} while your version of mx only supports suite versions {} to {}.".format(self.name, self.requiredMxVersion, mx_compat.minVersion(), version))
javacLintOverrides = d.get('javac.lint.overrides', None)
if javacLintOverrides:
self.javacLintOverrides = javacLintOverrides.split(',')
if d.get('snippetsPattern'):
self.snippetsPattern = d.get('snippetsPattern')
unknown = set(d.keys()) - frozenset(supported)
suiteExtensionAttributePrefix = self.name + ':'
suiteSpecific = {n[len(suiteExtensionAttributePrefix):]: d[n] for n in d.keys() if n.startswith(suiteExtensionAttributePrefix) and n != suiteExtensionAttributePrefix}
for n, v in suiteSpecific.items():
if hasattr(self, n):
abort('Cannot override built-in suite attribute "' + n + '"', context=self)
setattr(self, n, v)
unknown.remove(suiteExtensionAttributePrefix + n)
if unknown:
abort(self.suite_py() + ' defines unsupported suite attribute: ' + ', '.join(unknown))
self.suiteDict = d
self._preloaded_suite_dict = None
def _register_metadata(self):
"""
Registers the metadata loaded by _load_metadata into the relevant
global dictionaries such as _projects, _libs, _jreLibs and _dists.
"""
for l in self.libs:
existing = _libs.get(l.name)
# Check that suites that define same library are consistent
if existing is not None and existing != l and _check_global_structures:
abort('inconsistent library redefinition of ' + l.name + ' in ' + existing.suite.dir + ' and ' + l.suite.dir, context=l)
_libs[l.name] = l
for l in self.jreLibs:
existing = _jreLibs.get(l.name)
# Check that suites that define same library are consistent
if existing is not None and existing != l:
abort('inconsistent JRE library redefinition of ' + l.name + ' in ' + existing.suite.dir + ' and ' + l.suite.dir, context=l)
_jreLibs[l.name] = l
for l in self.jdkLibs:
existing = _jdkLibs.get(l.name)
# Check that suites that define same library are consistent
if existing is not None and existing != l:
abort('inconsistent JDK library redefinition of ' + l.name + ' in ' + existing.suite.dir + ' and ' + l.suite.dir, context=l)
_jdkLibs[l.name] = l
for d in self.dists:
self._register_distribution(d)
for d in self.distTemplates:
existing = _distTemplates.get(d.name)
if existing is not None and _check_global_structures:
abort('inconsistent distribution template redefinition of ' + d.name + ' in ' + existing.suite.dir + ' and ' + d.suite.dir, context=d)
_distTemplates[d.name] = d
for l in self.licenseDefs:
existing = _licenses.get(l.name)
if existing is not None and _check_global_structures and l != existing:
abort("inconsistent license redefinition of {} in {} (initialy defined in {})".format(l.name, self.name, existing.suite.name), context=l)
_licenses[l.name] = l
for r in self.repositoryDefs:
existing = _repositories.get(r.name)
if existing is not None and _check_global_structures and r != existing:
abort("inconsistent repository redefinition of {} in {} (initialy defined in {})".format(r.name, self.name, existing.suite.name), context=r)
_repositories[r.name] = r
def _register_distribution(self, d):
existing = _dists.get(d.name)
if existing is not None and _check_global_structures:
warn('distribution ' + d.name + ' redefined', context=d)
_dists[d.name] = d
def _resolve_dependencies(self):
for d in self.libs + self.jdkLibs + self.dists:
d.resolveDeps()
for r in self.repositoryDefs:
r.resolveLicenses()
self.resolved_dependencies = True
def _post_init_finish(self):
if hasattr(self, 'mx_post_parse_cmd_line'):
self.mx_post_parse_cmd_line(_opts)
self.post_init = True
def version(self, abortOnError=True):
abort('version not implemented')
def isDirty(self, abortOnError=True):
abort('isDirty not implemented')
def _load_metadata(self):
suiteDict = self.suiteDict
if suiteDict.get('name') is None:
abort('Missing "suite=<name>" in ' + self.suite_py())
libsMap = self._check_suiteDict('libraries')
jreLibsMap = self._check_suiteDict('jrelibraries')
jdkLibsMap = self._check_suiteDict('jdklibraries')
distsMap = self._check_suiteDict('distributions')
importsMap = self._check_suiteDict('imports')
scmDict = self._check_suiteDict('scm')
self.developer = self._check_suiteDict('developer')
self.url = suiteDict.get('url')
if not _validate_abolute_url(self.url, acceptNone=True):
abort('Invalid url in {}'.format(self.suite_py()))
self.defaultLicense = suiteDict.get(self.getMxCompatibility().defaultLicenseAttribute())
if isinstance(self.defaultLicense, str):
self.defaultLicense = [self.defaultLicense]
if scmDict:
try:
read = scmDict.pop('read')
except KeyError:  # dict.pop raises KeyError when 'read' is missing
abort("Missing required 'read' attribute for 'scm'", context=self)
write = scmDict.pop('write', read)
url = scmDict.pop('url', read)
self.scm = SCMMetadata(url, read, write)
for name, attrs in sorted(jreLibsMap.items()):
jar = attrs.pop('jar')
# JRE libraries are optional by default
optional = attrs.pop('optional', 'true') != 'false'
theLicense = attrs.pop(self.getMxCompatibility().licenseAttribute(), None)
l = JreLibrary(self, name, jar, optional, theLicense, **attrs)
self.jreLibs.append(l)
for name, attrs in sorted(jdkLibsMap.items()):
path = attrs.pop('path')
deps = Suite._pop_list(attrs, 'dependencies', context='jdklibrary ' + name)
# Unlike JRE libraries, JDK libraries are required by default ('optional' defaults to False)
theLicense = attrs.pop(self.getMxCompatibility().licenseAttribute(), None)
optional = attrs.pop('optional', False)
if isinstance(optional, str):
optional = optional != 'false'
jdkStandardizedSince = JavaCompliance(attrs.pop('jdkStandardizedSince', '1.2'))
l = JdkLibrary(self, name, path, deps, optional, theLicense, jdkStandardizedSince=jdkStandardizedSince, **attrs)
self.jdkLibs.append(l)
for name, attrs in sorted(importsMap.items()):
if name == 'suites':
pass
elif name == 'libraries':
self._load_libraries(attrs)
else:
abort('illegal import kind: ' + name)
licenseDefs = self._check_suiteDict(self.getMxCompatibility().licensesAttribute())
repositoryDefs = self._check_suiteDict('repositories')
if suiteDict.get('release') not in [None, True, False]:
abort("Invalid 'release' attribute: it should be a boolean", context=self)
self._load_libraries(libsMap)
self._load_distributions(distsMap)
self._load_licenses(licenseDefs)
self._load_repositories(repositoryDefs)
def _check_suiteDict(self, key):
return dict() if self.suiteDict.get(key) is None else self.suiteDict[key]
def imports_dir(self, kind):
return join(join(self.dir, 'mx.imports'), kind)
def binary_imports_dir(self):
return self.imports_dir('binary')
def source_imports_dir(self):
return self.imports_dir('source')
def binary_suite_dir(self, name):
"""
Returns the mxDir for an imported BinarySuite, creating the parent if necessary
"""
dotMxDir = self.binary_imports_dir()
ensure_dir_exists(dotMxDir)
return join(dotMxDir, name)
def _find_binary_suite_dir(self, name):
"""Attempts to locate a binary_suite directory for suite 'name', returns the mx dir or None"""
suite_dir = join(self.binary_imports_dir(), name)
return _is_suite_dir(suite_dir, _mxDirName(name))
def _extensions_name(self):
return 'mx_' + self.name.replace('-', '_')
def _find_extensions(self, name):
extensionsPath = join(self.mxDir, name + '.py')
if exists(extensionsPath):
return name
else:
return None
def _load_extensions(self):
extensionsName = self._find_extensions(self._extensions_name())
if extensionsName is not None:
if extensionsName in sys.modules:
abort(extensionsName + '.py in suite ' + self.name + ' duplicates ' + sys.modules[extensionsName].__file__)
# temporarily extend the Python path
sys.path.insert(0, self.mxDir)
with currently_loading_suite.set_scoped(self):
mod = __import__(extensionsName)
self.extensions = sys.modules[extensionsName]
# revert the Python path
del sys.path[0]
if hasattr(mod, 'mx_post_parse_cmd_line'):
self.mx_post_parse_cmd_line = mod.mx_post_parse_cmd_line
if hasattr(mod, 'mx_register_dynamic_suite_constituents'):
self.mx_register_dynamic_suite_constituents = mod.mx_register_dynamic_suite_constituents # pylint: disable=C0103
"""
Extension point for suites that want to dynamically create projects or distributions.
Such suites should define `mx_register_dynamic_suite_constituents(register_project, register_distribution)` at the
module level. `register_project` and `register_distribution` take 1 argument (the project/distribution object).
"""
if hasattr(mod, 'mx_init'):
mod.mx_init(self)
self.extensions = mod
def _get_early_suite_dict_property(self, name, default=None):
if self._preloaded_suite_dict is not None:
return self._preloaded_suite_dict.get(name, default)
else:
return self.suiteDict.get(name, default)
def _init_imports(self):
importsMap = self._get_early_suite_dict_property('imports', {})
suiteImports = importsMap.get("suites")
if suiteImports:
if not isinstance(suiteImports, list):
abort('suites must be a list-valued attribute')
for entry in suiteImports:
if not isinstance(entry, dict):
abort('suite import entry must be a dict')
import_dict = entry
imported_suite_name = import_dict.get('name', '<unknown>')
if import_dict.get('ignore', False):
log("Ignoring '{}' on your platform ({}/{})".format(imported_suite_name, get_os(), get_arch()))
continue
if import_dict.get('dynamic', False) and imported_suite_name not in (name for name, _ in get_dynamic_imports()):
continue
suite_import = SuiteImport.parse_specification(import_dict, context=self, importer=self, dynamicImport=self.dynamicallyImported)
jdkProvidedSince = import_dict.get('jdkProvidedSince', None)
if jdkProvidedSince and get_jdk(tag=DEFAULT_JDK_TAG).javaCompliance >= jdkProvidedSince:
_jdkProvidedSuites.add(suite_import.name)
else:
self.suite_imports.append(suite_import)
def re_init_imports(self):
"""
If a suite is updated, e.g. by sforceimports, we must re-initialize the potentially
stale import data from the updated suite.py file
"""
self.suite_imports = []
self._preload_suite_dict()
self._init_imports()
def _load_distributions(self, distsMap):
for name, attrs in sorted(distsMap.items()):
if '<' in name:
parameters = re.findall(r'<(.+?)>', name)
self.distTemplates.append(DistributionTemplate(self, name, attrs, parameters))
else:
self._load_distribution(name, attrs)
def _load_distribution(self, name, attrs):
""":rtype : Distribution"""
assert '>' not in name
context = 'distribution ' + name
className = attrs.pop('class', None)
native = attrs.pop('native', False)
theLicense = attrs.pop(self.getMxCompatibility().licenseAttribute(), None)
os_arch = Suite._pop_os_arch(attrs, context)
Suite._merge_os_arch_attrs(attrs, os_arch, context)
exclLibs = Suite._pop_list(attrs, 'exclude', context)
deps = Suite._pop_list(attrs, 'dependencies', context)
pd = attrs.pop('platformDependent', False)
platformDependent = bool(os_arch) or pd
testDistribution = attrs.pop('testDistribution', None)
path = attrs.pop('path', None)
layout = attrs.pop('layout', None)
def create_layout(default_type):
layout_type = attrs.pop('type', default_type)
if layout_type == 'tar':
layout_class = LayoutTARDistribution
elif layout_type == 'jar':
layout_class = LayoutJARDistribution
elif layout_type == 'zip':
layout_class = LayoutZIPDistribution
else:
raise abort("Unknown layout distribution type: {}".format(layout_type), context=context)
return layout_class(self, name, deps, layout, path, platformDependent, theLicense, testDistribution=testDistribution, **attrs)
if className:
if not self.extensions or not hasattr(self.extensions, className):
raise abort('Distribution {} requires a custom class ({}) which was not found in {}'.format(name, className, join(self.mxDir, self._extensions_name() + '.py')))
d = getattr(self.extensions, className)(self, name, deps, exclLibs, platformDependent, theLicense, testDistribution=testDistribution, layout=layout, path=path, **attrs)
elif native:
if layout is not None:
d = create_layout('tar')
else:
relpath = attrs.pop('relpath', False)
output = attrs.pop('output', None)
output = output if output is None else output.replace('/', os.sep)
d = NativeTARDistribution(self, name, deps, path, exclLibs, platformDependent, theLicense, relpath, output, testDistribution=testDistribution, **attrs)
elif layout is not None:
d = create_layout('jar')
else:
subDir = attrs.pop('subDir', None)
sourcesPath = attrs.pop('sourcesPath', None)
if sourcesPath == "<unified>":
sourcesPath = path
mainClass = attrs.pop('mainClass', None)
distDeps = Suite._pop_list(attrs, 'distDependencies', context)
manifestEntries = attrs.pop('manifestEntries', None)
javaCompliance = attrs.pop('javaCompliance', None)
maven = attrs.pop('maven', True)
stripConfigFileNames = attrs.pop('strip', None)
stripMappingFileNames = attrs.pop('stripMap', None)
assert stripConfigFileNames is None or isinstance(stripConfigFileNames, list)
if isinstance(maven, dict) and maven.get('version', None):
abort("'version' is not supported in maven specification for distributions")
if attrs.pop('buildDependencies', None):
abort("'buildDependencies' is not supported for JAR distributions")
d = JARDistribution(self, name, subDir, path, sourcesPath, deps, mainClass, exclLibs, distDeps,
javaCompliance, platformDependent, theLicense, maven=maven,
stripConfigFileNames=stripConfigFileNames, stripMappingFileNames=stripMappingFileNames,
testDistribution=testDistribution, manifestEntries=manifestEntries, **attrs)
self.dists.append(d)
return d
def _unload_unregister_distribution(self, name):
self.dists = [d for d in self.dists if d.name != name]
d = _dists[name]
del _dists[name]
return d
@staticmethod
def _pop_list(attrs, name, context):
v = attrs.pop(name, None)
if not v:
return []
if not isinstance(v, list):
abort('Attribute "' + name + '" for ' + context + ' must be a list', context)
return v
@staticmethod
def _pop_os_arch(attrs, context):
os_arch = attrs.pop('os_arch', None)
if os_arch:
os_key = None
if get_os_variant():
os_key = get_os() + '-' + get_os_variant()
if os_key is None or os_key not in os_arch:
os_key = get_os()
if os_key not in os_arch:
os_key = '<others>'
os_attrs = os_arch.pop(os_key, None)
if os_attrs:
arch_attrs = os_attrs.pop(get_arch(), None)
if not arch_attrs:
arch_attrs = os_attrs.pop('<others>', None)
if arch_attrs:
return arch_attrs
else:
warn("No platform-specific definition is available for {} for your architecture ({})".format(context, get_arch()))
else:
warn("No platform-specific definition is available for {} for your OS ({})".format(context, get_os()))
return None
@staticmethod
def _merge_os_arch_attrs(attrs, os_arch_attrs, context, path=''):
if os_arch_attrs:
for k, v in os_arch_attrs.items():
if k in attrs:
other = attrs[k]
key_path = path + '.' + str(k)
if isinstance(v, dict) and isinstance(other, dict):
Suite._merge_os_arch_attrs(other, v, context, key_path)
elif isinstance(v, list) and isinstance(other, list):
attrs[k] = v + other
else:
abort("OS/Arch attribute must not override non-OS/Arch attribute '{}' in {}".format(key_path, context))
else:
attrs[k] = v
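# Sketch of the 'os_arch' layout consumed by _pop_os_arch/_merge_os_arch_attrs,
# with made-up attribute values for illustration:
#
#   "os_arch": {
#       "linux":    {"amd64": {"urls": ["..."], "sha1": "..."}},
#       "<others>": {"<others>": {"optional": True}},
#   }
#
# The entry matching the current OS (or '<others>') and architecture (or
# '<others>') is merged into the enclosing attributes; list values are
# concatenated, nested dicts are merged recursively, and a clash with a
# non-OS/Arch scalar attribute aborts.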
def _load_libraries(self, libsMap):
for name, attrs in sorted(libsMap.items()):
context = 'library ' + name
orig_attrs = deepcopy(attrs)
attrs.pop('native', False) # TODO use to make non-classpath libraries
os_arch = Suite._pop_os_arch(attrs, context)
Suite._merge_os_arch_attrs(attrs, os_arch, context)
deps = Suite._pop_list(attrs, 'dependencies', context)
ext = attrs.pop('ext', None)
path = attrs.pop('path', None)
urls = Suite._pop_list(attrs, 'urls', context)
sha1 = attrs.pop('sha1', None)
sourceExt = attrs.pop('sourceExt', None)
sourcePath = attrs.pop('sourcePath', None)
sourceUrls = Suite._pop_list(attrs, 'sourceUrls', context)
sourceSha1 = attrs.pop('sourceSha1', None)
maven = attrs.get('maven', None)
optional = attrs.pop('optional', False)
resource = attrs.pop('resource', False)
packedResource = attrs.pop('packedResource', False)
theLicense = attrs.pop(self.getMxCompatibility().licenseAttribute(), None)
# Resources with the "maven" attribute can get their "urls" and "sourceUrls" from the Maven repository definition.
need_maven_urls = not urls and sha1
need_maven_sourceUrls = not sourceUrls and sourceSha1
if maven and (need_maven_urls or need_maven_sourceUrls):
# Make sure we have complete "maven" metadata.
maven_attrs = ['groupId', 'artifactId', 'version']
if not isinstance(maven, dict) or any(x not in maven for x in maven_attrs):
abort('The "maven" attribute must be a dictionary containing "{0}"'.format('", "'.join(maven_attrs)), context)
if 'suffix' in maven:
if self.getMxCompatibility().mavenSupportsClassifier():
abort('The use of "suffix" as maven metadata is not supported in this version of mx, use "classifier" instead', context)
else:
maven['classifier'] = maven['suffix']
del maven['suffix']
if need_maven_urls:
urls = maven_download_urls(**maven)
if need_maven_sourceUrls:
if 'classifier' in maven:
abort('Cannot download sources for "maven" library with "classifier" attribute', context)
else:
sourceUrls = maven_download_urls(classifier='sources', **maven)
# Construct the required resource type.
if packedResource:
l = PackedResourceLibrary(self, name, path, optional, urls, sha1, **attrs)
elif resource:
l = ResourceLibrary(self, name, path, optional, urls, sha1, ext=ext, **attrs)
else:
l = Library(self, name, path, optional, urls, sha1, sourcePath, sourceUrls, sourceSha1, deps, theLicense, ext=ext, sourceExt=sourceExt, **attrs)
l._orig_attrs = orig_attrs
self.libs.append(l)
def _load_licenses(self, licenseDefs):
for name, attrs in sorted(licenseDefs.items()):
fullname = attrs.pop('name')
url = attrs.pop('url')
if not _validate_abolute_url(url):
abort('Invalid url in license {} in {}'.format(name, self.suite_py()))
l = License(self, name, fullname, url)
l.__dict__.update(attrs)
self.licenseDefs.append(l)
def _load_repositories(self, repositoryDefs):
for name, attrs in sorted(repositoryDefs.items()):
context = 'repository ' + name
if 'url' in attrs:
snapshots_url = attrs.pop('url')
releases_url = snapshots_url
else:
snapshots_url = attrs.pop('snapshotsUrl')
releases_url = attrs.pop('releasesUrl')
if not _validate_abolute_url(snapshots_url):
abort('Invalid url in repository {}: {}'.format(self.suite_py(), snapshots_url), context=context)
if releases_url != snapshots_url and not _validate_abolute_url(releases_url):
abort('Invalid url in repository {}: {}'.format(self.suite_py(), releases_url), context=context)
licenses = Suite._pop_list(attrs, self.getMxCompatibility().licensesAttribute(), context=context)
r = Repository(self, name, snapshots_url, releases_url, licenses)
r.__dict__.update(attrs)
self.repositoryDefs.append(r)
def recursive_post_init(self):
"""depth first _post_init driven by imports graph"""
self.visit_imports(Suite._init_metadata_visitor)
self._init_metadata()
self.visit_imports(Suite._resolve_dependencies_visitor)
self._resolve_dependencies()
self.visit_imports(Suite._post_init_visitor)
self._post_init()
@staticmethod
def _init_metadata_visitor(importing_suite, suite_import, **extra_args):
imported_suite = suite(suite_import.name)
if not imported_suite._metadata_initialized:
# avoid recursive initialization
imported_suite._metadata_initialized = True
imported_suite.visit_imports(imported_suite._init_metadata_visitor)
imported_suite._init_metadata()
@staticmethod
def _post_init_visitor(importing_suite, suite_import, **extra_args):
imported_suite = suite(suite_import.name)
if not imported_suite.post_init:
imported_suite.visit_imports(imported_suite._post_init_visitor)
imported_suite._post_init()
@staticmethod
def _resolve_dependencies_visitor(importing_suite, suite_import, **extra_args):
imported_suite = suite(suite_import.name)
if not imported_suite.resolved_dependencies:
imported_suite.visit_imports(imported_suite._resolve_dependencies_visitor)
imported_suite._resolve_dependencies()
def _init_metadata(self):
self._load_metadata()
self._register_metadata()
def _post_init(self):
self._post_init_finish()
def visit_imports(self, visitor, **extra_args):
"""
Visitor support for the suite imports list
For each entry the visitor function is called with this suite and a SuiteImport instance
from the entry and any extra args passed to this call.
N.B. There is no built-in support for avoiding visiting the same suite multiple times,
as this function only visits the imports of a single suite. If a (recursive) visitor function
wishes to visit a suite exactly once, it must manage that through extra_args.
"""
for suite_import in self.suite_imports:
visitor(self, suite_import, **extra_args)
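# Minimal usage sketch for visit_imports (the visitor name is made up):
#
#   def _print_import(importing_suite, suite_import, indent=''):
#       print(indent + importing_suite.name + ' -> ' + suite_import.name)
#
#   some_suite.visit_imports(_print_import, indent='  ')
#
# Each import entry of 'some_suite' is passed to the visitor together with any
# extra keyword arguments; recursion and de-duplication are left to the caller.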
def get_import(self, suite_name):
for suite_import in self.suite_imports:
if suite_import.name == suite_name:
return suite_import
return None
def import_suite(self, name, version=None, urlinfos=None, kind=None, in_subdir=False):
"""Dynamic import of a suite. Returns None if the suite cannot be found"""
imported_suite = suite(name, fatalIfMissing=False)
if imported_suite:
return imported_suite
suite_import = SuiteImport(name, version, urlinfos, kind, dynamicImport=True, in_subdir=in_subdir)
imported_suite, cloned = _find_suite_import(self, suite_import, fatalIfMissing=False, load=False, clone_binary_first=True)
if imported_suite:
if not cloned and imported_suite.isBinarySuite():
if imported_suite.vc.update(imported_suite.vc_dir, rev=suite_import.version, mayPull=True):
imported_suite.re_init_imports()
imported_suite.reload_binary_suite()
for suite_import in imported_suite.suite_imports:
if not suite(suite_import.name, fatalIfMissing=False):
warn("Programmatically imported suite '{}' imports '{}' which is not loaded.".format(name, suite_import.name))
_register_suite(imported_suite)
assert not imported_suite.post_init
imported_suite._load()
imported_suite._init_metadata()
imported_suite._resolve_dependencies()
imported_suite._post_init()
if not imported_suite.isBinarySuite():
for dist in imported_suite.dists:
dist.post_init()
return imported_suite
def scm_metadata(self, abortOnError=False):
return self.scm
def suite_py(self):
return join(self.mxDir, 'suite.py')
def suite_py_mtime(self):
if not hasattr(self, '_suite_py_mtime'):
self._suite_py_mtime = getmtime(self.suite_py())
return self._suite_py_mtime
def __abort_context__(self):
"""
Returns a string describing where this suite was defined in terms of its source file.
If no such description can be generated, returns None.
"""
path = self.suite_py()
if exists(path):
return 'In definition of suite {} in {}'.format(self.name, path)
return None
def isBinarySuite(self):
return isinstance(self, BinarySuite)
def isSourceSuite(self):
return isinstance(self, SourceSuite)
def _resolve_suite_version_conflict(suiteName, existingSuite, existingVersion, existingImporter, otherImport, otherImportingSuite, dry_run=False):
conflict_resolution = _opts.version_conflict_resolution
if otherImport.dynamicImport and (not existingSuite or not existingSuite.dynamicallyImported) and conflict_resolution != 'latest_all':
return None
if not otherImport.version:
return None
if conflict_resolution == 'suite':
if otherImportingSuite:
conflict_resolution = otherImportingSuite.versionConflictResolution
elif not dry_run:
warn("Conflict resolution was set to 'suite' but importing suite is not available")
if conflict_resolution == 'ignore':
if not dry_run:
warn("mismatched import versions on '{}' in '{}' ({}) and '{}' ({})".format(suiteName, otherImportingSuite.name, otherImport.version, existingImporter.name if existingImporter else '?', existingVersion))
return None
elif conflict_resolution in ('latest', 'latest_all'):
if not existingSuite or not existingSuite.vc:
return None # can not resolve at the moment
if existingSuite.vc.kind != otherImport.kind:
return None
if not isinstance(existingSuite, SourceSuite):
if dry_run:
return 'ERROR'
else:
abort("mismatched import versions on '{}' in '{}' and '{}', 'latest' conflict resolution is only supported for source suites".format(suiteName, otherImportingSuite.name, existingImporter.name if existingImporter else '?'))
if not existingSuite.vc.exists(existingSuite.vc_dir, rev=otherImport.version):
return otherImport.version
resolved = existingSuite.vc.latest(existingSuite.vc_dir, otherImport.version, existingSuite.vc.parent(existingSuite.vc_dir))
# TODO currently this only handles simple DAGs and it will always do an update assuming that the repo is at a version controlled by mx
if existingSuite.vc.parent(existingSuite.vc_dir) == resolved:
return None
return resolved
if conflict_resolution == 'none':
if dry_run:
return 'ERROR'
else:
abort("mismatched import versions on '{}' in '{}' ({}) and '{}' ({})".format(suiteName, otherImportingSuite.name, otherImport.version, existingImporter.name if existingImporter else '?', existingVersion))
return None
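# Summary of the conflict resolution modes handled above (derived from the code):
#   'suite'               - defer to the importing suite's own versionConflictResolution
#   'ignore'              - warn about the mismatch and keep the existing version
#   'latest'/'latest_all' - resolve to the latest of the two revisions (source suites only)
#   'none'                - abort on any mismatch
# The function returns the version to update to, or None if no update is needed.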
### ~~~~~~~~~~~~~ Repository / Suite
class Repository(SuiteConstituent):
"""A Repository is a remote binary repository that can be used to upload binaries with deploy_binary."""
def __init__(self, suite, name, snapshots_url, releases_url, licenses):
SuiteConstituent.__init__(self, suite, name)
self.snapshots_url = snapshots_url
self.releases_url = releases_url
self.licenses = licenses
self.url = snapshots_url # for compatibility
def get_url(self, version, rewrite=True):
url = self.snapshots_url if version.endswith('-SNAPSHOT') else self.releases_url
if rewrite:
url = mx_urlrewrites.rewriteurl(url)
return url
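# Hedged example of the URL selection in get_url (repository object and version
# strings are illustrative):
#
#   repo.get_url('1.0-SNAPSHOT')        # -> snapshots_url (after URL rewriting)
#   repo.get_url('1.0')                 # -> releases_url  (after URL rewriting)
#   repo.get_url('1.0', rewrite=False)  # -> raw releases_url, no rewriting applied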
def get_maven_id(self):
if hasattr(self, 'mavenId'):
return getattr(self, 'mavenId')
return self.name
def _comparison_key(self):
return self.name, self.snapshots_url, self.releases_url, tuple((l.name if isinstance(l, License) else l for l in self.licenses))
def resolveLicenses(self):
self.licenses = get_license(self.licenses)
class SourceSuite(Suite):
"""A source suite"""
def __init__(self, mxDir, primary=False, load=True, internal=False, importing_suite=None, dynamicallyImported=False):
vc, vc_dir = VC.get_vc_root(dirname(mxDir), abortOnError=False)
if not vc_dir:
current_dir = realpath(dirname(mxDir))
while True:
# Use the heuristic of a 'ci.hocon' or '.mx_vcs_root' file being
# at the root of a repo that contains multiple suites.
hocon = join(current_dir, 'ci.hocon')
mx_vcs_root = join(current_dir, '.mx_vcs_root')
if exists(hocon) or exists(mx_vcs_root):
vc_dir = current_dir
# don't break here so that the topmost such directory becomes the vc_dir
if os.path.splitdrive(current_dir)[1] == os.sep:
break
current_dir = dirname(current_dir)
Suite.__init__(self, mxDir, primary, internal, importing_suite, load, vc, vc_dir, dynamicallyImported=dynamicallyImported)
logvv("SourceSuite.__init__({}), got vc={}, vc_dir={}".format(mxDir, self.vc, self.vc_dir))
self.projects = []
self.removed_projects = []
self._releaseVersion = {}
def dependency(self, name, fatalIfMissing=True, context=None):
for p in self.projects:
if p.name == name:
return p
return super(SourceSuite, self).dependency(name, fatalIfMissing=fatalIfMissing, context=context)
def _resolve_dependencies(self):
for d in self.projects:
d.resolveDeps()
super(SourceSuite, self)._resolve_dependencies()
def version(self, abortOnError=True):
"""
Return the current head changeset of this suite.
"""
# we do not cache the version because it changes in development
if not self.vc:
return None
return self.vc.parent(self.vc_dir, abortOnError=abortOnError)
def isDirty(self, abortOnError=True):
"""
Check whether there are pending changes in the source.
"""
return self.vc.isDirty(self.vc_dir, abortOnError=abortOnError)
def is_release(self):
"""
Returns True if the release tag from VC is known and is not a snapshot
"""
_release = self._get_early_suite_dict_property('release')
if _release is not None:
return _release
if not self.vc:
return False
_version = self._get_early_suite_dict_property('version')
if _version:
return '{}-{}'.format(self.name, _version) in self.vc.parent_tags(self.vc_dir)
else:
return self.vc.is_release_from_tags(self.vc_dir, self.name)
def release_version(self, snapshotSuffix='dev'):
"""
Gets the release tag from VC or creates a time-based one if VC is unavailable
"""
if snapshotSuffix not in self._releaseVersion:
_version = self._get_early_suite_dict_property('version')
if _version and self.getMxCompatibility().addVersionSuffixToExplicitVersion():
if not self.is_release():
_version = _version + '-' + snapshotSuffix
if not _version and self.vc:
_version = self.vc.release_version_from_tags(self.vc_dir, self.name, snapshotSuffix=snapshotSuffix)
if not _version:
_version = 'unknown-{0}-{1}'.format(platform.node(), time.strftime('%Y-%m-%d_%H-%M-%S_%Z'))
self._releaseVersion[snapshotSuffix] = _version
return self._releaseVersion[snapshotSuffix]
def scm_metadata(self, abortOnError=False):
scm = self.scm
if scm:
return scm
pull = self.vc.default_pull(self.vc_dir, abortOnError=abortOnError)
if abortOnError and not pull:
abort("Can not find scm metadata for suite {0} ({1})".format(self.name, self.vc_dir))
push = self.vc.default_push(self.vc_dir, abortOnError=abortOnError)
if not push:
push = pull
return SCMMetadata(pull, pull, push)
def _load_metadata(self):
super(SourceSuite, self)._load_metadata()
self._load_projects()
if hasattr(self, 'mx_register_dynamic_suite_constituents'):
def _register_project(proj):
self.projects.append(proj)
def _register_distribution(dist):
self.dists.append(dist)
self.mx_register_dynamic_suite_constituents(_register_project, _register_distribution)
self._finish_load_projects()
def _load_projects(self):
"""projects are unique to source suites"""
projsMap = self._check_suiteDict('projects')
for name, attrs in sorted(projsMap.items()):
try:
context = 'project ' + name
className = attrs.pop('class', None)
theLicense = attrs.pop(self.getMxCompatibility().licenseAttribute(), None)
os_arch = Suite._pop_os_arch(attrs, context)
Suite._merge_os_arch_attrs(attrs, os_arch, context)
deps = Suite._pop_list(attrs, 'dependencies', context)
genDeps = Suite._pop_list(attrs, 'generatedDependencies', context)
if genDeps:
deps += genDeps
# Re-add generatedDependencies attribute so it can be used in canonicalizeprojects
attrs['generatedDependencies'] = genDeps
workingSets = attrs.pop('workingSets', None)
jlintOverrides = attrs.pop('lint.overrides', None)
if className:
if not self.extensions or not hasattr(self.extensions, className):
abort('Project {} requires a custom class ({}) which was not found in {}'.format(name, className, join(self.mxDir, self._extensions_name() + '.py')))
p = getattr(self.extensions, className)(self, name, deps, workingSets, theLicense=theLicense, **attrs)
else:
srcDirs = Suite._pop_list(attrs, 'sourceDirs', context)
projectDir = attrs.pop('dir', None)
subDir = attrs.pop('subDir', None)
if projectDir:
d = join(self.dir, projectDir)
elif subDir is None:
d = join(self.dir, name)
else:
d = join(self.dir, subDir, name)
native = attrs.pop('native', False)
if not native:
project_type_name = attrs.pop('type', 'JavaProject')
else:
project_type_name = None
old_test_project = attrs.pop('isTestProject', None)
if old_test_project is not None:
abort_or_warn("`isTestProject` attribute has been renamed to `testProject`", self.getMxCompatibility().deprecateIsTestProject(), context)
testProject = attrs.pop('testProject', old_test_project)
if native:
if isinstance(native, bool) or native.lower() == "true":
output = attrs.pop('output', None)
if output and os.sep != '/':
output = output.replace('/', os.sep)
results = Suite._pop_list(attrs, 'results', context)
p = NativeProject(self, name, subDir, srcDirs, deps, workingSets, results, output, d,
theLicense=theLicense, testProject=testProject, **attrs)
else:
from mx_native import DefaultNativeProject
p = DefaultNativeProject(self, name, subDir, srcDirs, deps, workingSets, d, kind=native,
theLicense=theLicense, testProject=testProject, **attrs)
elif project_type_name == 'JavaProject':
javaCompliance = attrs.pop('javaCompliance', None)
if javaCompliance is None:
abort('javaCompliance property required for non-native project ' + name)
p = JavaProject(self, name, subDir, srcDirs, deps, javaCompliance, workingSets, d, theLicense=theLicense, testProject=testProject, **attrs)
p.checkstyleProj = attrs.pop('checkstyle', name)
if p.checkstyleProj != name and 'checkstyleVersion' in attrs:
compat = self.getMxCompatibility()
should_abort = compat.check_checkstyle_config()
abort_or_warn('Cannot specify both "checkstyle" and "checkstyleVersion" attributes', should_abort, context=p)
p.checkPackagePrefix = attrs.pop('checkPackagePrefix', 'true') == 'true'
ap = Suite._pop_list(attrs, 'annotationProcessors', context)
if ap:
p.declaredAnnotationProcessors = ap
if jlintOverrides:
p._javac_lint_overrides = jlintOverrides
if hasattr(p, "javaVersionExclusion") and self.getMxCompatibility().supports_disjoint_JavaCompliance_range():
abort('The "javaVersionExclusion" is no longer supported. Use a disjoint range for the "javaCompliance" attribute instead (e.g. "8,13+")', context=p)
else:
assert project_type_name
project_type = getattr(self.extensions, project_type_name, None)
if not project_type:
abort("unknown project type '{}'".format(project_type_name))
p = project_type(self, name, subDir, srcDirs, deps, workingSets, d,
theLicense=theLicense, testProject=testProject, **attrs)
if self.getMxCompatibility().overwriteProjectAttributes():
p.__dict__.update(attrs)
else:
for k, v in attrs.items():
# We first try with `dir` to avoid computing attribute values
# due to `hasattr` if possible (e.g., for properties).
if k not in dir(p) and not hasattr(p, k):
setattr(p, k, v)
self.projects.append(p)
except:
log_error("Error while creating project {}".format(name))
raise
def _finish_load_projects(self):
# Record the projects that define annotation processors
apProjects = {}
for p in self.projects:
if not p.isJavaProject():
continue
annotationProcessors = None
for srcDir in p.source_dirs():
configFile = join(srcDir, 'META-INF', 'services', 'javax.annotation.processing.Processor')
if exists(configFile):
with open(configFile) as fp:
annotationProcessors = [ap.strip() for ap in fp]
if len(annotationProcessors) != 0 and p.checkPackagePrefix:
for ap in annotationProcessors:
if not ap.startswith(p.name):
abort(ap + ' in ' + configFile + ' does not start with ' + p.name)
if annotationProcessors:
p.definedAnnotationProcessors = annotationProcessors
apProjects[p.name] = p
# Initialize the definedAnnotationProcessors list for distributions with direct
# dependencies on projects that define one or more annotation processors.
for dist in self.dists:
aps = []
for dep in dist.deps:
name = dep if isinstance(dep, str) else dep.name
if name in apProjects:
aps += apProjects[name].definedAnnotationProcessors
if aps:
dist.definedAnnotationProcessors = aps
# Restrict exported annotation processors to those explicitly defined by the projects
def _refineAnnotationProcessorServiceConfig(dist):
apsJar = dist.path
config = 'META-INF/services/javax.annotation.processing.Processor'
currentAps = None
with zipfile.ZipFile(apsJar, 'r') as zf:
if config in zf.namelist():
currentAps = zf.read(config).split()
# Overwriting of open files doesn't work on Windows, so now that
# `apsJar` is closed we can safely overwrite it if necessary
if currentAps is not None and currentAps != dist.definedAnnotationProcessors:
logv('[updating ' + config + ' in ' + apsJar + ']')
with Archiver(apsJar) as arc:
with zipfile.ZipFile(apsJar, 'r') as lp:
for arcname in lp.namelist():
if arcname == config:
arc.zf.writestr(arcname, '\n'.join(dist.definedAnnotationProcessors) + '\n')
else:
arc.zf.writestr(arcname, lp.read(arcname))
dist.add_update_listener(_refineAnnotationProcessorServiceConfig)
@staticmethod
def _load_env_in_mxDir(mxDir, env=None, file_name='env', abort_if_missing=False):
e = join(mxDir, file_name)
SourceSuite._load_env_file(e, env, abort_if_missing=abort_if_missing)
@staticmethod
def _load_env_file(e, env=None, abort_if_missing=False):
if exists(e):
with open(e) as f:
lineNum = 0
for line in f:
lineNum = lineNum + 1
line = line.strip()
if len(line) != 0 and line[0] != '#':
if '=' not in line:
abort(e + ':' + str(lineNum) + ': line does not match pattern "key=value"')
key, value = line.split('=', 1)
key = key.strip()
value = expandvars_in_property(value.strip())
if env is None:
os.environ[key] = value
logv('Setting environment variable %s=%s from %s' % (key, value, e))
else:
env[key] = value
logv('Read variable %s=%s from %s' % (key, value, e))
elif abort_if_missing:
abort("Could not find env file: {}".format(e))
def _parse_env(self):
SourceSuite._load_env_in_mxDir(self.mxDir, _loadedEnv)
def _register_metadata(self):
Suite._register_metadata(self)
for p in self.projects:
existing = _projects.get(p.name)
if existing is not None and _check_global_structures:
abort('cannot override project {} in {} with project of the same name in {}'.format(p.name, existing.dir, p.dir))
if not hasattr(_opts, 'ignored_projects') or not p.name in _opts.ignored_projects:
_projects[p.name] = p
# check all project dependencies are local
for d in p.deps:
dp = project(d, False)
if dp:
if not dp in self.projects:
dists = [(dist.suite.name + ':' + dist.name) for dist in dp.suite.dists if dp in dist.archived_deps()]
if len(dists) > 1:
dists = ', '.join(dists[:-1]) + ' or ' + dists[-1]
elif dists:
dists = dists[0]
else:
dists = '<name of distribution containing ' + dp.name + '>'
p.abort("dependency to project '{}' defined in an imported suite must use {} instead".format(dp.name, dists))
elif dp == p:
p.abort("recursive dependency in suite '{}' in project '{}'".format(self.name, d))
@staticmethod
def _projects_recursive(importing_suite, imported_suite, projects, visitmap):
if imported_suite.name in visitmap:
return
projects += imported_suite.projects
visitmap[imported_suite.name] = True
imported_suite.visit_imports(importing_suite._projects_recursive_visitor, projects=projects, visitmap=visitmap)
@staticmethod
def _projects_recursive_visitor(importing_suite, suite_import, projects, visitmap, **extra_args):
if isinstance(importing_suite, SourceSuite):
importing_suite._projects_recursive(importing_suite, suite(suite_import.name), projects, visitmap)
def projects_recursive(self):
"""return all projects including those in imported suites"""
result = []
result += self.projects
visitmap = dict()
self.visit_imports(self._projects_recursive_visitor, projects=result, visitmap=visitmap,)
return result
def mx_binary_distribution_jar_path(self):
"""
returns the absolute path of the mx binary distribution jar.
"""
return join(self.dir, _mx_binary_distribution_jar(self.name))
def create_mx_binary_distribution_jar(self):
"""
Creates a jar file named name-mx.jar that contains
the metadata for another suite to import this suite as a BinarySuite.
TODO check timestamps to avoid recreating this repeatedly, or would
the check dominate anyway?
TODO It would be cleaner for subsequent loading if we actually wrote a
transformed suite.py file that only contained distribution info, to
detect access to private (non-distribution) state
"""
mxMetaJar = self.mx_binary_distribution_jar_path()
mxfiles = glob.glob(join(self.mxDir, '*.py'))
mxfiles += glob.glob(join(self.mxDir, '*.properties'))
with Archiver(mxMetaJar) as arc:
for mxfile in mxfiles:
mxDirBase = basename(self.mxDir)
arc.zf.write(mxfile, arcname=join(mxDirBase, basename(mxfile)))
def eclipse_settings_sources(self):
"""
Gets a dictionary from the name of an Eclipse settings file to
the list of files providing its generated content, in overriding order
(i.e., settings from files later in the list override settings from
files earlier in the list).
A new dictionary is created each time this method is called so it's
safe for the caller to modify it.
"""
esdict = {}
# start with the mxtool defaults
defaultEclipseSettingsDir = join(_mx_suite.dir, 'eclipse-settings')
if exists(defaultEclipseSettingsDir):
for name in os.listdir(defaultEclipseSettingsDir):
esdict[name] = [os.path.abspath(join(defaultEclipseSettingsDir, name))]
# append suite overrides
eclipseSettingsDir = join(self.mxDir, 'eclipse-settings')
if exists(eclipseSettingsDir):
for name in os.listdir(eclipseSettingsDir):
esdict.setdefault(name, []).append(os.path.abspath(join(eclipseSettingsDir, name)))
return esdict
def netbeans_settings_sources(self):
"""
Gets a dictionary from the name of a NetBeans settings file to
the list of files providing its generated content, in overriding order
(i.e., settings from files later in the list override settings from
files earlier in the list).
A new dictionary is created each time this method is called so it's
safe for the caller to modify it.
"""
esdict = {}
# start with the mxtool defaults
defaultNetBeansSuiteDir = join(_mx_suite.dir, 'netbeans-settings')
if exists(defaultNetBeansSuiteDir):
for name in os.listdir(defaultNetBeansSuiteDir):
esdict[name] = [os.path.abspath(join(defaultNetBeansSuiteDir, name))]
# append suite overrides
netBeansSettingsDir = join(self.mxDir, 'netbeans-settings')
if exists(netBeansSettingsDir):
for name in os.listdir(netBeansSettingsDir):
esdict.setdefault(name, []).append(os.path.abspath(join(netBeansSettingsDir, name)))
return esdict
"""
A pre-built suite downloaded from a Maven repository.
"""
class BinarySuite(Suite):
def __init__(self, mxDir, importing_suite, dynamicallyImported=False, load=True):
Suite.__init__(self, mxDir, False, False, importing_suite, load, BinaryVC(), dirname(mxDir), dynamicallyImported=dynamicallyImported)
# At this stage the suite directory is guaranteed to exist, as is the mx.<suitename>
# directory. For a freshly downloaded suite, the actual distribution jars
# have not been downloaded yet, as we need info from the suite.py for that.
def _load(self):
self._load_binary_suite()
super(BinarySuite, self)._load()
def reload_binary_suite(self):
for d in self.dists:
_dists.pop(d.name, None)
self.dists = []
self._load_binary_suite()
def version(self, abortOnError=True):
"""
Return the current head changeset of this suite.
"""
# we do not cache the version because it changes in development
return self.vc.parent(self.vc_dir)
def release_version(self):
return self.version()
def isDirty(self, abortOnError=True):
# a binary suite can not be dirty
return False
def _load_binary_suite(self):
"""
Always load the suite.py file and the distribution info defined there,
download the jar files for a freshly cloned suite
"""
self._load_suite_dict()
Suite._load_distributions(self, self._check_suiteDict('distributions'))
def _load_libraries(self, libsMap):
super(BinarySuite, self)._load_libraries(libsMap)
for l in self.libs:
if l.isLibrary() or l.isResourceLibrary():
l.get_path(resolve=True)
if l.isLibrary():
l.get_source_path(resolve=True)
def _parse_env(self):
pass
def _load_distributions(self, distsMap):
# This gets done explicitly in _load_binary_suite as we need the info there
# so, in that mode, we don't want to call the superclass method again
pass
def _load_metadata(self):
super(BinarySuite, self)._load_metadata()
if hasattr(self, 'mx_register_dynamic_suite_constituents'):
def _register_distribution(dist):
self.dists.append(dist)
self.mx_register_dynamic_suite_constituents(None, _register_distribution)
def _load_distribution(self, name, attrs):
ret = super(BinarySuite, self)._load_distribution(name, attrs)
ret.post_init()
self.vc.getDistribution(self.dir, ret)
return ret
def _register_metadata(self):
# since we are working with the original suite.py file, we remove some
# values that should not be visible
self.projects = []
Suite._register_metadata(self)
def _resolve_dependencies(self):
super(BinarySuite, self)._resolve_dependencies()
# Remove projects from dist dependencies
for d in self.dists:
d.deps = [dep for dep in d.deps if dep and not dep.isJavaProject()]
class InternalSuite(SourceSuite):
def __init__(self, mxDir):
mxMxDir = _is_suite_dir(mxDir)
assert mxMxDir
SourceSuite.__init__(self, mxMxDir, internal=True)
_register_suite(self)
class MXSuite(InternalSuite):
def __init__(self):
InternalSuite.__init__(self, _mx_home)
def _parse_env(self):
# Only load the env file from mx when it is the primary suite. This can only
# be determined once the primary suite has been set, so it must be deferred;
# since the primary suite's env should be loaded last, this should be ok.
def _deferrable():
assert primary_suite()
if self == primary_suite():
SourceSuite._load_env_in_mxDir(self.mxDir)
_primary_suite_deferrables.append(_deferrable)
def _complete_init(self):
"""
Initialization steps to be completed once mx.mx/env has been processed
"""
self._init_metadata()
self._resolve_dependencies()
self._post_init()
class MXTestsSuite(InternalSuite):
def __init__(self):
InternalSuite.__init__(self, join(_mx_home, "tests"))
def suites(opt_limit_to_suite=False, includeBinary=True, include_mx=False):
"""
Get the list of all loaded suites.
"""
res = [s for s in _suites.values() if not s.internal and (includeBinary or isinstance(s, SourceSuite))]
if include_mx:
res.append(_mx_suite)
if opt_limit_to_suite and _opts.specific_suites:
res = [s for s in res if s.name in _opts.specific_suites]
return res
def suite(name, fatalIfMissing=True, context=None):
"""
Get the suite for a given name.
:rtype: Suite
"""
s = _suites.get(name)
if s is None and fatalIfMissing:
abort('suite named ' + name + ' not found', context=context)
return s
def primary_or_specific_suites():
""":rtype: list[Suite]"""
if _opts.specific_suites:
return [suite(name) for name in _opts.specific_suites]
return [primary_suite()]
def _suitename(mxDir):
parts = basename(mxDir).split('.')
if len(parts) == 3:
assert parts[0] == ''
assert parts[1] == 'mx'
return parts[2]
assert len(parts) == 2, parts
assert parts[0] == 'mx'
return parts[1]
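# Examples of the mxDir basename to suite name mapping implemented above:
#   'mx.graal'  -> 'graal'
#   '.mx.graal' -> 'graal'   (hidden variant; split yields ['', 'mx', 'graal'])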
def _is_suite_dir(d, mxDirName=None):
"""
Checks if d contains a suite.
If mxDirName is None, matches any suite name, otherwise checks for exactly `mxDirName` or `mxDirName` with a ``.`` prefix.
"""
if os.path.isdir(d):
for f in [mxDirName, '.' + mxDirName] if mxDirName else [e for e in os.listdir(d) if e.startswith('mx.') or e.startswith('.mx.')]:
mxDir = join(d, f)
if exists(mxDir) and isdir(mxDir) and (exists(join(mxDir, 'suite.py'))):
return mxDir
def _findPrimarySuiteMxDirFrom(d):
""" search for a suite directory upwards from 'd' """
while d:
mxDir = _is_suite_dir(d)
if mxDir is not None:
return mxDir
parent = dirname(d)
if d == parent:
return None
d = parent
return None
def _findPrimarySuiteMxDir():
# check for explicit setting
if _primary_suite_path is not None:
mxDir = _is_suite_dir(_primary_suite_path)
if mxDir is not None:
return mxDir
else:
abort(_primary_suite_path + ' does not contain an mx suite')
# try current working directory first
mxDir = _findPrimarySuiteMxDirFrom(os.getcwd())
if mxDir is not None:
return mxDir
return None
def _register_suite(s):
assert s.name not in _suites, s.name
_suites[s.name] = s
def _use_binary_suite(suite_name):
return _binary_suites is not None and (len(_binary_suites) == 0 or suite_name in _binary_suites)
def _find_suite_import(importing_suite, suite_import, fatalIfMissing=True, load=True, clone_binary_first=False):
"""
:rtype : (Suite | None, bool)
"""
search_mode = 'binary' if _use_binary_suite(suite_import.name) else 'source'
clone_mode = 'binary' if clone_binary_first else search_mode
def _find_suite_dir(mode):
"""
Attempts to locate an existing suite in the local context
Returns the path to the mx.name dir if found else None
"""
if mode == 'binary':
# binary suites are always stored relative to the importing suite in mx-private directory
return importing_suite._find_binary_suite_dir(suite_import.name)
else:
# use the SuiteModel to locate a local source copy of the suite
return _suitemodel.find_suite_dir(suite_import)
def _get_import_dir(url, mode):
"""Return directory where the suite will be cloned to"""
if mode == 'binary':
return importing_suite.binary_suite_dir(suite_import.name)
else:
# Try to use the URL first so that a big repo is cloned to a local
# directory whose name is based on the repo instead of a suite
# nested in the big repo.
root, _ = os.path.splitext(basename(_urllib_parse.urlparse(url).path))
if root:
import_dir = join(SiblingSuiteModel.siblings_dir(importing_suite.dir), root)
else:
import_dir, _ = _suitemodel.importee_dir(importing_suite.dir, suite_import, check_alternate=False)
if exists(import_dir):
abort("Suite import directory ({0}) for suite '{1}' exists but no suite definition could be found.".format(import_dir, suite_import.name))
return import_dir
def _clone_kwargs(mode):
if mode == 'binary':
return dict(result=dict(), suite_name=suite_import.name)
else:
return dict()
_clone_status = [False]
_found_mode = [None]
def _find_or_clone():
_import_mx_dir = _find_suite_dir(search_mode)
if _import_mx_dir is not None:
_found_mode[0] = search_mode
return _import_mx_dir
if clone_mode != search_mode:
_import_mx_dir = _find_suite_dir(clone_mode)
if _import_mx_dir is None:
# No local copy, so use the URLs in order to "download" one
clone_kwargs = _clone_kwargs(clone_mode)
for urlinfo in suite_import.urlinfos:
if urlinfo.abs_kind() != clone_mode or not urlinfo.vc.check(abortOnError=False):
continue
import_dir = _get_import_dir(urlinfo.url, clone_mode)
if exists(import_dir):
warn("Trying to clone suite '{suite_name}' but directory {import_dir} already exists and does not seem to contain suite {suite_name}".format(suite_name=suite_import.name, import_dir=import_dir))
continue
if urlinfo.vc.clone(urlinfo.url, import_dir, suite_import.version, abortOnError=False, **clone_kwargs):
_import_mx_dir = _find_suite_dir(clone_mode)
if _import_mx_dir is None:
warn("Cloned suite '{suite_name}' but the result ({import_dir}) does not seem to contain suite {suite_name}".format(suite_name=suite_import.name, import_dir=import_dir))
else:
_clone_status[0] = True
break
else:
# it is possible that the clone partially populated the target
# which will mess up further attempts, so we "clean" it
if exists(import_dir):
shutil.rmtree(import_dir)
if _import_mx_dir is not None:
_found_mode[0] = clone_mode
return _import_mx_dir
import_mx_dir = _find_or_clone()
if import_mx_dir is None:
if clone_mode == 'binary':
if search_mode != 'source' or any((urlinfo.abs_kind() == 'source' for urlinfo in suite_import.urlinfos)):
warn("Binary import suite '{0}' not found, falling back to source dependency".format(suite_import.name))
search_mode = 'source'
clone_mode = 'source'
import_mx_dir = _find_or_clone()
elif all(urlinfo.abs_kind() == 'binary' for urlinfo in suite_import.urlinfos):
logv("Import suite '{0}' has no source urls, falling back to binary dependency".format(suite_import.name))
search_mode = 'binary'
clone_mode = 'binary'
import_mx_dir = _find_or_clone()
if import_mx_dir is None:
if fatalIfMissing:
suffix = ''
if _use_binary_suite(suite_import.name) and not any((urlinfo.abs_kind() == 'binary' for urlinfo in suite_import.urlinfos)):
suffix = " No binary URLs in {} for import of '{}' into '{}'.".format(importing_suite.suite_py(), suite_import.name, importing_suite.name)
abort("Imported suite '{}' not found (binary or source).{}".format(suite_import.name, suffix))
else:
return None, False
# Factory method?
if _found_mode[0] == 'binary':
return BinarySuite(import_mx_dir, importing_suite=importing_suite, load=load, dynamicallyImported=suite_import.dynamicImport), _clone_status[0]
else:
assert _found_mode[0] == 'source'
return SourceSuite(import_mx_dir, importing_suite=importing_suite, load=load, dynamicallyImported=suite_import.dynamicImport), _clone_status[0]
def _discover_suites(primary_suite_dir, load=True, register=True, update_existing=False):
def _log_discovery(msg):
dt = datetime.utcnow() - _mx_start_datetime
logvv(str(dt) + colorize(" [suite-discovery] ", color='green', stream=sys.stdout) + msg)
_log_discovery("Starting discovery with primary dir " + primary_suite_dir)
primary = SourceSuite(primary_suite_dir, load=False, primary=True)
_suitemodel.set_primary_dir(primary.dir)
primary._register_url_rewrites()
discovered = {}
ancestor_names = {}
importer_names = {}
original_version = {}
vc_dir_to_suite_names = {}
versions_from = {}
class VersionType:
CLONED = 0
REVISION = 1
BRANCH = 2
worklist = deque()
dynamic_imports_added = [False]
def _add_discovered_suite(_discovered_suite, first_importing_suite_name):
if first_importing_suite_name:
importer_names[_discovered_suite.name] = {first_importing_suite_name}
ancestor_names[_discovered_suite.name] = {first_importing_suite_name} | ancestor_names[first_importing_suite_name]
else:
assert _discovered_suite == primary
importer_names[_discovered_suite.name] = frozenset()
ancestor_names[primary.name] = frozenset()
for _suite_import in _discovered_suite.suite_imports:
if _discovered_suite.name == _suite_import.name:
abort("Error: suite '{}' imports itself".format(_discovered_suite.name))
_log_discovery("Adding {discovered} -> {imported} in worklist after discovering {discovered}".format(discovered=_discovered_suite.name, imported=_suite_import.name))
if dynamic_imports_added[0] and (_suite_import.urlinfos or _suite_import.version):
# check if this provides coordinates for a dynamic import that is in the queue
def _is_weak_import(importing_suite_name, imported_suite_name):
if imported_suite_name != _suite_import.name or importing_suite_name != primary.name:
return False
if importing_suite_name == _discovered_suite.name:
importing_suite = _discovered_suite
else:
importing_suite = discovered[importing_suite_name]
suite_import = importing_suite.get_import(imported_suite_name)
return not suite_import.urlinfos and not suite_import.version and suite_import.dynamicImport
for importing_suite_name, imported_suite_name in worklist:
if _is_weak_import(importing_suite_name, imported_suite_name):
# remove those imports from the worklist
if _opts.very_verbose:
_log_discovery("Dropping weak imports from worklist: {}".format(["{}->{}".format(_f, _t) for _f, _t in worklist if _is_weak_import(_f, _t)]))
new_worklist = [(_f, _t) for _f, _t in worklist if not _is_weak_import(_f, _t)]
worklist.clear()
worklist.extend(new_worklist)
break
worklist.append((_discovered_suite.name, _suite_import.name))
if _discovered_suite.vc_dir:
vc_dir_to_suite_names.setdefault(_discovered_suite.vc_dir, set()).add(_discovered_suite.name)
discovered[_discovered_suite.name] = _discovered_suite
_add_discovered_suite(primary, None)
def _is_imported_by_primary(_discovered_suite):
for _suite_name in vc_dir_to_suite_names[_discovered_suite.vc_dir]:
if primary.name == _suite_name:
return True
if primary.name in importer_names[_suite_name]:
assert primary.get_import(_suite_name), primary.name + ' ' + _suite_name
if not primary.get_import(_suite_name).dynamicImport:
return True
return False
def _clear_pyc_files(_updated_suite):
if _updated_suite.vc_dir in vc_dir_to_suite_names:
suites_to_clean = set((discovered[name] for name in vc_dir_to_suite_names[_updated_suite.vc_dir]))
else:
suites_to_clean = set()
suites_to_clean.add(_updated_suite)
for collocated_suite in suites_to_clean:
pyc_file = collocated_suite.suite_py() + 'c'
if exists(pyc_file):
os.unlink(pyc_file)
def _was_cloned_or_updated_during_discovery(_discovered_suite):
return _discovered_suite.vc_dir is not None and _discovered_suite.vc_dir in original_version
def _update_repo(_discovered_suite, update_version, forget=False, update_reason="to resolve conflict"):
if not _discovered_suite.vc:
warn('No version control info for suite ' + _discovered_suite.name)
return False
current_version = _discovered_suite.vc.parent(_discovered_suite.vc_dir)
if _discovered_suite.vc_dir not in original_version:
branch = _discovered_suite.vc.active_branch(_discovered_suite.vc_dir, abortOnError=False)
if branch is not None:
original_version[_discovered_suite.vc_dir] = VersionType.BRANCH, branch
else:
original_version[_discovered_suite.vc_dir] = VersionType.REVISION, current_version
if current_version == update_version:
return False
_discovered_suite.vc.update(_discovered_suite.vc_dir, rev=update_version, mayPull=True)
_clear_pyc_files(_discovered_suite)
if forget:
# we updated, this may change the DAG so
# "un-discover" anything that was discovered based on old information
_log_discovery("Updated needed {}: updating {} to {}".format(update_reason, _discovered_suite.vc_dir, update_version))
forgotten_edges = {}
def _forget_visitor(_, __suite_import):
_forget_suite(__suite_import.name)
def _forget_suite(suite_name):
if suite_name not in discovered:
return
_log_discovery("Forgetting {} after update".format(suite_name))
if suite_name in ancestor_names:
del ancestor_names[suite_name]
if suite_name in importer_names:
for importer_name in importer_names[suite_name]:
forgotten_edges.setdefault(importer_name, set()).add(suite_name)
del importer_names[suite_name]
if suite_name in discovered:
s = discovered[suite_name]
del discovered[suite_name]
s.visit_imports(_forget_visitor)
for suite_names in vc_dir_to_suite_names.values():
suite_names.discard(suite_name)
new_worklist = [(_f, _t) for _f, _t in worklist if _f != suite_name]
worklist.clear()
worklist.extend(new_worklist)
new_versions_from = {_s: (_f, _i) for _s, (_f, _i) in versions_from.items() if _i != suite_name}
versions_from.clear()
versions_from.update(new_versions_from)
if suite_name in forgotten_edges:
del forgotten_edges[suite_name]
for _collocated_suite_name in list(vc_dir_to_suite_names[_discovered_suite.vc_dir]):
_forget_suite(_collocated_suite_name)
# Add all the edges that need re-resolution
for __importing_suite, imported_suite_set in forgotten_edges.items():
for imported_suite in imported_suite_set:
_log_discovery("Adding {} -> {} in worklist after conflict".format(__importing_suite, imported_suite))
worklist.appendleft((__importing_suite, imported_suite))
else:
_discovered_suite.re_init_imports()
return True
# This is used to honor the "version_from" directives. Note that we only reach here if the importer is in a different repo.
# 1. we may only ignore an edge that points to a suite that has a "version_from", or to an ancestor of such a suite
# 2. we do not ignore an edge if the importer is one of the "from" suites (a suite that is designated by a "version_from" of another suite)
# 3. otherwise, if the edge points directly to something that has a "version_from", we ignore it for sure
# 4. and finally, we do not ignore edges that point to a "from" suite or its ancestor in the repo
# This gives the suite mentioned in "version_from" priority
def _should_ignore_conflict_edge(_imported_suite, _importer_name):
vc_suites = vc_dir_to_suite_names[_imported_suite.vc_dir]
for suite_with_from, (from_suite, _) in versions_from.items():
if suite_with_from not in vc_suites:
continue
suite_with_from_and_ancestors = {suite_with_from}
suite_with_from_and_ancestors |= vc_suites & ancestor_names[suite_with_from]
if _imported_suite.name in suite_with_from_and_ancestors: # 1. above
if _importer_name != from_suite: # 2. above
if _imported_suite.name == suite_with_from: # 3. above
_log_discovery("Ignoring {} -> {} because of version_from({}) = {} (fast-path)".format(_importer_name, _imported_suite.name, suite_with_from, from_suite))
return True
if from_suite not in ancestor_names:
_log_discovery("Temporarily ignoring {} -> {} because of version_from({}) = {f_suite} ({f_suite} is not yet discovered)".format(_importer_name, _imported_suite.name, suite_with_from, f_suite=from_suite))
return True
vc_from_suite_and_ancestors = {from_suite}
vc_from_suite_and_ancestors |= vc_suites & ancestor_names[from_suite]
if _imported_suite.name not in vc_from_suite_and_ancestors: # 4. above
_log_discovery("Ignoring {} -> {} because of version_from({}) = {}".format(_importer_name, _imported_suite.name, suite_with_from, from_suite))
return True
return False
def _check_and_handle_version_conflict(_suite_import, _importing_suite, _discovered_suite):
if _importing_suite.vc_dir == _discovered_suite.vc_dir:
return True
if _is_imported_by_primary(_discovered_suite):
_log_discovery("Re-reached {} from {}, nothing to do (imported by primary)".format(_suite_import.name, importing_suite.name))
return True
if _should_ignore_conflict_edge(_discovered_suite, _importing_suite.name):
return True
# check that all other importers use the same version
for collocated_suite_name in vc_dir_to_suite_names[_discovered_suite.vc_dir]:
for other_importer_name in importer_names[collocated_suite_name]:
if other_importer_name == _importing_suite.name:
continue
if _should_ignore_conflict_edge(_discovered_suite, other_importer_name):
continue
other_importer = discovered[other_importer_name]
other_importers_import = other_importer.get_import(collocated_suite_name)
if other_importers_import.version and _suite_import.version and other_importers_import.version != _suite_import.version:
# conflict, try to resolve it
if _suite_import.name == collocated_suite_name:
_log_discovery("Re-reached {} from {} with conflicting version compared to {}".format(collocated_suite_name, _importing_suite.name, other_importer_name))
else:
_log_discovery("Re-reached {} (collocated with {}) from {} with conflicting version compared to {}".format(collocated_suite_name, _suite_import.name, _importing_suite.name, other_importer_name))
if update_existing or _was_cloned_or_updated_during_discovery(_discovered_suite):
resolved = _resolve_suite_version_conflict(_discovered_suite.name, _discovered_suite, other_importers_import.version, other_importer, _suite_import, _importing_suite)
if resolved and _update_repo(_discovered_suite, resolved, forget=True):
return False
else:
# This suite was already present
resolution = _resolve_suite_version_conflict(_discovered_suite.name, _discovered_suite, other_importers_import.version, other_importer, _suite_import, _importing_suite, dry_run=True)
if resolution is not None:
if _suite_import.name == collocated_suite_name:
warn("{importing} and {other_import} import different versions of {conflicted}: {version} vs. {other_version}".format(
conflicted=collocated_suite_name,
importing=_importing_suite.name,
other_import=other_importer_name,
version=_suite_import.version,
other_version=other_importers_import.version
))
else:
warn("{importing} and {other_import} import different versions of {conflicted} (collocated with {conflicted_src}): {version} vs. {other_version}".format(
conflicted=collocated_suite_name,
conflicted_src=_suite_import.name,
importing=_importing_suite.name,
other_import=other_importer_name,
version=_suite_import.version,
other_version=other_importers_import.version
))
else:
if _suite_import.name == collocated_suite_name:
_log_discovery("Re-reached {} from {} with same version as {}".format(collocated_suite_name, _importing_suite.name, other_importer_name))
else:
_log_discovery("Re-reached {} (collocated with {}) from {} with same version as {}".format(collocated_suite_name, _suite_import.name, _importing_suite.name, other_importer_name))
return True
try:
def _maybe_add_dynamic_imports():
if not worklist and not dynamic_imports_added[0]:
for name, in_subdir in get_dynamic_imports():
if name not in discovered:
primary.suite_imports.append(SuiteImport(name, version=None, urlinfos=None, dynamicImport=True, in_subdir=in_subdir))
worklist.append((primary.name, name))
_log_discovery("Adding {}->{} dynamic import".format(primary.name, name))
else:
_log_discovery("Skipping {}->{} dynamic import (already imported)".format(primary.name, name))
dynamic_imports_added[0] = True
_maybe_add_dynamic_imports()
while worklist:
importing_suite_name, imported_suite_name = worklist.popleft()
importing_suite = discovered[importing_suite_name]
suite_import = importing_suite.get_import(imported_suite_name)
if suite_import.version_from:
if imported_suite_name not in versions_from:
versions_from[imported_suite_name] = suite_import.version_from, importing_suite_name
_log_discovery("Setting 'version_from({imported}, {from_suite})' as requested by {importing}".format(
importing=importing_suite_name, imported=imported_suite_name, from_suite=suite_import.version_from))
elif suite_import.version_from != versions_from[imported_suite_name][0]:
_log_discovery("Ignoring 'version_from({imported}, {from_suite})' directive from {importing} because we already have 'version_from({imported}, {previous_from_suite})' from {previous_importing}".format(
importing=importing_suite_name, imported=imported_suite_name, from_suite=suite_import.version_from,
previous_importing=versions_from[imported_suite_name][1], previous_from_suite=versions_from[imported_suite_name][0]))
elif suite_import.name in discovered:
if suite_import.name in ancestor_names[importing_suite.name]:
abort("Import cycle detected: {importer} imports {importee} but {importee} transitively imports {importer}".format(importer=importing_suite.name, importee=suite_import.name))
discovered_suite = discovered[suite_import.name]
assert suite_import.name in vc_dir_to_suite_names[discovered_suite.vc_dir]
# Update importer data after re-reaching
importer_names[suite_import.name].add(importing_suite.name)
ancestor_names[suite_import.name] |= ancestor_names[importing_suite.name]
_check_and_handle_version_conflict(suite_import, importing_suite, discovered_suite)
else:
discovered_suite, is_clone = _find_suite_import(importing_suite, suite_import, load=False)
_log_discovery("Discovered {} from {} ({}, newly cloned: {})".format(discovered_suite.name, importing_suite_name, discovered_suite.dir, is_clone))
if is_clone:
original_version[discovered_suite.vc_dir] = VersionType.CLONED, None
_add_discovered_suite(discovered_suite, importing_suite.name)
elif discovered_suite.vc_dir in vc_dir_to_suite_names and not vc_dir_to_suite_names[discovered_suite.vc_dir]:
# we re-discovered a suite that we had cloned and then "un-discovered".
_log_discovery("This is a re-discovery of a previously forgotten repo: {}. Leaving it as-is".format(discovered_suite.vc_dir))
_add_discovered_suite(discovered_suite, importing_suite.name)
elif _was_cloned_or_updated_during_discovery(discovered_suite):
# we are re-reaching a repo through a different imported suite
_add_discovered_suite(discovered_suite, importing_suite.name)
_check_and_handle_version_conflict(suite_import, importing_suite, discovered_suite)
elif (update_existing or discovered_suite.isBinarySuite()) and suite_import.version:
_add_discovered_suite(discovered_suite, importing_suite.name)
if _update_repo(discovered_suite, suite_import.version, forget=True, update_reason="(update_existing mode)"):
actual_version = discovered_suite.vc.parent(discovered_suite.vc_dir)
if actual_version != suite_import.version:
warn("Failed to update {} (in {}) to version {}! Leaving it at {}.".format(discovered_suite.name, discovered_suite.vc_dir, suite_import.version, actual_version))
else:
_log_discovery("Updated {} after discovery (`update_existing` mode) to {}".format(discovered_suite.vc_dir, suite_import.version))
else:
_log_discovery("{} was already at the right revision: {} (`update_existing` mode)".format(discovered_suite.vc_dir, suite_import.version))
else:
_add_discovered_suite(discovered_suite, importing_suite.name)
_maybe_add_dynamic_imports()
except SystemExit as se:
cloned_during_discovery = [d for d, (t, _) in original_version.items() if t == VersionType.CLONED]
if cloned_during_discovery:
log_error("There was an error, removing " + ', '.join(("'" + d + "'" for d in cloned_during_discovery)))
for d in cloned_during_discovery:
shutil.rmtree(d)
for d, (t, v) in original_version.items():
if t == VersionType.REVISION:
log_error("Reverting '{}' to version '{}'".format(d, v))
VC.get_vc(d).update(d, v)
elif t == VersionType.BRANCH:
log_error("Reverting '{}' to branch '{}'".format(d, v))
VC.get_vc(d).update_to_branch(d, v)
raise se
_log_discovery("Discovery finished")
if register:
# Register & finish loading discovered suites
def _register_visit(s):
_register_suite(s)
for _suite_import in s.suite_imports:
if _suite_import.name not in _suites:
_register_visit(discovered[_suite_import.name])
if load:
s._load()
_register_visit(primary)
_log_discovery("Registration/Loading finished")
return primary
import mx_spotbugs
import mx_sigtest
import mx_gate
import mx_compat
import mx_urlrewrites
import mx_benchmark
import mx_benchplot
import mx_proftool # pylint: disable=unused-import
import mx_logcompilation # pylint: disable=unused-import
import mx_downstream
import mx_subst
import mx_ideconfig # pylint: disable=unused-import
import mx_ide_eclipse
import mx_compdb
from mx_javamodules import make_java_module # pylint: disable=unused-import
from mx_javamodules import JavaModuleDescriptor, get_java_module_info, lookup_package, \
get_module_name, parse_requiresConcealed_attribute, \
as_java_module
ERROR_TIMEOUT = 0x700000000 # not 32 bits
_mx_home = realpath(dirname(__file__))
_mx_path = 'mx' if _mx_home in os.environ.get('PATH', '').split(os.pathsep) else join(_mx_home, 'mx')
try:
# needed to work around https://bugs.python.org/issue1927
import readline #pylint: disable=unused-import
except ImportError:
pass
### ~~~~~~~~~~~~~ OS/Arch/Platform/System related
def download_file_exists(urls):
"""
Returns True if one of the given URLs denotes an existing resource.
"""
for url in urls:
try:
_urlopen(url, timeout=0.5).close()
return True
except:
pass
return False
def download_file_with_sha1(name, path, urls, sha1, sha1path, resolve, mustExist, ext=None, sources=False, canSymlink=True):
"""
Downloads an entity from a URL in the list 'urls' (tried in order) to 'path',
checking the sha1 digest of the result against 'sha1' (unless it is 'NOCHECK').
Manages an internal cache of downloads and will link 'path' to the cache entry unless 'canSymlink=False',
in which case it copies the cache entry.
"""
sha1Check = sha1 and sha1 != 'NOCHECK'
canSymlink = canSymlink and can_symlink()
if len(urls) == 0 and not sha1Check:
return path
if not _check_file_with_sha1(path, urls, sha1, sha1path, mustExist=resolve and mustExist):
if len(urls) == 0:
abort('SHA1 of {} ({}) does not match expected value ({})'.format(path, sha1OfFile(path), sha1))
if is_cache_path(path):
cachePath = path
else:
cachePath = _get_path_in_cache(name, sha1, urls, sources=sources, ext=ext)
def _copy_or_symlink(source, link_name):
ensure_dirname_exists(link_name)
if canSymlink:
logvv('Symlinking {} to {}'.format(link_name, source))
if os.path.lexists(link_name):
os.unlink(link_name)
try:
os.symlink(source, link_name)
except OSError as e:
# When doing parallel building, the symlink can fail
# if another thread wins the race to create the symlink
if not os.path.lexists(link_name):
# It was some other error
raise Exception(link_name, e)
else:
# If we can't symlink, then atomically copy. Never move as that
# can cause problems in the context of multiple processes/threads.
with SafeFileCreation(link_name) as sfc:
logvv('Copying {} to {}'.format(source, link_name))
shutil.copy(source, sfc.tmpPath)
cache_path_parent = dirname(cachePath)
if is_cache_path(cache_path_parent):
if exists(cache_path_parent) and not isdir(cache_path_parent):
logv('Wiping bad cache file: {}'.format(cache_path_parent))
# Some old version created bad files at this location, wipe it!
try:
os.unlink(cache_path_parent)
except OSError as e:
# we don't care about races, only about getting rid of this file
if exists(cache_path_parent) and not isdir(cache_path_parent):
raise e
if not exists(cachePath):
oldCachePath = _get_path_in_cache(name, sha1, urls, sources=sources, oldPath=True, ext=ext)
if exists(oldCachePath):
logv('Migrating cache file of {} from {} to {}'.format(name, oldCachePath, cachePath))
_copy_or_symlink(oldCachePath, cachePath)
_copy_or_symlink(oldCachePath + '.sha1', cachePath + '.sha1')
if not exists(cachePath) or (sha1Check and sha1OfFile(cachePath) != sha1):
if exists(cachePath):
log('SHA1 of ' + cachePath + ' does not match expected value (' + sha1 + ') - found ' + sha1OfFile(cachePath) + ' - re-downloading')
log('Downloading ' + ("sources " if sources else "") + name + ' from ' + str(urls))
download(cachePath, urls)
if path != cachePath:
_copy_or_symlink(cachePath, path)
if not _check_file_with_sha1(path, urls, sha1, sha1path, newFile=True, logErrors=True):
abort("No valid file for {} after download. Broken download? SHA1 not updated in suite.py file?".format(path))
return path
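# Illustrative call of download_file_with_sha1 (hypothetical names, URL and digest):
#   download_file_with_sha1('EXAMPLE_LIB', 'mxbuild/libs/example.jar',
#                           ['https://repo1.maven.org/maven2/org/example/example/1.0/example-1.0.jar'],
#                           '<40-char sha1>', 'mxbuild/libs/example.jar.sha1',
#                           resolve=True, mustExist=True)
# The artifact is downloaded into the user's download cache and 'path' is symlinked to
# (or copied from) the cache entry.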
def dir_contains_files_recursively(directory, file_pattern):
for file_name in os.listdir(directory):
file_path = join(directory, file_name)
found = dir_contains_files_recursively(file_path, file_pattern) if isdir(file_path) \
else re.match(file_pattern, file_name)
if found:
return True
return False
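# Illustrative example (hypothetical directory): dir_contains_files_recursively('build/logs', r'.*\.log')
# returns True as soon as any file whose name matches the pattern is found anywhere under
# 'build/logs'. Note that re.match is applied to the bare file name, not the full path.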
def _cygpathU2W(p):
"""
Translate a path from unix-style to windows-style.
This method has no effect on platforms other than Cygwin.
"""
if p is None or not is_cygwin():
return p
return _check_output_str(['cygpath', '-a', '-w', p]).strip()
def _cygpathW2U(p):
"""
Translate a path from windows-style to unix-style.
This method has no effect on platforms other than Cygwin.
"""
if p is None or not is_cygwin():
return p
return _check_output_str(['cygpath', '-a', '-u', p]).strip()
def _separatedCygpathU2W(p):
"""
Translate a group of paths, separated by a path separator,
from unix-style to windows-style.
This method has no effect on platforms other than Cygwin.
"""
if p is None or p == "" or not is_cygwin():
return p
return ';'.join(map(_cygpathU2W, p.split(os.pathsep)))
def _separatedCygpathW2U(p):
"""
Translate a group of paths, separated by a path separator,
from windows-style to unix-style.
This method has no effect on platforms other than Cygwin.
"""
if p is None or p == "" or not is_cygwin():
return p
return os.pathsep.join(map(_cygpathW2U, p.split(';')))
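# Illustrative examples of the cygpath helpers above (assumed cygpath output on Cygwin):
#   _cygpathU2W('/cygdrive/c/graal')                      -> 'C:\graal'
#   _separatedCygpathU2W('/cygdrive/c/a:/cygdrive/c/b')   -> 'C:\a;C:\b'
# On platforms other than Cygwin, all of these helpers return their input unchanged.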
def get_arch():
return getattr(_opts, 'arch', None) or _get_real_arch()
def _get_real_arch():
machine = platform.uname()[4]
if machine in ['aarch64', 'arm64']:
return 'aarch64'
if machine in ['amd64', 'AMD64', 'x86_64', 'i86pc']:
return 'amd64'
if machine in ['sun4v', 'sun4u', 'sparc64']:
return 'sparcv9'
if machine == 'i386' and is_darwin():
try:
# Support for Snow Leopard and earlier versions of Mac OS X
if _check_output_str(['sysctl', '-n', 'hw.cpu64bit_capable']).strip() == '1':
return 'amd64'
except OSError:
# sysctl is not available
pass
abort('unknown or unsupported architecture: os=' + get_os() + ', machine=' + machine)
mx_subst.results_substitutions.register_no_arg('arch', get_arch)
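# The 'arch' substitution registered above lets suite definitions use '<arch>' in attribute
# values; e.g. a path template like 'lib/<os>-<arch>/' would resolve to 'lib/linux-amd64/'
# on a Linux amd64 host (illustrative example; the '<os>' substitution is registered elsewhere).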
def vc_system(kind, abortOnError=True):
for vc in _vc_systems:
if vc.kind == kind:
vc.check()
return vc
if abortOnError:
abort('no VC system named ' + kind)
else:
return None
@suite_context_free
def sha1(args):
"""generate sha1 digest for given file"""
parser = ArgumentParser(prog='sha1')
parser.add_argument('--path', action='store', help='path to file', metavar='<path>', required=True)
parser.add_argument('--plain', action='store_true', help='just the 40 chars', )
args = parser.parse_args(args)
value = sha1OfFile(args.path)
if args.plain:
sys.stdout.write(value)
else:
print('sha1 of ' + args.path + ': ' + value)
def sha1OfFile(path):
with open(path, 'rb') as f:
d = hashlib.sha1()
while True:
buf = f.read(4096)
if not buf:
break
d.update(buf)
return d.hexdigest()
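# Illustrative uses of the 'sha1' command above (hypothetical file):
#   mx sha1 --path build/foo.jar           # prints "sha1 of build/foo.jar: <digest>"
#   mx sha1 --path build/foo.jar --plain   # prints just the 40-character digest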
def user_home():
return _opts.user_home if hasattr(_opts, 'user_home') else os.path.expanduser('~')
def dot_mx_dir():
return join(user_home(), '.mx')
def is_cache_path(path):
return path.startswith(_cache_dir())
def relpath_or_absolute(path, start, prefix=""):
"""
Finds a path relative to 'start' and joins it to 'prefix', or otherwise falls back to using 'path' as an absolute path.
If no relative path can be found and 'path' is not absolute, an error is thrown.
"""
try:
return join(prefix, os.path.relpath(path, start))
except ValueError:
if not os.path.isabs(path):
raise ValueError('can not find a relative path to dependency and path is not absolute: ' + path)
return path
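# Illustrative example of relpath_or_absolute (hypothetical paths):
#   relpath_or_absolute('/work/suite/lib/foo.jar', '/work/suite', prefix='<suite>')
#   -> '<suite>/lib/foo.jar'
# If no relative path can be computed (e.g. different drives on Windows), the absolute
# path is returned as-is, and a relative 'path' in that situation is an error.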
def cpu_count():
cpus = None
if sys.version_info[0] >= 3:
try:
# takes into account CPU affinity restrictions, which are available on some unix platforms
cpus = len(os.sched_getaffinity(0))
except AttributeError:
cpus = None
if cpus is None:
import multiprocessing
cpus = multiprocessing.cpu_count()
if _opts.cpu_count:
return cpus if cpus <= _opts.cpu_count else _opts.cpu_count
else:
return cpus
def env_var_to_bool(name, default='false'):
"""
:type name: str
:type default: str
:rtype: bool
"""
val = get_env(name, default)
b = str_to_bool(val)
if isinstance(b, bool):
return b
else:
raise abort("Invalid boolean env var value {}={}; expected: <true | false>".format(name, val))
def str_to_bool(val):
"""
:type val: str
:rtype: str | bool
"""
low_val = val.lower()
if low_val in ('false', '0', 'no'):
return False
elif low_val in ('true', '1', 'yes'):
return True
return val
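# Illustrative examples of the helpers above:
#   str_to_bool('TRUE') -> True, str_to_bool('0') -> False, str_to_bool('maybe') -> 'maybe'
#   env_var_to_bool('MX_EXAMPLE_FLAG')  # aborts unless the value parses as a boolean
#                                       # (hypothetical variable name; default is 'false')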
def is_continuous_integration():
return env_var_to_bool("CONTINUOUS_INTEGRATION")
def is_darwin():
return sys.platform.startswith('darwin')
def is_linux():
return sys.platform.startswith('linux')
def is_openbsd():
return sys.platform.startswith('openbsd')
def is_sunos():
return sys.platform.startswith('sunos')
def is_windows():
return sys.platform.startswith('win32')
def is_cygwin():
return sys.platform.startswith('cygwin')
def get_os():
"""
Get a canonical form of sys.platform.
"""
if is_darwin():
return 'darwin'
elif is_linux():
return 'linux'
elif is_openbsd():
return 'openbsd'
elif is_sunos():
return 'solaris'
elif is_windows():
return 'windows'
elif is_cygwin():
return 'cygwin'
else:
abort('Unknown operating system ' + sys.platform)
_os_variant = None
def get_os_variant():
global _os_variant
if _os_variant is None:
if get_os() == 'linux':
try:
proc_output = _check_output_str(['ldd', '--version'], stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
proc_output = e.output
if proc_output and 'musl' in proc_output:
_os_variant = 'musl'
if _os_variant is None:
_os_variant = ''
logv('OS variant detected: {}'.format(_os_variant if _os_variant else 'none'))
return _os_variant
def _is_process_alive(p):
if isinstance(p, subprocess.Popen):
return p.poll() is None
assert isinstance(p, multiprocessing.Process), p
return p.is_alive()
def _send_sigquit():
if is_windows():
warn("mx: implement me! want to send SIGQUIT to my child process")
return
try:
from psutil import Process, NoSuchProcess
def _get_args(pid):
try:
proc = Process(pid)
return proc.cmdline()
except NoSuchProcess:
return None
except ImportError:
warn("psutil is not available, java process detection is less accurate")
def _get_args(pid):
return None
for p, args in _currentSubprocesses:
if p is None or not _is_process_alive(p):
continue
real_args = _get_args(p.pid)
if real_args:
args = real_args
if not args:
continue
exe_name = args[0].split(os.sep)[-1]
# Send SIGQUIT to "java" processes, or to things that started as "native-image"
# if we cannot see the current executable name
if exe_name == "java" or exe_name == "native-image" and not real_args:
# only send SIGQUIT to the child not the process group
logv('sending SIGQUIT to ' + str(p.pid))
os.kill(p.pid, signal.SIGQUIT)
time.sleep(0.1)
def abort(codeOrMessage, context=None, killsig=signal.SIGTERM):
"""
Aborts the program with a SystemExit exception.
If `codeOrMessage` is a plain integer, it specifies the system exit status;
otherwise (e.g. if it is a string), the object's value is printed and the exit status is 1.
The `context` argument can provide extra context for an error message.
If `context` is callable, it is called and the returned value is printed.
If `context` defines a __abort_context__ method, the latter is called and
its return value is printed. Otherwise str(context) is printed.
"""
import threading
if sys.version_info[0] < 3 or threading.current_thread() is threading.main_thread():
if is_continuous_integration() or _opts and hasattr(_opts, 'killwithsigquit') and _opts.killwithsigquit:
logv('sending SIGQUIT to subprocesses on abort')
_send_sigquit()
for p, args in _currentSubprocesses:
if _is_process_alive(p):
if is_windows():
p.terminate()
else:
_kill_process(p.pid, killsig)
time.sleep(0.1)
if _is_process_alive(p):
try:
if is_windows():
p.terminate()
else:
_kill_process(p.pid, signal.SIGKILL)
except BaseException as e:
if _is_process_alive(p):
log_error('error while killing subprocess {0} "{1}": {2}'.format(p.pid, ' '.join(args), e))
sys.stdout.flush()
if is_continuous_integration() or (_opts and hasattr(_opts, 'verbose') and _opts.verbose):
import traceback
traceback.print_stack()
if context is not None:
if callable(context):
contextMsg = context()
elif hasattr(context, '__abort_context__'):
contextMsg = context.__abort_context__()
else:
contextMsg = str(context)
else:
contextMsg = ""
if isinstance(codeOrMessage, int):
# Log the context separately so that SystemExit
# communicates the intended exit status
error_message = contextMsg
error_code = codeOrMessage
elif contextMsg:
error_message = contextMsg + ":\n" + codeOrMessage
error_code = 1
else:
error_message = codeOrMessage
error_code = 1
log_error(error_message)
raise SystemExit(error_code)
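# Illustrative calls of abort (hypothetical messages):
#   abort(2)  # exits with status 2; any message comes from the 'context' argument
#   abort('missing file', context=lambda: 'while building suite X')
#   # logs "while building suite X:\nmissing file" and exits with status 1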
def abort_or_warn(message, should_abort, context=None):
if should_abort:
abort(message, context)
else:
warn(message, context)
def _suggest_http_proxy_error(e):
"""
Displays a message related to http proxies that may explain the reason for the exception `e`.
"""
proxyVars = ['http_proxy', 'https_proxy', 'HTTP_PROXY', 'HTTPS_PROXY']
proxyDefs = {k: _original_environ[k] for k in proxyVars if k in _original_environ.keys()}
if not proxyDefs:
warn('** If behind a firewall without direct internet access, use the http_proxy environment variable ' \
'(e.g. "env http_proxy=proxy.company.com:80 mx ...") or download manually with a web browser.')
else:
defs = [i[0] + '=' + i[1] for i in proxyDefs.items()]
warn(
'** You have the following environment variable(s) set which may be the cause of the URL error:\n ' + '\n '.join(
defs))
def _suggest_tlsv1_error(e):
"""
Displays a message related to TLS errors that can occur when connecting to certain websites
(e.g., GitHub) on a version of Python that uses an older implementation of OpenSSL.
"""
if 'tlsv1 alert protocol version' in str(e):
warn('It seems that you have a version of python ({}) that uses an older version of OpenSSL. '.format(
sys.executable) +
'This should be fixed by installing the latest 2.7 release from https://www.python.org/downloads')
def _init_can_symlink():
if 'symlink' in dir(os):
try:
dst = '.symlink_dst.{}'.format(os.getpid())
while exists(dst):
dst = '{}.{}'.format(dst, time.time())
os.symlink(__file__, dst)
os.remove(dst)
return True
except (OSError, NotImplementedError):
pass
return False
_can_symlink = _init_can_symlink()
# Can only warn about lack of symlink support once options
# have been parsed so that the warning is suppressed by --no-warning.
_can_symlink_warned = False
def can_symlink():
"""
Determines if ``os.symlink`` is supported on the current platform.
"""
if not _can_symlink:
global _can_symlink_warned
if not _can_symlink_warned:
# The warning may actually be issued multiple times if this
# method is called by multiple mx build subprocesses.
warn('symlinking not supported')
_can_symlink_warned = True
return False
return True
def getmtime(name):
"""
Wrapper for os.path.getmtime that handles long path names on Windows.
"""
return os.path.getmtime(_safe_path(name))
def stat(name):
"""
Wrapper for os.stat that handles long path names on Windows.
"""
return os.stat(_safe_path(name))
def lstat(name):
"""
Wrapper for os.lstat that handles long path names on Windows.
"""
return os.lstat(_safe_path(name))
def open(name, mode='r', encoding='utf-8'): # pylint: disable=redefined-builtin
"""
Wrapper for builtin open function that handles long path names on Windows.
Also, it handles supplying a default value of 'utf-8' for the encoding
parameter.
"""
if 'b' in mode or sys.version_info[0] < 3:
# When opening files in binary mode, no encoding can be specified.
# Also, on Python 2, `open` doesn't take any encoding parameter since
# the strings are not decoded at read time.
return builtins.open(_safe_path(name), mode=mode)
else:
return builtins.open(_safe_path(name), mode=mode, encoding=encoding)
def copytree(src, dst, symlinks=False, ignore=None):
shutil.copytree(_safe_path(src), _safe_path(dst), symlinks, ignore)
def copyfile(src, dst):
shutil.copyfile(_safe_path(src), _safe_path(dst))
def rmtree(path, ignore_errors=False):
path = _safe_path(path)
if ignore_errors:
def on_error(*args):
pass
elif is_windows():
def on_error(func, _path, exc_info):
os.chmod(_path, S_IWRITE)
if isdir(_path):
os.rmdir(_path)
else:
os.unlink(_path)
else:
def on_error(*args):
raise # pylint: disable=misplaced-bare-raise
if isdir(path) and not islink(path):
shutil.rmtree(path, onerror=on_error)
else:
try:
os.remove(path)
except OSError:
on_error(os.remove, path, sys.exc_info())
def clean(args, parser=None):
"""remove all class files, images, and executables
Removes all files created by a build, including Java class files, executables, and
generated images.
"""
suppliedParser = parser is not None
parser = parser if suppliedParser else ArgumentParser(prog='mx clean')
parser.add_argument('--no-native', action='store_false', dest='native', help='do not clean native projects')
parser.add_argument('--no-java', action='store_false', dest='java', help='do not clean Java projects')
parser.add_argument('--dependencies', '--projects', action='store',
help='comma separated projects to clean (omit to clean all projects)')
parser.add_argument('--no-dist', action='store_false', dest='dist', help='do not delete distributions')
parser.add_argument('--all', action='store_true', help='clear all dependencies (not just default targets)')
parser.add_argument('--aggressive', action='store_true', help='clear all suite output')
parser.add_argument('--disk-usage', action='store_true', help='compute and show disk usage before and after')
args = parser.parse_args(args)
suite_roots = {s:s.get_output_root(platformDependent=False, jdkDependent=False) for s in suites()}
disk_usage = None
if args.disk_usage:
disk_usage = {s:mx_gc._get_size_in_bytes(root) for s, root in suite_roots.items()}
def _collect_clean_dependencies():
if args.all:
return dependencies(True)
_, roots = defaultDependencies(True)
res = []
walk_deps(roots, visit=lambda d, e: res.append(d))
return _dependencies_opt_limit_to_suites(res)
if args.dependencies is not None:
deps = [dependency(name) for name in args.dependencies.split(',')]
else:
deps = _collect_clean_dependencies()
# TODO should we clean all the instantiations of a template? How do we enumerate all instantiations?
for dep in deps:
task = dep.getBuildTask(args)
if getattr(task, 'cleanForbidden', lambda: True)():
continue
task.logClean()
task.clean()
for configName in ['netbeans-config.zip', 'eclipse-config.zip']:
config = TimeStampFile(join(dep.suite.get_mx_output_dir(), configName))
if config.exists():
os.unlink(config.path)
if args.aggressive:
for s, root in suite_roots.items():
if exists(root):
log('Cleaning {}...'.format(root))
rmtree(root)
if args.disk_usage:
for s in disk_usage:
before = disk_usage[s]
after = mx_gc._get_size_in_bytes(suite_roots[s])
log('{}: {} -> {}'.format(s, mx_gc._format_bytes(before), mx_gc._format_bytes(after)))
if suppliedParser:
return args
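# Illustrative command lines for the 'clean' command above (hypothetical project name):
#   mx clean                                    # clean the default build targets
#   mx clean --dependencies com.example.core    # clean only the named project
#   mx clean --aggressive --disk-usage          # wipe all suite output and report sizes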
_tar_compressed_extensions = {'bz2', 'gz', 'lz', 'lzma', 'xz', 'Z'}
_known_zip_pre_extensions = {'src'}
def get_file_extension(path):
root, ext = os.path.splitext(path)
if len(ext) > 0:
ext = ext[1:] # remove leading .
if ext in _tar_compressed_extensions and os.path.splitext(root)[1] == ".tar":
return "tar." + ext
if ext == 'zip':
_, pre_ext = os.path.splitext(root)
if len(pre_ext) > 0:
pre_ext = pre_ext[1:] # remove leading .
if pre_ext in _known_zip_pre_extensions:
return pre_ext + ".zip"
if ext == 'map':
_, pre_ext = os.path.splitext(root)
if len(pre_ext) > 0:
pre_ext = pre_ext[1:] # remove leading .
return pre_ext + ".map"
return ext
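# Illustrative results of get_file_extension (derived from the rules above):
#   get_file_extension('dists/foo.tar.gz')   -> 'tar.gz'
#   get_file_extension('dists/foo.src.zip')  -> 'src.zip'
#   get_file_extension('web/bundle.js.map')  -> 'js.map'
#   get_file_extension('dists/foo.jar')      -> 'jar'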
def change_file_extension(path, new_extension):
ext = get_file_extension(path)
if not ext:
return path + '.' + new_extension
return path[:-len(ext)] + new_extension
def change_file_name(path, new_file_name):
return join(dirname(path), new_file_name + '.' + get_file_extension(path))
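# Illustrative results of the two helpers above:
#   change_file_extension('dists/foo.tar.gz', 'zip') -> 'dists/foo.zip'
#   change_file_name('dists/foo.tar.gz', 'bar')      -> 'dists/bar.tar.gz'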
def ensure_dirname_exists(path, mode=None):
d = dirname(path)
if d != '':
ensure_dir_exists(d, mode)
def ensure_dir_exists(path, mode=None):
"""
Ensures all directories on 'path' exist, creating them first if necessary with os.makedirs().
"""
if not isdir(path):
try:
if mode:
os.makedirs(path, mode=mode)
else:
os.makedirs(path)
except OSError as e:
if e.errno == errno.EEXIST and isdir(path):
# be happy if another thread already created the path
pass
else:
raise e
return path
def show_envs(args):
"""print environment variables and their values
By default, only variables starting with "MX" are shown.
The --all option forces all variables to be printed."""
parser = ArgumentParser(prog='mx envs')
parser.add_argument('--all', action='store_true', help='show all variables, not just those starting with "MX"')
args = parser.parse_args(args)
for key, value in os.environ.items():
if args.all or key.startswith('MX'):
print('{0}: {1}'.format(key, value))
def _attempt_download(url, path, jarEntryName=None):
"""
Attempts to download content from `url` and save it to `path`.
If `jarEntryName` is not None, then the downloaded content is
expected to be a zip/jar file and the entry of the corresponding
name is extracted and written to `path`.
:return: True if the download succeeded, "retry" if it failed but might succeed
if retried, False otherwise
"""
progress = not _opts.no_download_progress and sys.stdout.isatty()
conn = None
try:
# Use a temp file while downloading to avoid multiple threads overwriting the same file
with SafeFileCreation(path) as sfc:
tmp = sfc.tmpPath
# 10 second timeout to establish connection
url = url.replace('\\', '/')
conn = _urlopen(url, timeout=10)
# Not all servers support the "Content-Length" header
lengthHeader = conn.headers.get('Content-Length')
length = int(lengthHeader.strip()) if lengthHeader else -1
bytesRead = 0
chunkSize = 8192
with open(tmp, 'wb') as fp:
chunk = conn.read(chunkSize)
while chunk:
bytesRead += len(chunk)
fp.write(chunk)
if length == -1:
if progress:
sys.stdout.write('\r {} bytes'.format(bytesRead))
else:
if progress:
sys.stdout.write('\r {} bytes ({:.0f}%)'.format(bytesRead, bytesRead * 100 / length))
if bytesRead == length:
break
chunk = conn.read(chunkSize)
if progress:
sys.stdout.write('\n')
if length not in (-1, bytesRead):
log_error('Download of {} truncated: read {} of {} bytes.'.format(url, bytesRead, length))
return "retry"
if jarEntryName:
with zipfile.ZipFile(tmp, 'r') as zf:
jarEntry = zf.read(jarEntryName)
with open(tmp, 'wb') as fp:
fp.write(jarEntry)
return True
except (IOError, socket.timeout, _urllib_error.HTTPError) as e:
# In case of an exception the temp file is removed automatically, so no cleanup is necessary
log_error("Error downloading from " + url + " to " + path + ": " + str(e))
_suggest_http_proxy_error(e)
_suggest_tlsv1_error(e)
if isinstance(e, _urllib_error.HTTPError) and e.code == 500:
return "retry"
finally:
if conn:
conn.close()
return False
class _JarURL(object):
"""
A URL denoting an entry in a JAR file. The syntax of a JAR URL is:
jar:<base_url>!/<entry>
for example:
jar:http://www.foo.com/bar/baz.jar!/COM/foo/Quux.class
More info at https://docs.oracle.com/en/java/javase/15/docs/api/java.base/java/net/JarURLConnection.html
"""
_pattern = re.compile('jar:(.*)!/(.*)')
@staticmethod
def parse(url):
if not url.startswith('jar:'):
return None
m = _JarURL._pattern.match(url)
if m:
return _JarURL(m.group(1), m.group(2))
return None
def __init__(self, base_url, entry):
self.base_url = base_url
self.entry = entry
def __repr__(self):
return 'jar:{}!/{}'.format(self.base_url, self.entry)
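# Illustrative example of _JarURL.parse:
#   _JarURL.parse('jar:https://example.org/libs/foo.jar!/META-INF/MANIFEST.MF')
#   -> base_url='https://example.org/libs/foo.jar', entry='META-INF/MANIFEST.MF'
# URLs that do not start with 'jar:' yield None.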
def download(path, urls, verbose=False, abortOnError=True, verifyOnly=False):
"""
Attempts to download content from each URL in the list, stopping after the first successful download.
If the content cannot be retrieved from any URL, the program is aborted, unless abortOnError=False.
The downloaded content is written to the file indicated by `path`.
"""
if not verifyOnly:
ensure_dirname_exists(path)
assert not path.endswith(os.sep)
verify_errors = {}
for url in urls:
if not verifyOnly and verbose:
log('Downloading ' + url + ' to ' + path)
jar_url = _JarURL.parse(url)
jarEntryName = None
if jar_url:
url = jar_url.base_url
jarEntryName = jar_url.entry
if not _opts.trust_http and (url.lower().startswith('http://') or url.lower().startswith('ftp://')):
warn('Downloading from non-https URL {}. Use --trust-http mx option to suppress this warning.'.format(url))
if verifyOnly:
try:
conn = _urlopen(url, timeout=5, timeout_retries=1)
conn.close()
except (IOError, socket.timeout) as e:
_suggest_tlsv1_error(e)
verify_errors[url] = e
else:
for i in range(4):
if i != 0:
time.sleep(1)
warn('Retry {} to download from {}'.format(i, url))
res = _attempt_download(url, path, jarEntryName)
if res == "retry":
continue
if res:
return True # Download was successful
verify_msg = None
if verifyOnly and len(verify_errors) > 0: # verify-mode -> print error details
verify_msg = 'Could not download to ' + path + ' from any of the following URLs: ' + ', '.join(urls)
for url, e in verify_errors.items():
verify_msg += '\n ' + url + ': ' + str(e)
if verifyOnly and len(verify_errors) < len(urls): # verify-mode at least one success -> success
if verify_msg is not None:
warn(verify_msg)
return True
else: # Either verification error or no download was successful
if not verify_msg:
verify_msg = 'Could not download to ' + path + ' from any of the following URLs: ' + ', '.join(urls)
for url in urls:
verify_msg += '\n ' + url
if abortOnError:
abort(verify_msg)
else:
warn(verify_msg)
return False
def update_file(path, content, showDiff=False):
"""
Updates a file with some given content if the content differs from what's in
the file already. The return value indicates if the file was updated.
"""
existed = exists(path)
try:
old = None
if existed:
with open(path, 'r') as f:
old = f.read()
if old == content:
return False
if existed and _opts.backup_modified:
shutil.move(path, path + '.orig')
with open(path, 'w') as f:
f.write(content)
if existed:
logv('modified ' + path)
if _opts.backup_modified:
log('backup ' + path + '.orig')
if showDiff:
log('diff: ' + path)
log(''.join(difflib.unified_diff(old.splitlines(1), content.splitlines(1))))
else:
logv('created ' + path)
return True
except IOError as e:
abort('Error while writing to ' + path + ': ' + str(e))
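# Compatibility shim (assumed intent): make zipfile.ZipFile usable as a context manager
# on Python versions where it does not implement __enter__/__exit__ natively.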
try: zipfile.ZipFile.__enter__
except:
zipfile.ZipFile.__enter__ = lambda self: self
zipfile.ZipFile.__exit__ = lambda self, t, value, traceback: self.close()
_projects = dict()
_libs = dict()
"""
:type: dict[str, ResourceLibrary|Library]
"""
_jreLibs = dict()
"""
:type: dict[str, JreLibrary]
"""
_jdkLibs = dict()
"""
:type: dict[str, JdkLibrary]
"""
_dists = dict()
_removed_projects = dict()
_removed_libs = dict()
_removed_jreLibs = dict()
_removed_jdkLibs = dict()
_removed_dists = dict()
_distTemplates = dict()
_licenses = dict()
_repositories = dict()
_mavenRepoBaseURLs = [
"https://repo1.maven.org/maven2/",
"https://search.maven.org/remotecontent?filepath="
]
"""
Map of the environment variables loaded by parsing the suites.
"""
_loadedEnv = dict()
_jdkFactories = {}
_annotationProcessorProjects = None
_mx_tests_suite = None
_suitemodel = None
_opts = Namespace()
_extra_java_homes = []
_default_java_home = None
_check_global_structures = True # can be set False to allow suites with duplicate definitions to load without aborting
_vc_systems = []
_mvn = None
_binary_suites = None # source suites only if None, [] means all binary, otherwise specific list
_urlrewrites = [] # list of URLRewrite objects
_original_environ = dict(os.environ)
_original_directory = os.getcwd()
_jdkProvidedSuites = set()
# List of functions to run after options have been parsed
_opts_parsed_deferrables = []
def nyi(name, obj):
abort('{} is not implemented for {}'.format(name, obj.__class__.__name__))
raise NotImplementedError()
def _first(g):
try:
return next(g)
except StopIteration:
return None
### Dependencies
"""
Map from the name of a removed dependency to the reason it was removed.
A reason may be the name of another removed dependency, forming a causality chain.
"""
_removedDeps = {}
def _check_dependency_cycles():
"""
Checks for cycles in the dependency graph.
"""
path = []
def _visitEdge(src, dst, edge):
if dst in path:
abort('dependency cycle detected: ' + ' -> '.join([d.name for d in path] + [dst.name]), context=dst)
def _preVisit(dep, edge):
path.append(dep)
return True
def _visit(dep, edge):
last = path.pop(-1)
assert last is dep
walk_deps(ignoredEdges=[DEP_EXCLUDED], preVisit=_preVisit, visitEdge=_visitEdge, visit=_visit)
def _remove_unsatisfied_deps():
"""
Remove projects and libraries that (recursively) depend on an optional library
whose artifact does not exist or on a JRE library that is not present in the
JDK for a project. Also remove projects whose Java compliance requirement
cannot be satisfied by the configured JDKs. Removed projects and libraries are
also removed from distributions in which they are listed as dependencies.
Returns a map from the name of a removed dependency to the reason it was removed.
A reason may be the name of another removed dependency.
"""
removedDeps = OrderedDict()
def visit(dep, edge):
if dep.isLibrary():
if dep.optional:
if not dep.is_available():
note_removal(dep, 'optional library {0} was removed as it is not available'.format(dep))
for depDep in list(dep.deps):
if depDep in removedDeps:
note_removal(dep, 'removed {} because {} was removed'.format(dep, depDep))
elif dep.isJavaProject():
# TODO this lookup should be the same as the one used in build
depJdk = get_jdk(dep.javaCompliance, cancel='some projects will be removed which may result in errors', purpose="building projects with compliance " + repr(dep.javaCompliance), tag=DEFAULT_JDK_TAG)
if depJdk is None:
note_removal(dep, 'project {0} was removed as JDK {0.javaCompliance} is not available'.format(dep))
elif hasattr(dep, "javaVersionExclusion") and getattr(dep, "javaVersionExclusion") == depJdk.javaCompliance:
note_removal(dep, 'project {0} was removed due to its "javaVersionExclusion" attribute'.format(dep))
else:
for depDep in list(dep.deps):
if depDep in removedDeps:
note_removal(dep, 'removed {} because {} was removed'.format(dep, depDep))
elif depDep.isJreLibrary() or depDep.isJdkLibrary():
lib = depDep
if not lib.is_provided_by(depJdk):
if lib.optional:
note_removal(dep, 'project {} was removed as dependency {} is missing'.format(dep, lib))
else:
abort('{} library {} required by {} not provided by {}'.format('JDK' if lib.isJdkLibrary() else 'JRE', lib, dep, depJdk), context=dep)
elif dep.isJARDistribution() and not dep.suite.isBinarySuite():
prune(dep, discard=lambda d: not any(dd.isProject()
or (dd.isBaseLibrary()
and not dd.isJdkLibrary()
and not dd.isJreLibrary()
and dd not in d.excludedLibs)
for dd in d.deps))
elif dep.isTARDistribution():
if dep.isLayoutDistribution():
prune(dep, discard=LayoutDistribution.canDiscard)
else:
prune(dep)
if hasattr(dep, 'ignore'):
reasonAttr = getattr(dep, 'ignore')
if isinstance(reasonAttr, bool):
if reasonAttr:
abort('"ignore" attribute must be False/"false" or a non-empty string providing the reason the dependency is ignored', context=dep)
else:
assert isinstance(reasonAttr, str)
strippedReason = reasonAttr.strip()
if len(strippedReason) != 0:
if not strippedReason == "false":
note_removal(dep, '{} removed: {}'.format(dep, strippedReason))
else:
abort('"ignore" attribute must be False/"false" or a non-empty string providing the reason the dependency is ignored', context=dep)
if hasattr(dep, 'buildDependencies'):
for buildDep in list(dep.buildDependencies):
if buildDep in removedDeps:
note_removal(dep, 'removed {} because {} was removed'.format(dep, buildDep))
def prune(dist, discard=lambda d: not (d.deps or d.buildDependencies)):
assert dist.isDistribution()
if dist.deps or dist.buildDependencies:
distRemovedDeps = []
for distDep in list(dist.deps) + list(dist.buildDependencies):
if distDep in removedDeps:
logv('[{} was removed from distribution {}]'.format(distDep, dist))
dist.removeDependency(distDep)
distRemovedDeps.append(distDep)
if discard(dist):
note_removal(dist, 'distribution {} was removed as all its dependencies were removed'.format(dist),
details=[e.name for e in distRemovedDeps])
def note_removal(dep, reason, details=None):
logv('[' + reason + ']')
removedDeps[dep] = reason if details is None else (reason, details)
walk_deps(visit=visit, ignoredEdges=[DEP_EXCLUDED])
res = OrderedDict()
for dep, reason in removedDeps.items():
if not isinstance(reason, str):
assert isinstance(reason, tuple)
res[dep.name] = reason
dep.getSuiteRegistry().remove(dep)
dep.getSuiteRemovedRegistry().append(dep)
dep.getGlobalRegistry().pop(dep.name)
dep.getGlobalRemovedRegistry()[dep.name] = dep
return res
DEP_STANDARD = "standard dependency"
DEP_BUILD = "a build dependency"
DEP_ANNOTATION_PROCESSOR = "annotation processor dependency"
DEP_EXCLUDED = "library excluded from a distribution"
#: Set of known dependency edge kinds
DEP_KINDS = frozenset([DEP_STANDARD, DEP_BUILD, DEP_ANNOTATION_PROCESSOR, DEP_EXCLUDED])
def _is_edge_ignored(edge, ignoredEdges):
return ignoredEdges and edge in ignoredEdges
DEBUG_WALK_DEPS = False
DEBUG_WALK_DEPS_LINE = 1
def _debug_walk_deps_helper(dep, edge, ignoredEdges):
assert edge not in ignoredEdges
global DEBUG_WALK_DEPS_LINE
if DEBUG_WALK_DEPS:
if edge:
print('{}:walk_deps:{}{} # {}'.format(DEBUG_WALK_DEPS_LINE, ' ' * edge.path_len(), dep, edge.kind))
else:
print('{}:walk_deps:{}'.format(DEBUG_WALK_DEPS_LINE, dep))
DEBUG_WALK_DEPS_LINE += 1
class DepEdge:
"""
Represents an edge traversed while visiting a spanning tree of the dependency graph.
"""
def __init__(self, src, kind, prev):
"""
:param src: the source of this dependency edge
:param kind: one of the values in `DEP_KINDS`
:param prev: the dependency edge traversed to reach `src` or None if `src` is a root
"""
assert kind in DEP_KINDS
self.src = src
self.kind = kind
self.prev = prev
def __str__(self):
return '{}@{}'.format(self.src, self.kind)
def path(self):
if self.prev:
return self.prev.path() + [self.src]
return [self.src]
def path_len(self):
return 1 + self.prev.path_len() if self.prev else 0
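# Minimal sketch of DepEdge semantics, assuming edges are chained as the dependency walker
# descends: for a chain A -> B -> C of standard dependencies, the edge used to reach C has
# src == B, prev == the edge that reached B, path() == [A, B] and path_len() == 1.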
# for backwards compatibility
def _replaceResultsVar(m):
return mx_subst.results_substitutions.substitute(m.group(0))
# for backwards compatibility
def _replacePathVar(m):
return mx_subst.path_substitutions.substitute(m.group(0))
def _get_dependency_path(dname):
d = dependency(dname)
path = None
if d.isJARDistribution() and hasattr(d, "path"):
path = d.path
elif d.isTARDistribution() and hasattr(d, "output"):
path = d.output
elif d.isLibrary() or d.isResourceLibrary():
path = d.get_path(resolve=True)
elif d.isProject():
path = d.dir
if path:
return join(d.suite.dir, path)
else:
abort('dependency ' + dname + ' has no path')
mx_subst.path_substitutions.register_with_arg('path', _get_dependency_path)
class ClasspathDependency(Dependency):
"""
A dependency that can be put on the classpath of a Java commandline.
"""
def __init__(self, **kwArgs): # pylint: disable=super-init-not-called
pass
def classpath_repr(self, resolve=True):
"""
Gets this dependency as an element on a class path.
If 'resolve' is True, then this method aborts if the file or directory
denoted by the class path element does not exist.
:rtype : str
"""
nyi('classpath_repr', self)
def isJar(self):
cp_repr = self.classpath_repr() #pylint: disable=assignment-from-no-return
if cp_repr:
return cp_repr.endswith('.jar') or cp_repr.endswith('.JAR') or '.jar_' in cp_repr
return True
def getJavaProperties(self, replaceVar=mx_subst.path_substitutions):
"""
A dictionary of custom Java properties that should be added to the commandline
"""
ret = {}
if hasattr(self, "javaProperties"):
for key, value in self.javaProperties.items():
ret[key] = replaceVar.substitute(value, dependency=self)
return ret
def get_declaring_module_name(self):
"""
Gets the name of the module corresponding to this ClasspathDependency.
:rtype: str | None
"""
return None
### JNI
def _get_jni_gen(pname):
p = project(pname)
if p.jni_gen_dir() is None:
abort("Project {0} does not produce JNI headers, it can not be used in <jnigen:{0}> substitution.".format(pname))
return join(p.suite.dir, p.jni_gen_dir())
mx_subst.path_substitutions.register_with_arg('jnigen', _get_jni_gen)
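# Illustrative use of the '<jnigen:...>' substitution registered above (hypothetical project
# name): a native project's suite.py attribute such as
#   "cflags": ["-I<jnigen:com.example.jni>"]
# expands to the absolute JNI header output directory of project 'com.example.jni'.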
### ~~~~~~~~~~~~~ Build
class Task(_with_metaclass(ABCMeta), object):
"""A task executed during a build.
:type deps: list[Task]
:param Dependency subject: the dependency for which this task is executed
:param list[str] args: arguments of the build command
:param int parallelism: the number of CPUs used when executing this task
"""
def __init__(self, subject, args, parallelism): # pylint: disable=super-init-not-called
self.subject = subject
self.args = args
self.parallelism = parallelism
self.deps = []
self.proc = None
def __str__(self):
nyi('__str__', self)
def __repr__(self):
return str(self)
@property
def name(self):
return self.subject.name
@property
def build_time(self):
return getattr(self.subject, "build_time", 1)
def initSharedMemoryState(self):
pass
def pushSharedMemoryState(self):
pass
def pullSharedMemoryState(self):
pass
def cleanSharedMemoryState(self):
pass
def prepare(self, daemons):
"""
Perform any task initialization that must be done in the main process.
This will be called just before the task is launched.
The 'daemons' argument is a dictionary for storing any persistent state
that might be shared between tasks.
"""
@abstractmethod
def execute(self):
"""Executes this task."""
class NoOpTask(Task):
def __init__(self, subject, args):
super(NoOpTask, self).__init__(subject, args, 1)
def __str__(self):
return 'NoOp'
def execute(self):
pass
class TaskSequence(Task): #pylint: disable=R0921
"""A Task that executes a sequence of subtasks."""
def __init__(self, subject, args):
super(TaskSequence, self).__init__(subject, args, max(t.parallelism for t in self.subtasks))
def __str__(self):
def indent(s, padding=' '):
return padding + s.replace('\n', '\n' + padding)
return self.__class__.__name__ + '[\n' + indent('\n'.join(map(str, self.subtasks))) + '\n]'
@abstractproperty
def subtasks(self):
""":rtype: typing.Sequence[Task]"""
def execute(self):
for subtask in self.subtasks:
assert subtask.subject == self.subject
subtask.deps += self.deps
subtask.execute()
class Buildable(object):
"""A mixin for Task subclasses that can be built."""
built = False
def initSharedMemoryState(self):
self._builtBox = multiprocessing.Value('b', 1 if self.built else 0)
def pushSharedMemoryState(self):
self._builtBox.value = 1 if self.built else 0
def pullSharedMemoryState(self):
self.built = bool(self._builtBox.value)
def cleanSharedMemoryState(self):
self._builtBox = None
# This should be @abstractmethod, but subclasses in some suites do not implement it
def newestOutput(self):
"""
Gets a TimeStampFile representing the build output file for this task
with the newest modification time or None if no build output file exists.
"""
nyi('newestOutput', self)
class BuildTask(Buildable, Task):
"""A Task used to build a dependency."""
def __init__(self, subject, args, parallelism):
super(BuildTask, self).__init__(subject, args, parallelism)
self._saved_deps_path = join(subject.suite.get_mx_output_dir(), 'savedDeps', type(subject).__name__,
subject._extra_artifact_discriminant(), self.name)
def _persist_deps(self):
"""
Saves the dependencies for this task's subject to a file.
"""
if self.deps:
with SafeFileCreation(self._saved_deps_path) as sfc:
with open(sfc.tmpPath, 'w') as f:
for d in self.deps:
print(d.subject.name, file=f)
elif exists(self._saved_deps_path):
os.remove(self._saved_deps_path)
def _deps_changed(self):
"""
Returns True if there are saved dependencies for this task's subject and
they have changed since the last time it was built.
"""
if exists(self._saved_deps_path):
with open(self._saved_deps_path) as f:
last_deps = f.read().splitlines()
curr_deps = [d.subject.name for d in self.deps]
if last_deps != curr_deps:
return True
return False
def execute(self):
"""
Execute the build task.
"""
if self.buildForbidden():
self.logSkip()
return
buildNeeded = False
if self.args.clean and not self.cleanForbidden():
self.logClean()
self.clean()
buildNeeded = True
reason = 'clean'
if not buildNeeded:
updated = [dep for dep in self.deps if getattr(dep, 'built', False)]
if updated:
buildNeeded = True
if not _opts.verbose:
reason = 'dependency {} updated'.format(updated[0].subject)
else:
reason = 'dependencies updated: ' + ', '.join(str(u.subject) for u in updated)
if not buildNeeded and self._deps_changed():
buildNeeded = True
reason = 'dependencies were added, removed or re-ordered'
if not buildNeeded:
newestInput = None
newestInputDep = None
for dep in self.deps:
depNewestOutput = getattr(dep, 'newestOutput', lambda: None)()
if depNewestOutput and (not newestInput or depNewestOutput.isNewerThan(newestInput)):
newestInput = depNewestOutput
newestInputDep = dep
if newestInputDep:
logvv('Newest dependency for {}: {} ({})'.format(self.subject.name, newestInputDep.subject.name,
newestInput))
if get_env('MX_BUILD_SHALLOW_DEPENDENCY_CHECKS') is None:
shallow_dependency_checks = self.args.shallow_dependency_checks is True
else:
shallow_dependency_checks = get_env('MX_BUILD_SHALLOW_DEPENDENCY_CHECKS') == 'true'
if self.args.shallow_dependency_checks is not None and shallow_dependency_checks is True:
warn('Explicit -s argument to build command is overridden by MX_BUILD_SHALLOW_DEPENDENCY_CHECKS')
if newestInput and shallow_dependency_checks and not self.subject.isNativeProject():
newestInput = None
if __name__ != self.__module__ and not self.subject.suite.getMxCompatibility().newestInputIsTimeStampFile():
newestInput = newestInput.timestamp if newestInput else float(0)
buildNeeded, reason = self.needsBuild(newestInput)
if buildNeeded:
if not self.args.clean and not self.cleanForbidden():
self.clean(forBuild=True)
start_time = time.time()
self.logBuild(reason)
try:
_built = self.build()
except:
if self.args.parallelize:
# In concurrent builds, this helps identify on the console which build failed
log(self._timestamp() + "{}: Failed due to error: {}".format(self, sys.exc_info()[1]))
raise
self._persist_deps()
# The build task is `built` if the `build()` function returns True or None (legacy)
self.built = _built or _built is None
self.logBuildDone(time.time() - start_time)
logv('Finished {}'.format(self))
else:
self.logSkip(reason)
def _timestamp(self):
if self.args.print_timing:
return time.strftime('[%H:%M:%S] ')
return ''
def logBuild(self, reason=None):
if reason:
log(self._timestamp() + '{}... [{}]'.format(self, reason))
else:
log(self._timestamp() + '{}...'.format(self))
def logBuildDone(self, duration):
timestamp = self._timestamp()
if timestamp:
duration = str(timedelta(seconds=duration))
# Strip hours if 0
if duration.startswith('0:'):
duration = duration[2:]
log(timestamp + '{} [duration: {}]'.format(self, duration))
def logClean(self):
log('Cleaning {}...'.format(self.name))
def logSkip(self, reason=None):
if reason:
logv('[{} - skipping {}]'.format(reason, self.name))
else:
logv('[skipping {}]'.format(self.name))
def needsBuild(self, newestInput):
"""
        Returns a (True, reason) tuple if the current artifacts of this task are outdated, otherwise (False, reason).
The 'newestInput' argument is either None or a TimeStampFile
denoting the artifact of a dependency with the most recent modification time.
Apart from 'newestInput', this method does not inspect this task's dependencies.
"""
if self.args.force:
return (True, 'forced build')
return (False, 'unimplemented')
def buildForbidden(self):
if not self.args.only:
return False
projectNames = self.args.only.split(',')
return self.subject.name not in projectNames
def cleanForbidden(self):
return False
@abstractmethod
def build(self):
"""
Build the artifacts.
"""
nyi('build', self)
@abstractmethod
def clean(self, forBuild=False):
"""
Clean the build artifacts.
"""
nyi('clean', self)
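# Illustrative sketch, not part of mx: a minimal BuildTask that copies a single file,
# showing the contract exercised by execute() above. `subject` is assumed to be a
# Dependency (BuildTask.__init__ needs its suite and name); the src/dst attributes and
# the class name are hypothetical. `_needsUpdate`, `ensure_dirname_exists` and
# `TimeStampFile` are helpers used elsewhere in this module.
class _CopyFileBuildTask(BuildTask):
    def __init__(self, subject, args, src, dst):
        super(_CopyFileBuildTask, self).__init__(subject, args, 1)
        self.src = src
        self.dst = dst
    def __str__(self):
        return 'Copying {}'.format(self.src)
    def newestOutput(self):
        return TimeStampFile(self.dst)
    def needsBuild(self, newestInput):
        sup = BuildTask.needsBuild(self, newestInput)
        if sup[0]:
            return sup
        # rebuild if the output is missing or older than either the newest dependency
        # artifact or the source file itself
        reason = _needsUpdate(newestInput, self.dst) or _needsUpdate(self.src, self.dst)
        if reason:
            return True, reason
        return False, 'up to date'
    def build(self):
        ensure_dirname_exists(self.dst)
        shutil.copy2(self.src, self.dst)
    def clean(self, forBuild=False):
        if exists(self.dst):
            os.remove(self.dst)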
### ~~~~~~~~~~~~~ Distribution, Archive, Dependency
class DistributionTemplate(SuiteConstituent):
def __init__(self, suite, name, attrs, parameters):
SuiteConstituent.__init__(self, suite, name)
self.attrs = attrs
self.parameters = parameters
class Distribution(Dependency):
"""
A distribution is a file containing the output of one or more dependencies.
It is a `Dependency` because a `Project` or another `Distribution` may express a dependency on it.
:param Suite suite: the suite in which the distribution is defined
:param str name: the name of the distribution which must be unique across all suites
:param list deps: the dependencies of the distribution. How these dependencies are consumed
is defined by the `Distribution` subclasses.
:param list excludedLibs: libraries whose contents should be excluded from this distribution's built artifact
:param bool platformDependent: specifies if the built artifact is platform dependent
:param str theLicense: license applicable when redistributing the built artifact of the distribution
"""
def __init__(self, suite, name, deps, excludedLibs, platformDependent, theLicense, testDistribution=False, platforms=None, **kwArgs):
Dependency.__init__(self, suite, name, theLicense, **kwArgs)
self.deps = deps
self.update_listeners = set()
self.excludedLibs = excludedLibs
self.platformDependent = platformDependent
self.platforms = platforms or [None]
self.buildDependencies = []
if testDistribution is None:
self.testDistribution = name.endswith('_TEST') or name.endswith('_TESTS')
else:
self.testDistribution = testDistribution
def is_test_distribution(self):
return self.testDistribution
def isPlatformDependent(self):
return self.platformDependent
def add_update_listener(self, listener):
self.update_listeners.add(listener)
def notify_updated(self):
for l in self.update_listeners:
l(self)
def removeDependency(self, dep):
if dep in self.deps:
self.deps.remove(dep)
if dep in self.buildDependencies:
self.buildDependencies.remove(dep)
def resolveDeps(self):
self._resolveDepsHelper(self.deps, fatalIfMissing=not isinstance(self.suite, BinarySuite))
if self.suite.getMxCompatibility().automatic_overlay_distribution_deps():
# Overlays must come before overlayed when walking dependencies (e.g. to create a class path)
new_deps = []
for d in self.deps:
if d.isJavaProject() and d._overlays:
for o in d._overlays:
if o in self.deps:
abort('Distribution must not explicitly specify a dependency on {} as it is derived automatically.'.format(o), context=self)
new_deps.append(o)
new_deps.append(d)
self.deps = new_deps
self._resolveDepsHelper(self.buildDependencies, fatalIfMissing=not isinstance(self.suite, BinarySuite))
self._resolveDepsHelper(self.excludedLibs)
self._resolveDepsHelper(getattr(self, 'moduledeps', None))
overlaps = getattr(self, 'overlaps', [])
if not isinstance(overlaps, list):
abort('Attribute "overlaps" must be a list', self)
original_overlaps = list(overlaps)
self._resolveDepsHelper(overlaps)
self.resolved_overlaps = overlaps
self.overlaps = original_overlaps
for l in self.excludedLibs:
if not l.isBaseLibrary():
abort('"exclude" attribute can only contain libraries: ' + l.name, context=self)
licenseId = self.theLicense if self.theLicense else self.suite.defaultLicense # pylint: disable=access-member-before-definition
if licenseId:
self.theLicense = get_license(licenseId, context=self)
def _walk_deps_visit_edges(self, visited, in_edge, preVisit=None, visit=None, ignoredEdges=None, visitEdge=None):
deps = [(DEP_STANDARD, self.deps), (DEP_EXCLUDED, self.excludedLibs), (DEP_BUILD, self.buildDependencies)]
self._walk_deps_visit_edges_helper(deps, visited, in_edge, preVisit=preVisit, visit=visit, ignoredEdges=ignoredEdges, visitEdge=visitEdge)
def make_archive(self):
nyi('make_archive', self)
def archived_deps(self):
"""
Gets the projects and libraries whose artifacts are the contents of the archive
created by `make_archive`.
Direct distribution dependencies are considered as _distDependencies_.
Anything contained in the _distDependencies_ will not be included in the result.
Libraries listed in `excludedLibs` will also be excluded.
Otherwise, the result will contain everything this distribution depends on (including
indirect distribution dependencies and libraries).
"""
if not hasattr(self, '.archived_deps'):
excluded = set()
def _visitDists(dep, edges):
if dep is not self:
excluded.add(dep)
if dep.isDistribution():
for o in dep.overlapped_distributions():
excluded.add(o)
excluded.update(dep.archived_deps())
self.walk_deps(visit=_visitDists, preVisit=lambda dst, edge: dst.isDistribution())
def _list_excluded(dst, edge):
if not edge:
assert dst == self
return True
if edge and edge.kind == DEP_EXCLUDED:
assert edge.src == self
excluded.add(dst)
return False
self.walk_deps(preVisit=_list_excluded, ignoredEdges=[]) # shallow walk to get excluded elements
deps = []
def _visit(dep, edges):
if dep is not self:
if dep.isJARDistribution():
if _use_exploded_build():
abort('When MX_BUILD_EXPLODED=true, distribution {} depended on by {} must be in the "distDependencies" attribute'.format(dep, self), context=self)
# A distribution that defines a module cannot include another distribution's contents
import mx_javamodules
module_name = mx_javamodules.get_module_name(self)
if module_name is not None:
abort('Distribution {} depended on by {} (which defines module {}) must be in the "distDependencies" attribute'.format(dep, self, module_name), context=self)
deps.append(dep)
def _preVisit(dst, edge):
if edge and edge.src.isNativeProject():
# A native project dependency only denotes a build order dependency
return False
return dst not in excluded and not dst.isJreLibrary() and not dst.isJdkLibrary()
self.walk_deps(visit=_visit, preVisit=_preVisit)
if self.suite.getMxCompatibility().automatic_overlay_distribution_deps():
for d in deps:
if d.isJavaProject() and d._overlays and d not in self.deps:
abort('Distribution must explicitly specify a dependency on {} as it has overlays. {}'.format(d, self), context=self)
setattr(self, '.archived_deps', deps)
return getattr(self, '.archived_deps')
@abstractmethod
def exists(self):
nyi('exists', self)
@abstractmethod
def remoteExtension(self):
nyi('remoteExtension', self)
@abstractmethod
def localExtension(self):
nyi('localExtension', self)
def _default_path(self):
return join(self.suite.get_output_root(platformDependent=self.platformDependent), 'dists',
self._extra_artifact_discriminant(), self.default_filename())
def default_filename(self):
return _map_to_maven_dist_name(self.name) + '.' + self.localExtension()
@classmethod
def platformName(cls):
return '{os}_{arch}'.format(os=get_os(), arch=get_arch())
"""
Provide remoteName of distribution.
:param str platform: If the distribution is platform dependent and platform is provided
it will be used instead of the usual platform suffix (provided by platformName()).
"""
def remoteName(self, platform=None):
if self.platformDependent:
if not platform:
platform = self.platformName()
return '{name}_{platform}'.format(name=self.name, platform=platform)
return self.name
def postPull(self, f):
pass
def prePush(self, f):
return f
def needsUpdate(self, newestInput):
"""
Determines if this distribution needs updating taking into account the
'newestInput' TimeStampFile if 'newestInput' is not None. Returns the
reason this distribution needs updating or None if it doesn't need updating.
"""
nyi('needsUpdate', self)
"""
Provide maven artifactId string for distribution.
:param str platform: If the distribution is platform dependent and platform is provided
it will be used instead of the usual platform suffix (provided by platformName()).
"""
def maven_artifact_id(self, platform=None):
if hasattr(self, 'maven') and isinstance(self.maven, dict):
artifact_id = self.maven.get('artifactId', None)
if artifact_id:
return artifact_id
return _map_to_maven_dist_name(self.remoteName(platform=platform))
"""
Provide maven groupId string for distribution.
"""
def maven_group_id(self):
if hasattr(self, 'maven') and isinstance(self.maven, dict):
group_id = self.maven.get('groupId', None)
if group_id:
return group_id
return _mavenGroupId(self.suite)
def overlapped_distribution_names(self):
return self.overlaps
def overlapped_distributions(self):
return self.resolved_overlaps
def post_init(self):
pass
def extra_suite_revisions_data(self):
"""
Yield (tag_name, attributes_dict) tuples to be appended to the suite-revisions metadata file optionally generated by maven-deploy.
:rtype: Iterator[str, dict[str, str]]
"""
return
yield # pylint: disable=unreachable
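# Illustrative sketch, not part of mx: the shape of a suite.py distribution entry that
# feeds the Distribution attributes above ('dependencies' -> deps, 'exclude' -> excludedLibs,
# 'maven' -> maven_group_id/maven_artifact_id). All names and values are hypothetical and
# the exact set of recognized keys is an assumption based on common suite.py usage.
_example_distribution_declaration = {
    "EXAMPLE_DIST": {
        "dependencies": ["com.example.project"],
        "exclude": ["JUNIT"],
        "distDependencies": ["EXAMPLE_API"],
        "platformDependent": False,
        "maven": {
            "groupId": "com.example",
            "artifactId": "example-dist",
        },
    },
}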
from mx_jardistribution import JARDistribution, _get_proguard_cp, _use_exploded_build, _stage_file_impl
class JMHArchiveParticipant(object):
""" Archive participant for building JMH benchmarking jars. """
def __init__(self, dist):
if not dist.mainClass:
# set default JMH main class
dist.mainClass = "org.openjdk.jmh.Main"
def __opened__(self, arc, srcArc, services):
self.arc = arc
self.meta_files = {
'META-INF/BenchmarkList': None,
'META-INF/CompilerHints': None,
}
def __process__(self, arcname, contents_supplier, is_source):
if not is_source and arcname in self.meta_files:
if self.meta_files[arcname] is None:
self.meta_files[arcname] = contents_supplier()
else:
self.meta_files[arcname] += contents_supplier()
return True
return False
def __closing__(self):
return ({filename: content for filename, content in self.meta_files.items() if content is not None}, None)
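# Illustrative sketch, not part of mx: another archive participant following the same
# protocol as JMHArchiveParticipant above (__opened__/__process__/__closing__). It merges
# all 'META-INF/example.index' entries into a single entry; the entry name is hypothetical.
class _ExampleIndexArchiveParticipant(object):
    def __opened__(self, arc, srcArc, services):
        self.arc = arc
        self._index = None
    def __process__(self, arcname, contents_supplier, is_source):
        if not is_source and arcname == 'META-INF/example.index':
            content = contents_supplier()
            self._index = content if self._index is None else self._index + content
            return True  # consumed here, re-emitted as a merged entry in __closing__
        return False
    def __closing__(self):
        extra = {} if self._index is None else {'META-INF/example.index': self._index}
        return (extra, None)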
class AbstractArchiveTask(BuildTask):
def __init__(self, args, dist):
BuildTask.__init__(self, dist, args, 1)
def needsBuild(self, newestInput):
sup = BuildTask.needsBuild(self, newestInput)
if sup[0]:
return sup
reason = self.subject.needsUpdate(newestInput)
if reason:
return True, reason
return False, None
def build(self):
self.subject.make_archive()
def __str__(self):
return "Archiving {}".format(self.subject.name)
def buildForbidden(self):
if super(AbstractArchiveTask, self).buildForbidden():
return True
return isinstance(self.subject.suite, BinarySuite)
def cleanForbidden(self):
if super(AbstractArchiveTask, self).cleanForbidden():
return True
return isinstance(self.subject.suite, BinarySuite)
class JARArchiveTask(AbstractArchiveTask):
def buildForbidden(self):
if super(JARArchiveTask, self).buildForbidden():
return True
if not self.args.java:
return True
return False
def newestOutput(self):
return TimeStampFile.newest([self.subject.path, self.subject.sourcesPath])
def clean(self, forBuild=False):
if isinstance(self.subject.suite, BinarySuite): # make sure we never clean distributions from BinarySuites
abort('should not reach here')
for path in self.subject.paths_to_clean():
if exists(path):
if isdir(path) and not islink(path):
rmtree(path)
else:
os.remove(path)
def cleanForbidden(self):
if super(JARArchiveTask, self).cleanForbidden():
return True
if not self.args.java:
return True
return False
def build(self):
self.subject.make_archive(getattr(self, 'javac_daemon', None))
def prepare(self, daemons):
if self.args.no_daemon or self.subject.suite.isBinarySuite():
return
compliance = self.subject._compliance_for_build()
if compliance is not None and compliance >= '9':
info = get_java_module_info(self.subject)
if info:
jdk = get_jdk(compliance)
key = 'javac-daemon:' + jdk.java + ' '.join(jdk.java_args)
self.javac_daemon = daemons.get(key)
if not self.javac_daemon:
self.javac_daemon = JavacDaemon(jdk, jdk.java_args)
daemons[key] = self.javac_daemon
class AbstractDistribution(Distribution):
def __init__(self, suite, name, deps, path, excludedLibs, platformDependent, theLicense, output, **kwArgs):
super(AbstractDistribution, self).__init__(suite, name, deps, excludedLibs, platformDependent, theLicense, **kwArgs)
self.path = _make_absolute(path.replace('/', os.sep) if path else self._default_path(), suite.dir)
self.output = output
def get_output(self):
if self.output:
return join(self.suite.dir, self.output)
return None
def exists(self):
return exists(self.path)
def getArchivableResults(self, use_relpath=True, single=False):
yield self.path, self.default_filename()
def needsUpdate(self, newestInput):
path_up = _needsUpdate(newestInput, self.path)
if path_up:
return path_up
if self.output:
output_up = _needsUpdate(newestInput, self.get_output())
if output_up:
return output_up
return None
def getBuildTask(self, args):
return DefaultArchiveTask(args, self)
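# Illustrative sketch, not part of mx: what a timestamp check like the one behind
# needsUpdate() above boils down to. This is an assumption of the behaviour of a helper
# such as _needsUpdate: return a human-readable reason when `path` is missing or older
# than `newestInput` (a path, a TimeStampFile with a `timestamp` attribute, or None),
# otherwise return None.
def _example_needs_update(newestInput, path):
    if not exists(path):
        return '{} does not exist'.format(path)
    if newestInput is None:
        return None
    if isinstance(newestInput, str):
        newer_mtime = os.path.getmtime(newestInput) if exists(newestInput) else None
    else:
        newer_mtime = getattr(newestInput, 'timestamp', None)
    if newer_mtime is not None and os.path.getmtime(path) < newer_mtime:
        return '{} is older than {}'.format(path, newestInput)
    return None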
class AbstractTARDistribution(AbstractDistribution):
__gzip_binary = None
def __init__(self, suite, name, deps, path, excludedLibs, platformDependent, theLicense, output=None, **kw_args):
self._include_dirs = kw_args.pop("include_dirs", [])
super(AbstractTARDistribution, self).__init__(suite, name, deps, path, excludedLibs, platformDependent, theLicense, output=output, **kw_args)
@property
def include_dirs(self):
"""Directories with headers provided by this archive."""
return [join(self.get_output(), i) for i in self._include_dirs]
def remoteExtension(self):
return 'tar.gz'
def localExtension(self):
return 'tar'
def postPull(self, f):
assert f.endswith('.gz')
logv('Decompressing {}...'.format(f))
tarfilename = f[:-len('.gz')]
if AbstractTARDistribution._has_gzip():
with open(tarfilename, 'wb') as tar:
# force, quiet, decompress, cat to stdout
run([AbstractTARDistribution._gzip_binary(), '-f', '-q', '-d', '-c', f], out=tar)
else:
with gzip.open(f, 'rb') as gz, open(tarfilename, 'wb') as tar:
shutil.copyfileobj(gz, tar)
os.remove(f)
if self.output:
output = self.get_output()
with tarfile.open(tarfilename, 'r:') as tar:
logv('Extracting {} to {}'.format(tarfilename, output))
tar.extractall(output)
return tarfilename
def prePush(self, f):
tgz = f + '.gz'
logv('Compressing {}...'.format(f))
if AbstractTARDistribution._has_gzip():
with open(tgz, 'wb') as tar:
# force, quiet, cat to stdout
run([AbstractTARDistribution._gzip_binary(), '-f', '-q', '-c', f], out=tar)
else:
with gzip.open(tgz, 'wb') as gz, open(f, 'rb') as tar:
shutil.copyfileobj(tar, gz)
return tgz
@staticmethod
def _gzip_binary():
if not AbstractTARDistribution._has_gzip():
abort("No gzip binary could be found")
return AbstractTARDistribution.__gzip_binary
@staticmethod
def _has_gzip():
if AbstractTARDistribution.__gzip_binary is None:
# Probe for pigz (parallel gzip) first and then try common gzip
for binary_name in ["pigz", "gzip"]:
gzip_ret_code = None
try:
gzip_ret_code = run([binary_name, '-V'], nonZeroIsFatal=False, err=subprocess.STDOUT, out=OutputCapture())
except OSError as e:
gzip_ret_code = e
if gzip_ret_code == 0:
AbstractTARDistribution.__gzip_binary = binary_name
break
return AbstractTARDistribution.__gzip_binary is not None
class AbstractZIPDistribution(AbstractDistribution):
def remoteExtension(self):
return 'zip'
def localExtension(self):
return 'zip'
def classpath_repr(self, resolve=True):
return self.path
@abstractmethod
def compress_locally(self):
pass
@abstractmethod
def compress_remotely(self):
pass
def postPull(self, f):
if self.compress_locally() or not self.compress_remotely():
return None
logv('Decompressing {}...'.format(f))
tmp_dir = mkdtemp("." + self.localExtension(), self.name)
with zipfile.ZipFile(f) as zf:
zf.extractall(tmp_dir)
tmp_fd, tmp_file = mkstemp("." + self.localExtension(), self.name)
with os.fdopen(tmp_fd, 'w') as tmp_f, zipfile.ZipFile(tmp_f, 'w', compression=zipfile.ZIP_STORED) as zf:
for root, _, files in os.walk(tmp_dir):
arc_dir = os.path.relpath(root, tmp_dir)
for f_ in files:
zf.write(join(root, f_), join(arc_dir, f_))
rmtree(tmp_dir)
return tmp_file
def prePush(self, f):
if not self.compress_remotely() or self.compress_locally():
return f
logv('Compressing {}...'.format(f))
tmpdir = mkdtemp("." + self.remoteExtension(), self.name)
with zipfile.ZipFile(f) as zf:
zf.extractall(tmpdir)
tmp_fd, tmp_file = mkstemp("." + self.remoteExtension(), self.name)
with os.fdopen(tmp_fd, 'wb') as tmp_f, zipfile.ZipFile(tmp_f, 'w', compression=zipfile.ZIP_DEFLATED) as zf:
for root, _, files in os.walk(tmpdir):
arc_dir = os.path.relpath(root, tmpdir)
for f_ in files:
zf.write(join(root, f_), join(arc_dir, f_))
rmtree(tmpdir)
return tmp_file
class AbstractJARDistribution(AbstractZIPDistribution, ClasspathDependency):
def remoteExtension(self):
return 'jar'
def localExtension(self):
return 'jar'
class NativeTARDistribution(AbstractTARDistribution):
"""
    A distribution whose dependencies are only `NativeProject`s. It packages all the resources specified by
`NativeProject.getResults` and `NativeProject.headers` for each constituent project.
:param Suite suite: the suite in which the distribution is defined
:param str name: the name of the distribution which must be unique across all suites
:param list deps: the `NativeProject` dependencies of the distribution
:param bool platformDependent: specifies if the built artifact is platform dependent
:param str theLicense: license applicable when redistributing the built artifact of the distribution
    :param bool relpath: specifies if the names of tar file entries should be relative to the
           output directories of the constituent native projects
:param str output: specifies where the content of the distribution should be copied upon creation
or extracted after pull
:param bool auto_prefix: specifies if the names of tar file entries from constituent
platform dependent projects should be prefixed with `<os>-<arch>`
Attributes:
path: suite-local path to where the tar file will be placed
"""
def __init__(self, suite, name, deps, path, excludedLibs, platformDependent, theLicense, relpath, output,
auto_prefix=False, **kwArgs):
super(NativeTARDistribution, self).__init__(suite, name, deps, path, excludedLibs, platformDependent,
theLicense, output, **kwArgs)
assert not auto_prefix or relpath, "{}: 'auto_prefix' requires 'relpath'".format(name)
self.relpath = relpath
if self.output is not None: # pylint: disable=access-member-before-definition
self.output = mx_subst.results_substitutions.substitute(self.output, dependency=self)
self.auto_prefix = auto_prefix
def make_archive(self):
ensure_dirname_exists(self.path)
if self.output:
output_path = self.get_output()
if exists(output_path):
os.utime(output_path, None)
with Archiver(self.path, kind='tar') as arc:
files = set()
def archive_and_copy(name, arcname):
assert arcname not in files, arcname
files.add(arcname)
arc.zf.add(name, arcname=arcname)
if self.output:
dest = join(self.get_output(), arcname)
# Make path separators consistent for string compare
dest = normpath(dest)
name = normpath(name)
if name != dest:
ensure_dirname_exists(dest)
shutil.copy2(name, dest)
for d in self.archived_deps():
if d.isNativeProject() or d.isArchivableProject():
arc_prefix = ''
if self.auto_prefix and d.isPlatformDependent():
arc_prefix = self.platformName().replace('_', '-')
for file_path, arc_name in d.getArchivableResults(self.relpath):
archive_and_copy(file_path, join(arc_prefix, arc_name))
elif hasattr(d, 'getResults') and not d.getResults():
logv("[{}: ignoring dependency {} with no results]".format(self.name, d.name))
else:
abort('Unsupported dependency for native distribution {}: {}'.format(self.name, d.name))
self.notify_updated()
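# Illustrative sketch, not part of mx: a suite.py entry of the kind that typically becomes
# a NativeTARDistribution, packaging the results of native projects. All names are
# hypothetical, and the exact suite.py keys (in particular "native") are an assumption;
# 'relpath', 'output' and 'platformDependent' correspond to the parameters documented above.
_example_native_distribution_declaration = {
    "EXAMPLE_NATIVE": {
        "native": True,
        "relpath": True,
        "platformDependent": True,
        "output": "mxbuild/example-native",
        "dependencies": ["com.example.nativeproject"],
    },
}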
class DefaultArchiveTask(AbstractArchiveTask):
def newestOutput(self):
return TimeStampFile(self.subject.path)
def buildForbidden(self):
if AbstractArchiveTask.buildForbidden(self):
return True
        if not self.args.native:
            return True
        return False
def clean(self, forBuild=False):
if isinstance(self.subject.suite, BinarySuite): # make sure we never clean distributions from BinarySuites
abort('should not reach here')
if exists(self.subject.path):
os.remove(self.subject.path)
if self.subject.output and (self.clean_output_for_build() or not forBuild) and self.subject.output != '.':
output_dir = self.subject.get_output()
if exists(output_dir):
rmtree(output_dir)
def clean_output_for_build(self):
# some distributions have `output` set to the same directory as their input or to some directory that contains other files
return False
def cleanForbidden(self):
if AbstractArchiveTask.cleanForbidden(self):
return True
if not self.args.native:
return True
return False
class LayoutArchiveTask(DefaultArchiveTask):
def clean_output_for_build(self):
return True
def needsBuild(self, newestInput):
sup = super(LayoutArchiveTask, self).needsBuild(newestInput)
if sup[0]:
return sup
# TODO check for *extra* files that should be removed in `output`
return False, None
class LayoutDistribution(AbstractDistribution):
_linky = AbstractDistribution
def __init__(self, suite, name, deps, layout, path, platformDependent, theLicense, excludedLibs=None, path_substitutions=None, string_substitutions=None, archive_factory=None, compress=False, **kw_args):
"""
See docs/layout-distribution.md
:type layout: dict[str, str]
:type path_substitutions: mx_subst.SubstitutionEngine
:type string_substitutions: mx_subst.SubstitutionEngine
"""
super(LayoutDistribution, self).__init__(suite, name, deps, path, excludedLibs or [], platformDependent, theLicense, output=None, **kw_args)
self.buildDependencies += LayoutDistribution._extract_deps(layout, suite, name)
self.output = join(self.get_output_base(), name) # initialized here rather than passed above since `get_output_base` is not ready before the super constructor
self.layout = layout
self.path_substitutions = path_substitutions or mx_subst.path_substitutions
self.string_substitutions = string_substitutions or mx_subst.string_substitutions
self._source_location_cache = {}
self.archive_factory = archive_factory or Archiver
self.compress = compress
self._removed_deps = set()
def getBuildTask(self, args):
return LayoutArchiveTask(args, self)
def removeDependency(self, d):
super(LayoutDistribution, self).removeDependency(d)
self._removed_deps.add(d.qualifiedName())
if d.suite == self.suite:
self._removed_deps.add(d.name)
def canDiscard(self):
"""Returns true if all dependencies have been removed and the layout does not specify any fixed sources (string:, file:)."""
return not (self.deps or self.buildDependencies or any(
# if there is any other source type (e.g., 'file' or 'string') we cannot remove it
source_dict['source_type'] not in ['dependency', 'extracted-dependency', 'skip']
for _, source_dict in self._walk_layout()
))
@staticmethod
def _is_linky(path=None):
if LayoutDistribution._linky is AbstractDistribution:
value = get_env('LINKY_LAYOUT')
if value is None:
LayoutDistribution._linky = None
else:
if is_windows():
raise abort("LINKY_LAYOUT is not supported on Windows")
LayoutDistribution._linky = re.compile(fnmatch.translate(value))
if not LayoutDistribution._linky:
return False
if path is None:
return True
return LayoutDistribution._linky.match(path)
@staticmethod
def _extract_deps(layout, suite, distribution_name):
deps = set()
for _, source in LayoutDistribution._walk_static_layout(layout, distribution_name, context=suite):
if 'dependency' in source:
deps.add(source['dependency'])
return sorted(deps)
@staticmethod
def _as_source_dict(source, distribution_name, destination, path_substitutions=None, string_substitutions=None, distribution_object=None, context=None):
if isinstance(source, str):
if ':' not in source:
abort("Invalid source '{}' in layout for '{}': should be of the form '<type>:<specification>'\n"
"Type could be `file`, `string`, `link`, `dependency` or `extracted-dependency`.".format(source, distribution_name), context=context)
source_type, source_spec = source.split(':', 1)
source_dict = {
"source_type": source_type,
"_str_": source,
}
if source_type in ('dependency', 'extracted-dependency', 'skip'):
if '/' in source_spec:
source_dict["dependency"], source_dict["path"] = source_spec.split('/', 1)
else:
source_dict["dependency"], source_dict["path"] = source_spec, None
if source_type == 'extracted-dependency':
source_dict["dereference"] = "root"
source_dict["optional"] = False
elif source_type == 'file':
source_dict["path"] = source_spec
elif source_type == 'link':
source_dict["path"] = source_spec
elif source_type == 'string':
source_dict["value"] = source_spec
else:
abort("Unsupported source type: '{}' in '{}'".format(source_type, destination), context=context)
else:
source_dict = source
source_type = source_dict['source_type']
# TODO check structure
if source_type in ('dependency', 'extracted-dependency', 'skip'):
source_dict['_str_'] = source_type + ":" + source_dict['dependency']
if source_type == 'extracted-dependency':
if 'dereference' not in source_dict:
source_dict["dereference"] = "root"
elif source_dict["dereference"] not in ("root", "never", "always"):
raise abort("Unsupported dereference mode: '{}' in '{}'".format(source_dict["dereference"], destination), context=context)
if source_dict['path']:
source_dict['_str_'] += '/{}'.format(source_dict['path'])
if 'optional' not in source_dict:
source_dict["optional"] = False
elif source_type == 'file':
source_dict['_str_'] = "file:" + source_dict['path']
elif source_type == 'link':
source_dict['_str_'] = "link:" + source_dict['path']
elif source_type == 'string':
source_dict['_str_'] = "string:" + source_dict['value']
else:
raise abort("Unsupported source type: '{}' in '{}'".format(source_type, destination), context=context)
if 'exclude' in source_dict:
if isinstance(source_dict['exclude'], str):
source_dict['exclude'] = [source_dict['exclude']]
if path_substitutions and source_dict.get("path"):
path = mx_subst.as_engine(path_substitutions).substitute(source_dict["path"], distribution=distribution_object)
if path != source_dict["path"]:
source_dict = source_dict.copy()
source_dict["path"] = path
if string_substitutions and source_dict.get("value") and not source_dict.get("ignore_value_subst"):
value = mx_subst.as_engine(string_substitutions).substitute(source_dict["value"], distribution=distribution_object)
if value != source_dict["value"]:
source_dict = source_dict.copy()
source_dict["value"] = value
return source_dict
@staticmethod
def _walk_static_layout(layout, distribution_name, path_substitutions=None, string_substitutions=None, distribution_object=None, context=None):
substs = mx_subst.as_engine(path_substitutions) if path_substitutions else None
for destination, sources in sorted(layout.items()):
if not isinstance(sources, list):
sources = [sources]
for source in sources:
source_dict = LayoutDistribution._as_source_dict(source, distribution_name, destination, path_substitutions, string_substitutions, distribution_object, context)
if substs:
destination = substs.substitute(destination)
yield destination, source_dict
def _walk_layout(self):
for (destination, source_dict) in LayoutDistribution._walk_static_layout(self.layout, self.name, self.path_substitutions, self.string_substitutions, self, self):
dep = source_dict.get("dependency")
if dep not in self._removed_deps:
yield (destination, source_dict)
def _install_source(self, source, output, destination, archiver):
clean_destination = destination
if destination.startswith('./'):
clean_destination = destination[2:]
absolute_destination = join(output, clean_destination.replace('/', os.sep))
source_type = source['source_type']
provenance = "{}<-{}".format(destination, source['_str_'])
def add_symlink(source_file, src, abs_dest, archive_dest, archive=True):
destination_directory = dirname(abs_dest)
ensure_dir_exists(destination_directory)
resolved_output_link_target = normpath(join(destination_directory, src))
if archive:
if not resolved_output_link_target.startswith(output):
raise abort("Cannot add symlink that escapes the archive: link from '{}' would point to '{}' which is not in '{}'".format(source_file, resolved_output_link_target, output), context=self)
archiver.add_link(src, archive_dest, provenance)
if is_windows():
def strip_suffix(path):
return os.path.splitext(path)[0]
abs_dest = strip_suffix(abs_dest) + '.cmd'
if lexists(abs_dest):
# Since the `archiver.add_link` above already does "the right thing" regarding duplicates (warn or abort) here we just delete the existing file
os.remove(abs_dest)
if is_windows():
link_template_name = join(_mx_suite.mxDir, 'exe_link_template.cmd')
with open(link_template_name, 'r') as template, SafeFileCreation(abs_dest) as sfc, open(sfc.tmpPath, 'w') as link:
_template_subst = mx_subst.SubstitutionEngine(mx_subst.string_substitutions)
_template_subst.register_no_arg('target', normpath(strip_suffix(src)))
for line in template:
link.write(_template_subst.substitute(line))
else:
os.symlink(src, abs_dest)
def merge_recursive(src, dst, src_arcname, excludes, archive=True):
"""
Copies `src` to `dst`. If `src` is a directory copies recursively.
"""
if glob_match_any(excludes, src_arcname):
return
absolute_destination = _safe_path(join(output, dst.replace('/', os.sep)))
if islink(src):
link_target = os.readlink(src)
src_target = join(dirname(src), os.readlink(src))
if LayoutDistribution._is_linky(absolute_destination) and not isabs(link_target) and normpath(relpath(src_target, output)).startswith('..'):
add_symlink(src, normpath(relpath(src_target, dirname(absolute_destination))), absolute_destination, dst, archive=archive)
else:
if archive and isabs(link_target):
abort("Cannot add absolute links into archive: '{}' points to '{}'".format(src, link_target), context=self)
add_symlink(src, link_target, absolute_destination, dst, archive=archive)
elif isdir(src):
ensure_dir_exists(absolute_destination, lstat(src).st_mode)
for name in os.listdir(src):
new_dst = (dst if len(dst) == 0 or dst[-1] == '/' else dst + '/') + name
merge_recursive(join(src, name), new_dst, join(src_arcname, name), excludes, archive=archive)
else:
ensure_dir_exists(dirname(absolute_destination))
if archive:
archiver.add(src, dst, provenance)
if LayoutDistribution._is_linky(absolute_destination):
if lexists(absolute_destination):
os.remove(absolute_destination)
os.symlink(os.path.relpath(src, dirname(absolute_destination)), absolute_destination)
else:
shutil.copy(src, absolute_destination)
def _install_source_files(files, include=None, excludes=None, optional=False, archive=True):
excludes = excludes or []
if destination.endswith('/'):
ensure_dir_exists(absolute_destination)
first_file = True
for _source_file, _arcname in files:
matched = ''
if include is not None:
matched = glob_match(include, _arcname)
if matched is None:
continue
if islink(_source_file):
_source_file = join(dirname(_source_file), os.readlink(_source_file))
if destination.endswith('/'):
strip_prefix = dirname(matched)
name = _arcname
if strip_prefix:
name = name[len(strip_prefix) + 1:]
_dst = join(clean_destination, name)
else:
_dst = clean_destination
if not first_file:
abort("Unexpected source for '{dest}' expected one file but got multiple.\n"
"Either use a directory destination ('{dest}/') or change the source".format(dest=destination), context=self)
merge_recursive(_source_file, _dst, _arcname, excludes, archive=archive)
first_file = False
if first_file and not optional:
abort("Could not find any source file for '{}'".format(source['_str_']), context=self)
if source_type == 'dependency':
d = dependency(source['dependency'], context=self)
if_stripped = source.get('if_stripped')
archive = not isinstance(d, JARDistribution) or not _use_exploded_build()
if if_stripped is not None and d.isJARDistribution():
if if_stripped not in ('include', 'exclude'):
abort("Could not understand `if_stripped` value '{}'. Valid values are 'include' and 'exclude'".format(if_stripped), context=self)
if (if_stripped == 'exclude' and d.is_stripped()) or (if_stripped == 'include' and not d.is_stripped()):
return
if source.get('path') is None:
try:
_install_source_files([next(d.getArchivableResults(single=True))], archive=archive)
except ValueError as e:
assert e.args[0] == 'single not supported'
msg = "Can not use '{}' of type {} without a path.".format(d.name, d.__class__.__name__)
if destination.endswith('/'):
msg += "\nDid you mean '{}/*'".format(source['_str_'])
else:
msg += "\nUse the '{}/<path>' format".format(source['_str_'])
abort(msg)
else:
_install_source_files((
results[:2] for results in d.getArchivableResults()
), include=source['path'], excludes=source.get('exclude'), optional=source['optional'], archive=archive)
elif source_type == 'extracted-dependency':
path = source['path']
exclude = source.get('exclude', [])
d = dependency(source['dependency'], context=self)
try:
source_archive_file, _ = next(d.getArchivableResults(single=True))
except ValueError as e:
assert e.args[0] == 'single not supported'
raise abort("Can not use '{}' of type {} for an 'extracted-dependency' ('{}').".format(d.name, d.__class__.__name__, destination))
unarchiver_dest_directory = absolute_destination
if not destination.endswith('/'):
if path is None:
abort("Invalid source '{type}:{dependency}' used in destination '{dest}':\n"
"When using 'extracted-dependency' to extract to a single file, a path must be specified. Did you mean\n"
" - '{dest}/' as a destination (i.e., extracting all files from '{dependency}' into {dest})\n"
" - or '{type}:{dependency}/path/to/file/in/archive' as a source (i.e., extracting /path/to/file/in/archive from '{dependency}' to '{dest}')".format(
dest=destination,
dependency=d.name,
type=source_type),
context=self)
unarchiver_dest_directory = dirname(unarchiver_dest_directory)
dereference = source.get("dereference", "root")
ensure_dir_exists(unarchiver_dest_directory)
ext = get_file_extension(source_archive_file)
output_done = False
if isinstance(d, LayoutDistribution) and LayoutDistribution._is_linky():
_out_dir = d.get_output()
_prefix = join(_out_dir, '')
if path:
file_path = join(_out_dir, path)
else:
file_path = _out_dir
def _rel_name(_source_file):
assert _source_file.startswith(_prefix) or _source_file == _prefix[:-1]
return _source_file[len(_prefix):]
_install_source_files(((source_file, _rel_name(source_file)) for source_file in glob.iglob(file_path)), include=path, excludes=exclude, archive=False)
output_done = True
first_file_box = [True]
dest_arcname_prefix = os.path.relpath(unarchiver_dest_directory, output).replace(os.sep, '/')
if dest_arcname_prefix == '.' and self.suite.getMxCompatibility().fix_extracted_dependency_prefix():
dest_arcname_prefix = None
def dest_arcname(src_arcname):
if not dest_arcname_prefix:
return src_arcname
return dest_arcname_prefix + '/' + src_arcname
def _filter_archive_name(name):
_root_match = False
if exclude and glob_match_any(exclude, name):
return None, False
if path is not None:
matched = glob_match(path, name)
if not matched:
return None, False
_root_match = len(matched.split('/')) == len(name.split('/'))
strip_prefix = dirname(matched)
if strip_prefix:
name = name[len(strip_prefix) + 1:]
if not destination.endswith('/'):
name = '/'.join(name.split('/')[:-1] + [basename(destination)])
if not first_file_box[0]:
raise abort("Unexpected source for '{dest}' expected one file but got multiple.\n"
"Either use a directory destination ('{dest}/') or change the source".format(dest=destination), context=self)
first_file_box[0] = False
return name, _root_match
with TempDir() if output_done else NoOpContext(unarchiver_dest_directory) as unarchiver_dest_directory:
if ext.endswith('zip') or ext.endswith('jar'):
if isdir(source_archive_file):
assert d.isJARDistribution() and _use_exploded_build()
for root, _, filenames in os.walk(source_archive_file):
for filename in filenames:
filepath = join(root, filename)
rel_path = os.path.relpath(filepath, source_archive_file)
arcname, _ = _filter_archive_name(rel_path)
if arcname:
archiver.add(filepath, arcname, provenance)
else:
with zipfile.ZipFile(source_archive_file) as zf:
for zipinfo in zf.infolist():
zipinfo.filename, _ = _filter_archive_name(zipinfo.filename)
if not zipinfo.filename:
continue
extracted_file = zf.extract(zipinfo, unarchiver_dest_directory)
unix_attributes = (zipinfo.external_attr >> 16) & 0xFFFF
if unix_attributes != 0:
os.chmod(extracted_file, unix_attributes)
archiver.add(extracted_file, dest_arcname(zipinfo.filename), provenance)
elif 'tar' in ext or ext.endswith('tgz'):
with tarfile.TarFile.open(source_archive_file) as tf:
# from tarfile.TarFile.extractall:
directories = []
for tarinfo in tf:
new_name, root_match = _filter_archive_name(tarinfo.name.rstrip("/"))
if not new_name:
continue
extracted_file = join(unarchiver_dest_directory, new_name.replace("/", os.sep))
arcname = dest_arcname(new_name)
if tarinfo.issym():
if dereference == "always" or (root_match and dereference == "root"):
tf._extract_member(tf._find_link_target(tarinfo), extracted_file)
archiver.add(extracted_file, arcname, provenance)
else:
original_name = tarinfo.name
tarinfo.name = new_name
tf.extract(tarinfo, unarchiver_dest_directory)
tarinfo.name = original_name
archiver.add_link(tarinfo.linkname, arcname, provenance)
else:
original_name = tarinfo.name
tarinfo.name = new_name
tf.extract(tarinfo, unarchiver_dest_directory)
tarinfo.name = original_name
archiver.add(extracted_file, arcname, provenance)
if tarinfo.isdir():
# use a safe mode while extracting, fix later
os.chmod(extracted_file, 0o700)
new_tarinfo = copy(tarinfo)
new_tarinfo.name = new_name
directories.append(new_tarinfo)
# Reverse sort directories.
directories.sort(key=operator.attrgetter('name'))
directories.reverse()
# Set correct owner, mtime and filemode on directories.
for tarinfo in directories:
dirpath = join(absolute_destination, tarinfo.name)
try:
_tarfile_chown(tf, tarinfo, dirpath)
tf.utime(tarinfo, dirpath)
tf.chmod(tarinfo, dirpath)
except tarfile.ExtractError as e:
abort("tarfile: " + str(e))
else:
abort("Unsupported file type in 'extracted-dependency' for {}: '{}'".format(destination, source_archive_file))
if first_file_box[0] and path is not None and not source['optional']:
msg = """\
Could not find any source file for '{str}'.
Common causes:
- the inclusion list ('{path}') or the exclusion list ('{exclude}') are too restrictive. Note that file names starting with '.' are not matched by '*' but by '.*'
- '{name}' is empty
- the root dir of '{name}' is '.' and the inclusion list does not contain a '.' entry or one that starts with './' or '.*'""".format(
str=source['_str_'],
path=path,
exclude=exclude,
name=d.name,
)
abort(msg, context=self)
elif source_type == 'file':
files_root = self.suite.dir
source_path = source['path']
if source_path.startswith(self.suite.dir):
source_path = source_path[len(self.suite.dir) + 1:]
file_path = normpath(join(self.suite.dir, source_path))
def _rel_arcname(_source_file):
return os.path.relpath(_source_file, files_root)
_arcname_f = _rel_arcname
if not self.suite.vc or not self.suite.vc.locate(self.suite.vc_dir, file_path, abortOnError=False):
absolute_source = isabs(source_path)
if absolute_source:
_arcname_f = lambda a: a
warn("Adding file which is not in the repository: '{}' in '{}'".format(file_path, destination), context=self)
elif isabs(source_path):
abort("Source should not be absolute: '{}' in '{}'".format(source_path, destination), context=self)
_install_source_files(((source_file, _arcname_f(source_file)) for source_file in glob.iglob(file_path)), include=source_path, excludes=source.get('exclude'))
elif source_type == 'link':
link_target = source['path']
if destination.endswith('/'):
link_target_basename = basename(link_target)
absolute_destination = join(absolute_destination, link_target_basename)
clean_destination = join(clean_destination, link_target_basename)
add_symlink(destination, link_target, absolute_destination, clean_destination)
elif source_type == 'string':
if destination.endswith('/'):
abort("Can not use `string` source with a destination ending with `/` ({})".format(destination), context=self)
ensure_dir_exists(dirname(absolute_destination))
s = source['value']
with open(absolute_destination, 'w') as f:
f.write(s)
archiver.add_str(s, clean_destination, provenance)
elif source_type == 'skip':
pass
else:
abort("Unsupported source type: '{}' in '{}'".format(source_type, destination), context=self)
def _verify_layout(self):
output = realpath(self.get_output())
for destination, sources in self.layout.items():
if not isinstance(destination, str):
abort("Destination (layout keys) should be a string", context=self)
if not isinstance(sources, list):
sources = [sources]
if not destination:
abort("Destination (layout keys) can not be empty", context=self)
for source in sources:
if not isinstance(source, (str, dict)):
abort("Error in '{}': sources should be strings or dicts".format(destination), context=self)
if isabs(destination):
abort("Invalid destination: '{}': destination should not be absolute".format(destination), context=self)
final_destination = normpath(join(output, destination))
if not final_destination.startswith(output):
abort("Invalid destination: '{}': destination should not escape the output directory ('{}' is not in '{}')".format(destination, final_destination, output), context=self)
if not destination.endswith('/'):
if len(sources) > 1:
abort("Invalid layout: cannot copy multiple files to a single destination: '{dest}'\n"
"Should the destination be a directory: '{dest}/'? (note the trailing slash)".format(dest=destination), context=self)
if len(sources) < 1:
abort("Invalid layout: no file to copy to '{dest}'\n"
"Do you want an empty directory: '{dest}/'? (note the trailing slash)".format(dest=destination), context=self)
def make_archive(self):
self._verify_layout()
output = realpath(self.get_output())
with self.archive_factory(self.path,
kind=self.localExtension(),
duplicates_action='warn',
context=self,
reset_user_group=getattr(self, 'reset_user_group', False),
compress=self.compress) as arc:
for destination, source in self._walk_layout():
self._install_source(source, output, destination, arc)
self._persist_layout()
self._persist_linky_state()
def needsUpdate(self, newestInput):
sup = super(LayoutDistribution, self).needsUpdate(newestInput)
if sup:
return sup
for destination, source in self._walk_layout():
source_type = source['source_type']
if source_type == 'file':
for source_file in glob.iglob(join(self.suite.dir, source['path'].replace('/', os.sep))):
up = _needsUpdate(source_file, self.path)
if up:
return up
if islink(source_file):
source_file = join(dirname(source_file), os.readlink(source_file))
up = _needsUpdate(source_file, self.path)
if up:
return up
elif isdir(source_file):
for root, _, files in os.walk(source_file):
up = _needsUpdate(root, self.path)
if up:
return up
for f in files:
up = _needsUpdate(join(root, f), self.path)
if up:
return up
elif source_type == 'link':
pass # this is handled by _persist_layout
elif source_type == 'string':
pass # this is handled by _persist_layout
elif source_type in ('dependency', 'extracted-dependency', 'skip'):
pass # this is handled by a build task dependency
else:
abort("Unsupported source type: '{}' in '{}'".format(source_type, destination), context=suite)
if not self._check_persisted_layout():
return "layout definition has changed"
if not self._check_linky_state():
return "LINKY_LAYOUT has changed"
return None
def _persist_layout(self):
saved_layout_file = self._persisted_layout_file()
current_layout = LayoutDistribution._layout_to_stable_str(self.layout)
ensure_dir_exists(dirname(saved_layout_file))
with open(saved_layout_file, 'w') as fp:
fp.write(current_layout)
def _persisted_layout_file(self):
return join(self.suite.get_mx_output_dir(), 'savedLayouts', self.name)
@staticmethod
def _layout_to_stable_str(d):
if isinstance(d, list):
return '[' + ','.join((LayoutDistribution._layout_to_stable_str(e) for e in d)) + ']'
elif isinstance(d, dict):
return '{' + ','.join(("{}->{}".format(k, LayoutDistribution._layout_to_stable_str(d[k])) for k in sorted(d.keys()))) + '}'
else:
return '{}'.format(d)
def _check_persisted_layout(self):
saved_layout_file = self._persisted_layout_file()
current_layout = LayoutDistribution._layout_to_stable_str(self.layout)
saved_layout = ""
if exists(saved_layout_file):
with open(saved_layout_file) as fp:
saved_layout = fp.read()
if saved_layout == current_layout:
return True
logv("'{}'!='{}'".format(saved_layout, current_layout))
return False
def _linky_state_file(self):
return join(self.suite.get_mx_output_dir(), 'linkyState', self.name)
def _persist_linky_state(self):
linky_state_file = self._linky_state_file()
LayoutDistribution._is_linky() # force init
if LayoutDistribution._linky is None:
if exists(linky_state_file):
os.unlink(linky_state_file)
return
ensure_dir_exists(dirname(linky_state_file))
with open(linky_state_file, 'w') as fp:
fp.write(LayoutDistribution._linky.pattern)
def _check_linky_state(self):
linky_state_file = self._linky_state_file()
LayoutDistribution._is_linky() # force init
if not exists(linky_state_file):
return LayoutDistribution._linky is None
if LayoutDistribution._linky is None:
return False
with open(linky_state_file) as fp:
saved_pattern = fp.read()
return saved_pattern == LayoutDistribution._linky.pattern
def find_single_source_location(self, source, fatal_if_missing=True, abort_on_multiple=False):
locations = self.find_source_location(source, fatal_if_missing=fatal_if_missing)
unique_locations = set(locations)
if len(unique_locations) > 1:
nl = os.linesep
abort_or_warn("Found multiple locations for '{}' in '{}':{} {}".format(source, self.name, nl, (nl + ' ').join(unique_locations)), abort_on_multiple)
if len(locations) > 0:
return locations[0]
return None
def _matched_result(self, source):
"""
Try to find which file will be matched by the given 'dependency' or 'skip' source.
"""
assert source['source_type'] in ('dependency', 'skip')
d = dependency(source['dependency'], context=self)
try:
if source['path'] is None:
_, arcname = next(d.getArchivableResults(single=True)) # pylint: disable=stop-iteration-return
yield arcname
else:
for _, _arcname in d.getArchivableResults(single=False):
if _arcname is None:
continue
matched = glob_match(source['path'], _arcname)
if matched:
strip_prefix = dirname(matched)
arcname = _arcname
if strip_prefix:
arcname = arcname[len(strip_prefix) + 1:]
yield arcname
except OSError as e:
logv("Ignoring OSError in getArchivableResults: " + str(e))
def find_source_location(self, source, fatal_if_missing=True):
if source not in self._source_location_cache:
search_source = LayoutDistribution._as_source_dict(source, self.name, "??", self.path_substitutions, self.string_substitutions, self, self)
source_type = search_source['source_type']
if source_type in ('dependency', 'extracted-dependency', 'skip'):
dep = search_source['dependency']
if search_source['path'] is None or not any((c in search_source['path'] for c in ('*', '[', '?'))):
if search_source['path'] and source_type == 'extracted-dependency':
raise abort("find_source_location: path is not supported for `extracted-dependency`: " + source)
found_dest = []
for destination, layout_source in self._walk_layout():
if layout_source['source_type'] == source_type and layout_source['dependency'] == dep:
dest = destination
if dest.startswith('./'):
dest = dest[2:]
if search_source['path'] is not None and layout_source['path'] is not None:
# the search and this source have a `path`: check if they match
if not glob_match(layout_source['path'], search_source['path']):
continue
elif search_source['path'] is None and layout_source['path'] is not None:
search_arcname = _first(self._matched_result(search_source))
# check if the 'single' searched file matches this source's `path`
if search_arcname is None or search_arcname not in self._matched_result(layout_source):
continue
elif search_source['path'] is not None and layout_source['path'] is None:
layout_arcname = _first(self._matched_result(layout_source))
# check if the 'single' file from this source matches the searched `path`
if layout_arcname is None or layout_arcname not in self._matched_result(search_source):
continue
if source_type == 'dependency' and destination.endswith('/'):
                                # the files given by this source are expanded
for arcname in self._matched_result(layout_source):
dest = join(dest, arcname)
found_dest.append(dest)
else:
found_dest.append(dest)
self._source_location_cache[source] = found_dest
if fatal_if_missing and not found_dest:
abort("Could not find '{}' in '{}'".format(source, self.name))
else:
abort("find_source_location: path with glob is not supported: " + source)
else:
abort("find_source_location: source type not supported: " + source)
return self._source_location_cache[source]
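# Illustrative sketch, not part of mx: a layout dict of the shape consumed by
# LayoutDistribution (see docs/layout-distribution.md and _as_source_dict above). Sources
# use the '<type>:<specification>' form with the types handled above: 'dependency',
# 'extracted-dependency', 'file', 'link', 'string' (and 'skip'). All names are hypothetical.
_example_layout = {
    "./": [
        "file:README.md",
        "dependency:EXAMPLE_DIST",
    ],
    "bin/": "extracted-dependency:EXAMPLE_NATIVE/bin/*",
    "bin/example": "link:../lib/example-launcher",
    "release-notes.txt": "string:example release",
}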
class LayoutTARDistribution(LayoutDistribution, AbstractTARDistribution):
pass
class LayoutZIPDistribution(LayoutDistribution, AbstractZIPDistribution):
def __init__(self, *args, **kw_args):
        # we have *args here because some subclasses in suites have been written passing positional args to
        # LayoutDistribution.__init__ instead of keyword args. We just forward them as-is to super(); it's risky,
        # but better than breaking compatibility with the misbehaving suites
self._local_compress = kw_args.pop('localCompress', False)
self._remote_compress = kw_args.pop('remoteCompress', True)
if self._local_compress and not self._remote_compress:
abort("Incompatible local/remote compression settings: local compression requires remote compression")
super(LayoutZIPDistribution, self).__init__(*args, compress=self._local_compress, **kw_args)
def compress_locally(self):
return self._local_compress
def compress_remotely(self):
return self._remote_compress
class LayoutJARDistribution(LayoutZIPDistribution, AbstractJARDistribution):
pass
### ~~~~~~~~~~~~~ Project, Dependency
class Project(Dependency):
"""
A Project is a collection of source code that is built by mx. For historical reasons
it typically corresponds to an IDE project and the IDE support in mx assumes this.
"""
def __init__(self, suite, name, subDir, srcDirs, deps, workingSets, d, theLicense, testProject=False, **kwArgs):
"""
:param list[str] srcDirs: subdirectories of name containing sources to build
:param list[str] | list[Dependency] deps: list of dependencies, Project, Library or Distribution
"""
Dependency.__init__(self, suite, name, theLicense, **kwArgs)
self.subDir = subDir
self.srcDirs = srcDirs
self.deps = deps
self.workingSets = workingSets
self.dir = d
self.testProject = testProject
if self.testProject is None:
            # The suite doesn't specify whether this is a test project. By default,
            # any project ending with .test is considered a test project. Prior
            # to mx version 5.114.0, projects ending in .jtt were also treated this
            # way, but starting with that version any non-standard names must be
# explicitly marked as test projects.
self.testProject = self.name.endswith('.test')
if not self.testProject and not self.suite.getMxCompatibility().disableImportOfTestProjects():
self.testProject = self.name.endswith('.jtt')
# Create directories for projects that don't yet exist
ensure_dir_exists(d)
for s in self.source_dirs():
ensure_dir_exists(s)
def resolveDeps(self):
"""
Resolves symbolic dependency references to be Dependency objects.
"""
self._resolveDepsHelper(self.deps)
licenseId = self.theLicense if self.theLicense else self.suite.defaultLicense # pylint: disable=access-member-before-definition
if licenseId:
self.theLicense = get_license(licenseId, context=self)
if hasattr(self, 'buildDependencies'):
self._resolveDepsHelper(self.buildDependencies)
def _walk_deps_visit_edges(self, visited, in_edge, preVisit=None, visit=None, ignoredEdges=None, visitEdge=None):
deps = [(DEP_STANDARD, self.deps)]
if hasattr(self, 'buildDependencies'):
deps.append((DEP_BUILD, self.buildDependencies))
self._walk_deps_visit_edges_helper(deps, visited, in_edge, preVisit=preVisit, visit=visit, ignoredEdges=ignoredEdges, visitEdge=visitEdge)
def _compute_max_dep_distances(self, dep, distances, dist):
currentDist = distances.get(dep)
if currentDist is None or currentDist < dist:
distances[dep] = dist
if dep.isProject():
for depDep in dep.deps:
self._compute_max_dep_distances(depDep, distances, dist + 1)
def canonical_deps(self):
"""
Get the dependencies of this project that are not recursive (i.e. cannot be reached
via other dependencies).
"""
distances = dict()
result = set()
self._compute_max_dep_distances(self, distances, 0)
for n, d in distances.items():
assert d > 0 or n is self
if d == 1:
result.add(n)
if len(result) == len(self.deps) and frozenset(self.deps) == result:
return self.deps
return result
def max_depth(self):
"""
Get the maximum canonical distance between this project and its most distant dependency.
"""
distances = dict()
        self._compute_max_dep_distances(self, distances, 0)
return max(distances.values())
def source_dirs(self):
"""
Get the directories in which the sources of this project are found.
"""
return [join(self.dir, s) for s in self.srcDirs]
def eclipse_settings_sources(self):
"""
Gets a dictionary from the name of an Eclipse settings file to
the list of files providing its generated content, in overriding order
(i.e., settings from files later in the list override settings from
files earlier in the list).
A new dictionary is created each time this method is called so it's
safe for the caller to modify it.
"""
nyi('eclipse_settings_sources', self)
def netbeans_settings_sources(self):
"""
        Gets a dictionary from the name of a NetBeans settings file to
the list of files providing its generated content, in overriding order
(i.e., settings from files later in the list override settings from
files earlier in the list).
A new dictionary is created each time this method is called so it's
safe for the caller to modify it.
"""
nyi('netbeans_settings_sources', self)
def eclipse_config_up_to_date(self, configZip):
"""
        Determines if the zipped up Eclipse configuration is up to date.
"""
return True
def netbeans_config_up_to_date(self, configZip):
"""
        Determines if the zipped up NetBeans configuration is up to date.
"""
return True
def get_javac_lint_overrides(self):
"""
Gets a string to be added to the -Xlint javac option.
"""
nyi('get_javac_lint_overrides', self)
def _eclipseinit(self, files=None, libFiles=None, absolutePaths=False):
"""
Generates an Eclipse project configuration for this project if Eclipse
supports projects of this type.
"""
def is_test_project(self):
return self.testProject
def get_checkstyle_config(self, resolve_checkstyle_library=True):
# Workaround for GR-12809
return (None, None, None)
class ProjectBuildTask(BuildTask):
def __init__(self, args, parallelism, project):
BuildTask.__init__(self, project, args, parallelism)
class ArchivableProject(Project): # Used from other suites. pylint: disable=r0921
"""
A project that can be part of any distribution, native or not.
Users should subclass this class and implement the nyi() methods.
The files listed by getResults(), which must be under output_dir(),
will be included in the archive under the prefix archive_prefix().
"""
def __init__(self, suite, name, deps, workingSets, theLicense, **kwArgs):
d = suite.dir
Project.__init__(self, suite, name, "", [], deps, workingSets, d, theLicense, **kwArgs)
def getBuildTask(self, args):
return ArchivableBuildTask(self, args, 1)
@abstractmethod
def output_dir(self):
nyi('output_dir', self)
@abstractmethod
def archive_prefix(self):
nyi('archive_prefix', self)
@abstractmethod
def getResults(self):
nyi('getResults', self)
@staticmethod
def walk(d):
"""
Convenience method to implement getResults() by including all files under a directory.
"""
assert isabs(d)
results = []
for root, _, files in os.walk(d):
for name in files:
path = join(root, name)
results.append(path)
return results
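    # Typical use in a subclass (a sketch, not prescribed by mx itself):
    #   def getResults(self):
    #       return ArchivableProject.walk(self.output_dir())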
def get_relpath(self, f, outputDir):
d = join(outputDir, "")
assert f.startswith(d), f + " not in " + outputDir
return os.path.relpath(f, outputDir)
def getArchivableResults(self, use_relpath=True, single=False):
if single:
raise ValueError("single not supported")
outputDir = self.output_dir()
archivePrefix = self.archive_prefix()
for f in self.getResults():
if use_relpath:
filename = self.get_relpath(f, outputDir)
else:
filename = basename(f)
arcname = join(archivePrefix, filename)
yield f, arcname
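    # Illustration with hypothetical values: for output_dir() == '/build/out',
    # archive_prefix() == 'resources' and a result file '/build/out/js/app.js',
    # this yields ('/build/out/js/app.js', 'resources/js/app.js') when
    # use_relpath is True, and ('/build/out/js/app.js', 'resources/app.js') otherwise.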
class ArchivableBuildTask(BuildTask):
def __str__(self):
return 'Archive {}'.format(self.subject)
def needsBuild(self, newestInput):
return (False, 'Files are already on disk')
def newestOutput(self):
return TimeStampFile.newest(self.subject.getResults())
def build(self):
pass
def clean(self, forBuild=False):
pass
#### ~~~~~~~~~~~~~ Project: Java / Maven
class MavenProject(Project, ClasspathDependency):
"""
A project producing a single jar file.
Users should subclass this class and implement getBuildTask().
Additional attributes:
jar: path to the jar
sourceDirs: list of directories containing the sources
"""
def __init__(self, suite, name, deps, workingSets, theLicense=None, **args):
context = 'project ' + name
d = suite.dir
srcDirs = Suite._pop_list(args, 'sourceDirs', context)
Project.__init__(self, suite, name, "", srcDirs, deps, workingSets, d, theLicense, **args)
ClasspathDependency.__init__(self)
jar = args.pop('jar')
assert jar.endswith('.jar')
self.jar = jar
def classpath_repr(self, resolve=True):
jar = join(self.suite.dir, self.jar)
if resolve and not exists(jar):
abort('unbuilt Maven project {} cannot be on a class path ({})'.format(self, jar))
return jar
def get_path(self, resolve):
return self.classpath_repr(resolve=resolve)
def get_source_path(self, resolve):
        assert len(self.srcDirs) == 1
        return join(self.suite.dir, self.srcDirs[0])
class JavaProject(Project, ClasspathDependency):
def __init__(self, suite, name, subDir, srcDirs, deps, javaCompliance, workingSets, d, theLicense=None, testProject=False, **kwArgs):
Project.__init__(self, suite, name, subDir, srcDirs, deps, workingSets, d, theLicense, testProject=testProject, **kwArgs)
ClasspathDependency.__init__(self, **kwArgs)
if javaCompliance is None:
abort('javaCompliance property required for Java project ' + name)
self.javaCompliance = JavaCompliance(javaCompliance, context=self)
# The annotation processors defined by this project
self.definedAnnotationProcessors = None
self.declaredAnnotationProcessors = []
self._mismatched_imports = None
self._overlays = []
@property
def include_dirs(self):
"""Directories with headers provided by this project."""
return [self.jni_gen_dir()] if self.jni_gen_dir() else []
def resolveDeps(self):
Project.resolveDeps(self)
self._resolveDepsHelper(self.declaredAnnotationProcessors)
for ap in self.declaredAnnotationProcessors:
if not ap.isDistribution() and not ap.isLibrary():
abort('annotation processor dependency must be a distribution or a library: ' + ap.name, context=self)
if self.suite.getMxCompatibility().disableImportOfTestProjects() and not self.is_test_project():
for dep in self.deps:
if isinstance(dep, Project) and dep.is_test_project():
abort('Non-test project {} can not depend on the test project {}'.format(self.name, dep.name))
overlayTargetName = getattr(self, 'overlayTarget', None)
if overlayTargetName:
project(self.overlayTarget, context=self)._overlays.append(self)
def _walk_deps_visit_edges(self, visited, in_edge, preVisit=None, visit=None, ignoredEdges=None, visitEdge=None):
deps = [(DEP_ANNOTATION_PROCESSOR, self.declaredAnnotationProcessors)]
self._walk_deps_visit_edges_helper(deps, visited, in_edge, preVisit, visit, ignoredEdges, visitEdge)
Project._walk_deps_visit_edges(self, visited, in_edge, preVisit, visit, ignoredEdges, visitEdge)
def source_gen_dir_name(self):
"""
Get the directory name in which source files generated by the annotation processor are found/placed.
"""
return basename(self.source_gen_dir())
def source_gen_dir(self, relative=False):
"""
Get the absolute path to the directory in which source files generated by the annotation processor are found/placed.
"""
res = join(self.get_output_root(), 'src_gen')
if relative:
res = os.path.relpath(res, self.dir)
return res
# GR-31142
def latest_output_dir(self):
return join(self.suite.get_output_root(False, False), self.name)
def jni_gen_dir(self, relative=False):
if getattr(self, 'jniHeaders', False):
res = join(self.get_output_root(), 'jni_gen')
if relative:
res = os.path.relpath(res, self.dir)
return res
return None
def output_dir(self, relative=False):
"""
Get the directory in which the class files of this project are found/placed.
"""
res = join(self.get_output_root(), 'bin')
if relative:
res = os.path.relpath(res, self.dir)
return res
def classpath_repr(self, resolve=True):
return self.output_dir()
def get_javac_lint_overrides(self):
if not hasattr(self, '_javac_lint_overrides'):
overrides = []
if get_env('JAVAC_LINT_OVERRIDES'):
overrides += get_env('JAVAC_LINT_OVERRIDES').split(',')
if self.suite.javacLintOverrides:
overrides += self.suite.javacLintOverrides
if hasattr(self, 'javac.lint.overrides'):
overrides += getattr(self, 'javac.lint.overrides').split(',')
self._javac_lint_overrides = overrides
return self._javac_lint_overrides
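    # Overrides are merged from three comma-separated sources: the
    # JAVAC_LINT_OVERRIDES environment variable, the suite's javacLintOverrides
    # and a per-project "javac.lint.overrides" attribute. A hypothetical example:
    #   JAVAC_LINT_OVERRIDES=-serial,-rawtypes mx build
    # (which lint keys are honoured ultimately depends on the JDK's javac).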
def eclipse_config_up_to_date(self, configZip):
for _, sources in self.eclipse_settings_sources().items():
for source in sources:
if configZip.isOlderThan(source):
return False
return True
def netbeans_config_up_to_date(self, configZip):
for _, sources in self.netbeans_settings_sources().items():
for source in sources:
if configZip.isOlderThan(source):
return False
if configZip.isOlderThan(join(self.dir, 'build.xml')):
return False
if configZip.isOlderThan(join(self.dir, 'nbproject', 'project.xml')):
return False
if configZip.isOlderThan(join(self.dir, 'nbproject', 'project.properties')):
return False
return True
def eclipse_settings_sources(self):
"""
Gets a dictionary from the name of an Eclipse settings file to
the list of files providing its generated content, in overriding order
(i.e., settings from files later in the list override settings from
files earlier in the list).
A new dictionary is created each time this method is called so it's
safe for the caller to modify it.
"""
esdict = self.suite.eclipse_settings_sources()
# check for project overrides
projectSettingsDir = join(self.dir, 'eclipse-settings')
if exists(projectSettingsDir):
for name in os.listdir(projectSettingsDir):
esdict.setdefault(name, []).append(os.path.abspath(join(projectSettingsDir, name)))
if not self.annotation_processors():
esdict.pop("org.eclipse.jdt.apt.core.prefs", None)
return esdict
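    # Shape of the returned mapping, with hypothetical paths; later entries
    # override earlier ones:
    #   {'org.eclipse.jdt.core.prefs': ['<suite settings>/org.eclipse.jdt.core.prefs',
    #                                   '<project>/eclipse-settings/org.eclipse.jdt.core.prefs']}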
def netbeans_settings_sources(self):
"""
        Gets a dictionary from the name of a NetBeans settings file to
the list of files providing its generated content, in overriding order
(i.e., settings from files later in the list override settings from
files earlier in the list).
A new dictionary is created each time this method is called so it's
safe for the caller to modify it.
"""
nbdict = self.suite.netbeans_settings_sources()
# check for project overrides
projectSettingsDir = join(self.dir, 'netbeans-settings')
if exists(projectSettingsDir):
for name in os.listdir(projectSettingsDir):
nbdict.setdefault(name, []).append(os.path.abspath(join(projectSettingsDir, name)))
return nbdict
def get_checkstyle_config(self, resolve_checkstyle_library=True):
"""
Gets a tuple of the path to a Checkstyle configuration file, a Checkstyle version
and the project supplying the Checkstyle configuration file. Returns
(None, None, None) if this project has no Checkstyle configuration.
"""
checkstyleProj = self if self.checkstyleProj == self.name else project(self.checkstyleProj, context=self)
config = join(checkstyleProj.dir, '.checkstyle_checks.xml')
if not exists(config):
compat = self.suite.getMxCompatibility()
should_abort = compat.check_checkstyle_config()
if checkstyleProj != self:
abort_or_warn('Project {} has no Checkstyle configuration'.format(checkstyleProj), should_abort, context=self)
else:
if hasattr(self, 'checkstyleVersion'):
abort_or_warn('Cannot specify "checkstyleVersion" attribute for project with non-existent Checkstyle configuration', should_abort, context=self)
return None, None, None
if hasattr(checkstyleProj, 'checkstyleVersion'):
checkstyleVersion = checkstyleProj.checkstyleVersion
if resolve_checkstyle_library:
library('CHECKSTYLE_' + checkstyleVersion, context=checkstyleProj)
else:
checkstyleVersion = checkstyleProj.suite.getMxCompatibility().checkstyleVersion()
return config, checkstyleVersion, checkstyleProj
def find_classes_with_annotations(self, pkgRoot, annotations, includeInnerClasses=False, includeGenSrc=False):
"""
        Scan the sources of this project for Java source files containing a line starting with one of the
        given `annotations` (ignoring preceding whitespace) and return a dict mapping fully qualified class
        names to a tuple consisting of the source file and line number of a match.
"""
matches = lambda line: len([a for a in annotations if line == a or line.startswith(a + '(')]) != 0
return self.find_classes_with_matching_source_line(pkgRoot, matches, includeInnerClasses, includeGenSrc)
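    # Usage sketch (the annotation name and paths are made up for illustration):
    #   tests = p.find_classes_with_annotations(None, ['@Test'])
    #   # e.g. {'com.example.FooTest': ('<srcDir>/com/example/FooTest.java', 12)}
    # Only lines that, after stripping leading whitespace, equal an entry or start
    # with an entry followed by '(' are counted as matches.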
def find_classes_with_matching_source_line(self, pkgRoot, function, includeInnerClasses=False, includeGenSrc=False):
"""
        Scan the sources of this project for Java source files containing a line for which
        'function' returns true. A dict mapping the name of each existing class corresponding
        to a matched source file to a (source file, line number) tuple is returned.
"""
result = dict()
source_dirs = self.source_dirs()
if includeGenSrc:
source_dirs.append(self.source_gen_dir())
for srcDir in source_dirs:
outputDir = self.output_dir()
for root, _, files in os.walk(srcDir):
for name in files:
if name.endswith('.java') and name != 'package-info.java':
matchingLineFound = None
source = join(root, name)
with open(source) as f:
pkg = None
lineNo = 1
for line in f:
if line.startswith("package "):
match = _java_package_regex.match(line)
if match:
pkg = match.group(1)
if function(line.strip()):
matchingLineFound = lineNo
if pkg and matchingLineFound:
break
lineNo += 1
if matchingLineFound:
simpleClassName = name[:-len('.java')]
assert pkg is not None, 'could not find package statement in file ' + name
className = pkg + '.' + simpleClassName
result[className] = (source, matchingLineFound)
if includeInnerClasses:
if pkgRoot is None or pkg.startswith(pkgRoot):
pkgOutputDir = join(outputDir, pkg.replace('.', os.path.sep))
if exists(pkgOutputDir):
for e in os.listdir(pkgOutputDir):
if e.endswith('.class') and e.startswith(simpleClassName + '$'):
className = pkg + '.' + e[:-len('.class')]
result[className] = (source, matchingLineFound)
return result
def _init_java_packages(self):
if not hasattr(self, '_defined_java_packages'):
packages = set()
extendedPackages = set()
depPackages = set()
def visit(dep, edge):
if dep is not self and dep.isProject():
depPackages.update(dep.defined_java_packages())
self.walk_deps(visit=visit)
for sourceDir in self.source_dirs():
for root, _, files in os.walk(sourceDir):
javaSources = [name for name in files if name.endswith('.java')]
if len(javaSources) != 0:
path_package = root[len(sourceDir) + 1:].replace(os.sep, '.')
if path_package not in depPackages:
packages.add(path_package)
else:
# A project extends a package already defined by one of its dependencies
extendedPackages.add(path_package)
self._defined_java_packages = frozenset(packages)
self._extended_java_packages = frozenset(extendedPackages)
def _init_java_imports(self):
if not hasattr(self, '_imported_packages'):
depPackages = set()
def visit(dep, edge):
if dep is not self and dep.isProject():
depPackages.update(dep.defined_java_packages())
self.walk_deps(visit=visit)
imports = set()
mismatched_imports = {}
            # Assumes package name components start with a lower case letter and
            # class names start with an upper-case letter
importStatementRe = re.compile(r'\s*import\s+(?:static\s+)?([a-zA-Z\d_$\.]+\*?)\s*;\s*')
importedRe = re.compile(r'((?:[a-z][a-zA-Z\d_$]*\.)*[a-z][a-zA-Z\d_$]*)\.(?:(?:[A-Z][a-zA-Z\d_$]*)|\*)')
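            # For example, the line 'import static com.example.util.Strings.join;' is captured
            # by importStatementRe as 'com.example.util.Strings.join', from which importedRe
            # extracts the package 'com.example.util'; the split relies purely on the case
            # convention described above (the package name here is hypothetical).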
for sourceDir in self.source_dirs():
for root, _, files in os.walk(sourceDir):
javaSources = [name for name in files if name.endswith('.java')]
if len(javaSources) != 0:
path_package = root[len(sourceDir) + 1:].replace(os.sep, '.')
if path_package in depPackages:
imports.add(path_package)
for n in javaSources:
java_package = None
java_source = join(root, n)
with open(java_source) as fp:
for i, line in enumerate(fp):
m = importStatementRe.match(line)
if m:
imported = m.group(1)
m = importedRe.match(imported)
if not m:
lineNo = i + 1
abort(java_source + ':' + str(lineNo) + ': import statement does not match expected pattern:\n' + line, self)
package = m.group(1)
imports.add(package)
m = _java_package_regex.match(line)
if m:
java_package = m.group('package')
if self.is_test_project() and java_package is None and path_package == '':
# Test projects are allowed to include classes without a package
continue
if java_package != path_package:
mismatched_imports[java_source] = java_package
importedPackagesFromProjects = set()
compat = self.suite.getMxCompatibility()
for package in imports:
if compat.improvedImportMatching():
if package in depPackages:
importedPackagesFromProjects.add(package)
else:
name = package
while not name in depPackages and len(name) > 0:
lastDot = name.rfind('.')
if lastDot == -1:
name = None
break
name = name[0:lastDot]
if name is not None:
importedPackagesFromProjects.add(name)
self._mismatched_imports = mismatched_imports
self._imported_packages = frozenset(imports)
self._imported_packages_from_java_projects = frozenset(importedPackagesFromProjects) # pylint: disable=invalid-name
def defined_java_packages(self):
"""Get the immutable set of Java packages defined by the Java sources of this project"""
self._init_java_packages()
return self._defined_java_packages
def extended_java_packages(self):
"""Get the immutable set of Java packages extended by the Java sources of this project"""
self._init_java_packages()
return self._extended_java_packages
def imported_java_packages(self, projectDepsOnly=True):
"""
Gets the immutable set of Java packages imported by the Java sources of this project.
:param bool projectDepsOnly: only include packages defined by other Java projects in the result
:return: the packages imported by this Java project, filtered as per `projectDepsOnly`
:rtype: frozenset
"""
self._init_java_imports()
return self._imported_packages_from_java_projects if projectDepsOnly else self._imported_packages
def mismatched_imports(self):
"""Get a dictionary of source files whose package declaration does not match their source location"""
self._init_java_imports()
return self._mismatched_imports
def annotation_processors(self):
"""
Gets the list of dependencies defining the annotation processors that will be applied
when compiling this project.
"""
return self.declaredAnnotationProcessors
def annotation_processors_path(self, jdk):
"""
Gets the class path composed of this project's annotation processor jars and the jars they depend upon.
"""
aps = self.annotation_processors()
if len(aps):
entries = classpath_entries(names=aps)
invalid = [e.classpath_repr(resolve=True) for e in entries if not e.isJar()]
if invalid:
abort('Annotation processor path can only contain jars: ' + str(invalid), context=self)
entries = (e.classpath_repr(jdk, resolve=True) if e.isJdkLibrary() else e.classpath_repr(resolve=True) for e in entries)
return os.pathsep.join((e for e in entries if e))
return None
def check_current_annotation_processors_file(self):
aps = self.annotation_processors()
outOfDate = False
currentApsFile = join(self.suite.get_mx_output_dir(), 'currentAnnotationProcessors', self.name)
currentApsFileExists = exists(currentApsFile)
if currentApsFileExists:
with open(currentApsFile) as fp:
currentAps = [l.strip() for l in fp.readlines()]
if currentAps != [ap.name for ap in aps]:
outOfDate = True
elif len(aps) == 0:
os.remove(currentApsFile)
else:
outOfDate = len(aps) != 0
return outOfDate
def update_current_annotation_processors_file(self):
aps = self.annotation_processors()
currentApsFile = join(self.suite.get_mx_output_dir(), 'currentAnnotationProcessors', self.name)
if len(aps) != 0:
ensure_dir_exists(dirname(currentApsFile))
with open(currentApsFile, 'w') as fp:
for ap in aps:
print(ap, file=fp)
else:
if exists(currentApsFile):
os.remove(currentApsFile)
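    # The tracking file lists the current annotation processor dependencies, one per
    # line (names below are hypothetical):
    #   TRUFFLE_DSL_PROCESSOR
    #   com.example.processor
    # A difference between this list and the project's current processors makes
    # check_current_annotation_processors_file() report the project as out of date.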
def make_archive(self, path=None):
outputDir = self.output_dir()
if not path:
path = join(self.get_output_root(), self.name + '.jar')
with Archiver(path) as arc:
for root, _, files in os.walk(outputDir):
for f in files:
relpath = root[len(outputDir) + 1:]
arcname = join(relpath, f).replace(os.sep, '/')
arc.zf.write(join(root, f), arcname)
return path
def _eclipseinit(self, files=None, libFiles=None, absolutePaths=False):
"""
Generates an Eclipse project configuration for this project.
"""
mx_ide_eclipse._eclipseinit_project(self, files=files, libFiles=libFiles, absolutePaths=absolutePaths)
def get_overlay_flatten_map(self):
"""
Gets a map from the source directories of this project to the
source directories the project it overlays (or
        [presides over](https://docs.oracle.com/javase/9/docs/specs/jar/jar.html)).
:return: an empty map if this is not an overlay or multi-release version project
"""
if hasattr(self, 'overlayTarget'):
base = project(self.overlayTarget, context=self)
elif hasattr(self, 'multiReleaseJarVersion'):
def _find_version_base_project():
extended_packages = self.extended_java_packages()
if not extended_packages:
abort('Project with a multiReleaseJarVersion attribute must depend on a project that defines a package extended by ' + self.name, context=self)
base_project = None
base_package = None
for extended_package in extended_packages:
for dep in classpath_entries(self, includeSelf=False, preferProjects=True):
if dep is not self and dep.isJavaProject() and not hasattr(dep, 'multiReleaseJarVersion'):
if extended_package in dep.defined_java_packages():
if base_project is None:
base_project = dep
base_package = extended_package
else:
if base_project != dep:
abort('Multi-release jar versioned project {} must extend packages from exactly one project but extends {} from {} and {} from {}'.format(self, extended_package, dep, base_project, base_package))
if not base_project:
abort('Multi-release jar versioned project {} must extend package(s) from one of its dependencies'.format(self))
return base_project
base = _find_version_base_project()
else:
return {}
flatten_map = {}
self_packages = self.defined_java_packages() | self.extended_java_packages()
for package in self_packages:
relative_package_src_dir = package.replace('.', os.sep)
for self_package_src_dir in [join(s, relative_package_src_dir) for s in self.source_dirs()]:
if exists(self_package_src_dir):
assert len(base.source_dirs()) != 0, '{} has no source directories!'.format(base)
for base_package_src_dir in [join(s, relative_package_src_dir) for s in base.source_dirs()]:
if exists(base_package_src_dir) or self_package_src_dir not in flatten_map:
flatten_map[self_package_src_dir] = base_package_src_dir
assert len(self_packages) == len(flatten_map), 'could not find sources for all packages in ' + self.name
return flatten_map
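    # Illustration with a hypothetical layout: for an overlay project
    # 'com.example.foo.jdk11' targeting 'com.example.foo', a source directory maps as
    #   <overlay>/src/com/example/foo  ->  <base>/src/com/example/foo
    # so that the overlay's sources can be flattened onto the base project's layout.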
def getBuildTask(self, args):
jdk = get_jdk(self.javaCompliance, tag=DEFAULT_JDK_TAG, purpose='building ' + self.name)
return JavaBuildTask(args, self, jdk)
def get_concealed_imported_packages(self, jdk=None):
"""
Gets the concealed packages imported by this Java project.
:param JDKConfig jdk: the JDK whose modules are to be searched for concealed packages
:return: a map from a module to its concealed packages imported by this project
"""
if jdk is None:
jdk = get_jdk(self.javaCompliance)
cache = '.concealed_imported_packages@' + str(jdk.version)
def _process_imports(imports, concealed):
imported = itertools.chain(imports, self.imported_java_packages(projectDepsOnly=False))
modulepath = jdk.get_modules()
for package in imported:
jmd, visibility = lookup_package(modulepath, package, "<unnamed>")
if visibility == 'concealed':
if self.defined_java_packages().isdisjoint(jmd.packages):
concealed.setdefault(jmd.name, set()).add(package)
else:
# This project is part of the module defining the concealed package
pass
if getattr(self, cache, None) is None:
concealed = {}
if jdk.javaCompliance >= '9':
compat = self.suite.getMxCompatibility()
if not compat.enhanced_module_usage_info():
imports = getattr(self, 'imports', [])
                    # Include concealed packages from transitive project dependencies
def visit(dep, edge):
if dep is not self and dep.isJavaProject():
dep_concealed = dep.get_concealed_imported_packages(jdk=jdk)
for module, packages in dep_concealed.items():
concealed.setdefault(module, set()).update(packages)
self.walk_deps(visit=visit)
if imports:
                        # This regex does not detect all legal package names. No regex can tell you if a.b.C.D is
                        # a class D in the package a.b.C, a class C.D in the package a.b or even a class b.C.D in
                        # the package a. As such mx uses the convention that package names start with a lowercase
                        # letter and class names with an uppercase letter.
packageRe = re.compile(r'(?:[a-z][a-zA-Z\d_$]*\.)*[a-z][a-zA-Z\d_$]*$')
for imported in imports:
m = packageRe.match(imported)
if not m:
abort('"imports" contains an entry that does not match expected pattern for package name: ' + imported, self)
_process_imports(imports, concealed)
else:
if hasattr(self, 'imports'):
_process_imports(getattr(self, 'imports'), concealed)
nl = os.linesep
msg = 'As of mx {}, the "imports" attribute has been replaced by the "requiresConcealed" attribute:{}{}'.format(compat.version(), nl, nl)
msg += ' "requiresConcealed" : {' + nl
for module, packages in concealed.items():
msg += ' "{}" : ["{}"],{}'.format(module, '", "'.join(packages), nl)
msg += ' }' + nl + nl +'See {} for more information.'.format(join(_mx_home, 'README.md'))
self.abort(msg)
parse_requiresConcealed_attribute(jdk, getattr(self, 'requiresConcealed', None), concealed, None, self)
            # JVMCI is special as it is not concealed in JDK 8 but is concealed in JDK 9+.
if 'jdk.internal.vm.ci' in (jmd.name for jmd in jdk.get_modules()) and self.get_declaring_module_name() != 'jdk.internal.vm.ci':
jvmci_packages = [p for p in self.imported_java_packages(projectDepsOnly=False) if p.startswith('jdk.vm.ci')]
if jvmci_packages:
concealed.setdefault('jdk.internal.vm.ci', set()).update(jvmci_packages)
concealed = {module : list(concealed[module]) for module in concealed}
setattr(self, cache, concealed)
return getattr(self, cache)
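    # Shape of the returned mapping, for illustration: a project importing JVMCI
    # packages on JDK 9+ might see
    #   {'jdk.internal.vm.ci': ['jdk.vm.ci.meta', 'jdk.vm.ci.code']}
    # which the compilers below translate into --add-exports options.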
def get_declaring_module_name(self):
module_dist = self.get_declaring_module_distribution()
if module_dist is None:
return None
return get_module_name(module_dist)
def get_declaring_module_distribution(self):
"""
Gets the distribution that contains this project and also defines a Java module.
:rtype: JARDistribution | None
"""
if not hasattr(self, '.declaring_module_dist'):
declaring_module_dist = None
compat = self.suite.getMxCompatibility()
for dist in sorted_dists():
module_name = get_module_name(dist)
if module_name and self in dist.archived_deps():
assert isinstance(dist, JARDistribution)
if declaring_module_dist is not None:
if compat.enhanced_module_usage_info():
raise abort("{} is part of multiple modules: {} and {}".format(self, get_module_name(declaring_module_dist), module_name))
declaring_module_dist = dist
if not compat.enhanced_module_usage_info():
# Earlier versions of mx were less strict and just returned the
# first module containing a project
break
setattr(self, '.declaring_module_dist', declaring_module_dist)
return getattr(self, '.declaring_module_dist')
### ~~~~~~~~~~~~~ Build task
class JavaBuildTask(ProjectBuildTask):
def __init__(self, args, project, jdk):
ProjectBuildTask.__init__(self, args, 1, project)
self.jdk = jdk
self.project = project
self._javafiles = None
self._newestOutput = None
self._compiler = None
def __str__(self):
return "Compiling {} with {}".format(self.subject.name, self._getCompiler().name())
def initSharedMemoryState(self):
ProjectBuildTask.initSharedMemoryState(self)
try:
self._newestBox = multiprocessing.Array('c', 2048)
except TypeError:
self._newestBox = multiprocessing.Value('c', '')
def pushSharedMemoryState(self):
ProjectBuildTask.pushSharedMemoryState(self)
self._newestBox.value = _encode(self._newestOutput.path if self._newestOutput else '')
def pullSharedMemoryState(self):
ProjectBuildTask.pullSharedMemoryState(self)
self._newestOutput = TimeStampFile(_decode(self._newestBox.value)) if self._newestBox.value else None
def cleanSharedMemoryState(self):
ProjectBuildTask.cleanSharedMemoryState(self)
self._newestBox = None
def buildForbidden(self):
if ProjectBuildTask.buildForbidden(self):
return True
if not self.args.java:
return True
if exists(join(self.subject.dir, 'plugin.xml')): # eclipse plugin project
return True
return False
def cleanForbidden(self):
if ProjectBuildTask.cleanForbidden(self):
return True
if not self.args.java:
return True
return False
def needsBuild(self, newestInput):
sup = ProjectBuildTask.needsBuild(self, newestInput)
if sup[0]:
return sup
reason = self._compute_build_reason(newestInput)
if reason:
return (True, reason)
if self.subject.check_current_annotation_processors_file():
return (True, 'annotation processor(s) changed')
if not self._get_javafiles() and not self._get_non_javafiles():
return (False, 'no sources')
return (False, 'all files are up to date')
def newestOutput(self):
return self._newestOutput
def _get_javafiles(self): return self._collect_files()._javafiles
def _get_non_javafiles(self): return self._collect_files()._non_javafiles
def _get_copyfiles(self): return self._collect_files()._copyfiles
def _collect_files(self):
if self._javafiles is None:
javafiles = {}
non_javafiles = {}
copyfiles = {}
outputDir = self.subject.output_dir()
for sourceDir in self.subject.source_dirs():
for root, _, files in os.walk(sourceDir, followlinks=True):
for name in files:
path = join(root, name)
if name.endswith('.java'):
classfile = outputDir + path[len(sourceDir):-len('.java')] + '.class'
javafiles[path] = classfile
else:
non_javafiles[path] = outputDir + path[len(sourceDir):]
if hasattr(self.subject, 'copyFiles'):
for depname, copyMap in self.subject.copyFiles.items():
dep = dependency(depname)
if not dep.isProject():
abort('Unsupported dependency type in "copyFiles" attribute: {}'.format(dep), context=self.subject)
deproot = dep.get_output_root()
if dep.isNativeProject():
deproot = join(dep.suite.dir, dep.getOutput())
for src, dst in copyMap.items():
copyfiles[join(deproot, src)] = join(outputDir, dst)
self._javafiles = javafiles
self._non_javafiles = non_javafiles
self._copyfiles = copyfiles
return self
def _compute_build_reason(self, newestInput):
self._collect_files()
def _find_build_reason(items):
for source, output in items:
if basename(source) == 'package-info.java':
continue
if not exists(output):
return output + ' does not exist'
output_ts = TimeStampFile(output)
if not self._newestOutput or output_ts.isNewerThan(self._newestOutput):
self._newestOutput = output_ts
if output_ts.isOlderThan(source):
return '{} is older than {}'.format(output_ts, TimeStampFile(source))
if newestInput and output_ts.isOlderThan(newestInput):
return '{} is older than {}'.format(output_ts, newestInput)
return None
return _find_build_reason((item for item in self._javafiles.items() if basename(item[0]) != 'package-info.java')) or \
_find_build_reason(self._non_javafiles.items()) or \
_find_build_reason(self._copyfiles.items())
def _getCompiler(self):
if self._compiler is None:
useJDT = not self.args.force_javac and self.args.jdt
if useJDT and hasattr(self.subject, 'forceJavac') and getattr(self.subject, 'forceJavac', False):
# Revisit once GR-8992 is resolved
logv('Project {} has "forceJavac" attribute set to True - falling back to javac'.format(self.subject))
useJDT = False
            # we cannot use JDT for projects with a JNI dir because the javah tool is needed anyway
            # and that is no longer available in JDK >= 10
if useJDT and self.subject.jni_gen_dir():
logv('Project {} has jni_gen_dir dir set. That is unsupported on ECJ - falling back to javac'.format(self.subject))
useJDT = False
jdt = None
if useJDT:
jdt = _resolve_ecj_jar(self.jdk, self.project.javaCompliance, self.args.jdt)
if not jdt:
logv('Project {} should be compiled with ecj. But no compatible ecj version was found for this project - falling back to javac'.format(self.subject))
if jdt:
if self.args.no_daemon:
self._compiler = ECJCompiler(self.jdk, jdt, self.args.extra_javac_args)
else:
self._compiler = ECJDaemonCompiler(self.jdk, jdt, self.args.extra_javac_args)
else:
if self.args.no_daemon or self.args.alt_javac:
self._compiler = JavacCompiler(self.jdk, self.args.alt_javac, self.args.extra_javac_args)
else:
self._compiler = JavacDaemonCompiler(self.jdk, self.args.extra_javac_args)
return self._compiler
def prepare(self, daemons):
"""
Prepares the compilation that will be performed if `build` is called.
:param dict daemons: map from keys to `Daemon` objects into which any daemons
created to assist this task when `build` is called should be placed.
"""
self.compiler = self._getCompiler()
outputDir = ensure_dir_exists(self.subject.output_dir())
self._collect_files()
javafiles = self._get_javafiles()
if javafiles:
self.postCompileActions = []
self.compileArgs = self.compiler.prepare(
sourceFiles=[_cygpathU2W(f) for f in sorted(javafiles.keys())],
project=self.subject,
outputDir=_cygpathU2W(outputDir),
classPath=_separatedCygpathU2W(classpath(self.subject.name, includeSelf=False, jdk=self.jdk, ignoreStripped=True)),
sourceGenDir=self.subject.source_gen_dir(),
jnigenDir=self.subject.jni_gen_dir(),
processorPath=_separatedCygpathU2W(self.subject.annotation_processors_path(self.jdk)),
disableApiRestrictions=not self.args.warnAPI,
warningsAsErrors=self.args.warning_as_error,
showTasks=self.args.jdt_show_task_tags,
postCompileActions=self.postCompileActions,
forceDeprecationAsWarning=self.args.force_deprecation_as_warning)
self.compiler.prepare_daemon(daemons, self.compileArgs)
else:
self.compileArgs = None
def build(self):
outputDir = ensure_dir_exists(self.subject.output_dir())
# Copy other files
self._collect_files()
if self._get_non_javafiles():
for source, output in self._get_non_javafiles().items():
ensure_dir_exists(dirname(output))
output_ts = TimeStampFile(output)
if output_ts.isOlderThan(source):
shutil.copyfile(source, output)
self._newestOutput = output_ts
logvv('Finished resource copy for {}'.format(self.subject.name))
# Java build
if self.compileArgs:
try:
self.compiler.compile(self.compileArgs)
finally:
for action in self.postCompileActions:
action()
logvv('Finished Java compilation for {}'.format(self.subject.name))
output = []
for root, _, filenames in os.walk(outputDir):
for fname in filenames:
output.append(os.path.join(root, fname))
if output:
self._newestOutput = TimeStampFile(max(output, key=getmtime))
# Record current annotation processor config
self.subject.update_current_annotation_processors_file()
if self._get_copyfiles():
            for src, dst in self._get_copyfiles().items():
ensure_dir_exists(dirname(dst))
if not exists(dst) or getmtime(dst) < getmtime(src):
shutil.copyfile(src, dst)
self._newestOutput = TimeStampFile(dst)
logvv('Finished copying files from dependencies for {}'.format(self.subject.name))
def clean(self, forBuild=False):
genDir = self.subject.source_gen_dir()
if exists(genDir):
logv('Cleaning {0}...'.format(genDir))
for f in os.listdir(genDir):
rmtree(join(genDir, f))
linkedGenDir = self.subject.latest_output_dir()
if exists(linkedGenDir):
logv('Cleaning {0}...'.format(linkedGenDir))
if islink(linkedGenDir):
os.unlink(linkedGenDir)
elif isdir(linkedGenDir):
rmtree(linkedGenDir)
outputDir = self.subject.output_dir()
if exists(outputDir):
logv('Cleaning {0}...'.format(outputDir))
rmtree(outputDir)
jnigenDir = self.subject.jni_gen_dir()
if jnigenDir and exists(jnigenDir):
logv('Cleaning {0}...'.format(jnigenDir))
rmtree(jnigenDir)
### ~~~~~~~~~~~~~ Compiler / Java Compiler
class JavaCompiler:
def name(self):
nyi('name', self)
def prepare(self, sourceFiles, project, jdk, compliance, outputDir, classPath, processorPath, sourceGenDir, jnigenDir,
disableApiRestrictions, warningsAsErrors, forceDeprecationAsWarning, showTasks, postCompileActions):
"""
        Prepares for a compilation with this compiler. This is done in the main process.
:param list sourceFiles: list of Java source files to compile
:param JavaProject project: the project containing the source files
:param JDKConfig jdk: the JDK used to execute this compiler
:param JavaCompliance compliance:
:param str outputDir: where to place generated class files
        :param str classPath: where to find user class files
:param str processorPath: where to find annotation processors
:param str sourceGenDir: where to place generated source files
:param str jnigenDir: where to place generated JNI header files
        :param bool disableApiRestrictions: specifies if the compiler should not warn about accesses to restricted APIs
:param bool warningsAsErrors: specifies if the compiler should treat warnings as errors
:param bool forceDeprecationAsWarning: never treat deprecation warnings as errors irrespective of warningsAsErrors
        :param bool showTasks: specifies if the compiler should show task tags as warnings (JDT only)
:param list postCompileActions: list into which callable objects can be added for performing post-compile actions
:return: the value to be bound to `args` when calling `compile` to perform the compilation
"""
nyi('prepare', self)
def prepare_daemon(self, daemons, compileArgs):
"""
Initializes any daemons used when `compile` is called with `compileArgs`.
:param dict daemons: map from name to `CompilerDaemon` into which new daemons should be registered
:param list compileArgs: the value bound to the `args` parameter when calling `compile`
"""
def compile(self, jdk, args):
"""
Executes the compilation that was prepared by a previous call to `prepare`.
:param JDKConfig jdk: the JDK used to execute this compiler
:param list args: the value returned by a call to `prepare`
"""
nyi('compile', self)
class JavacLikeCompiler(JavaCompiler):
def __init__(self, jdk, extraJavacArgs):
self.jdk = jdk
self.extraJavacArgs = extraJavacArgs if extraJavacArgs else []
def prepare(self, sourceFiles, project, outputDir, classPath, processorPath, sourceGenDir, jnigenDir,
disableApiRestrictions, warningsAsErrors, forceDeprecationAsWarning, showTasks, postCompileActions):
javacArgs = ['-g', '-d', outputDir]
compliance = project.javaCompliance
if self.jdk.javaCompliance.value > 8 and compliance.value <= 8 and isinstance(self, JavacCompiler): # pylint: disable=chained-comparison
# Ensure classes from dependencies take precedence over those in the JDK image.
# We only need this on javac as on ECJ we strip the jmods directory - see code later
javacArgs.append('-Xbootclasspath/p:' + classPath)
else:
javacArgs += ['-classpath', classPath]
if compliance.value >= 8:
javacArgs.append('-parameters')
if processorPath:
ensure_dir_exists(sourceGenDir)
javacArgs += ['-processorpath', processorPath, '-s', sourceGenDir]
else:
javacArgs += ['-proc:none']
c = str(compliance)
javacArgs += ['-target', c, '-source', c]
if _opts.very_verbose:
javacArgs.append('-verbose')
# GR-31142
postCompileActions.append(lambda: _stage_file_impl(project.get_output_root(), project.latest_output_dir()))
javacArgs.extend(self.extraJavacArgs)
fileList = join(project.get_output_root(), 'javafilelist.txt')
with open(fileList, 'w') as fp:
sourceFiles = ['"' + sourceFile.replace("\\", "\\\\") + '"' for sourceFile in sourceFiles]
fp.write(os.linesep.join(sourceFiles))
javacArgs.append('@' + _cygpathU2W(fileList))
tempFiles = [fileList]
if not _opts.verbose:
# Only remove temporary files if not verbose so the user can copy and paste
# the Java compiler command line directly to reproduce a failure.
def _rm_tempFiles():
for f in tempFiles:
os.remove(f)
postCompileActions.append(_rm_tempFiles)
if self.jdk.javaCompliance >= '9':
jdk_modules_overridden_on_classpath = set() # pylint: disable=C0103
declaring_module = project.get_declaring_module_name()
if declaring_module is not None:
jdk_modules_overridden_on_classpath.add(declaring_module)
def addExportArgs(dep, exports=None, prefix='', jdk=None, observable_modules=None):
"""
Adds ``--add-exports`` options (`JEP 261 <http://openjdk.java.net/jeps/261>`_) to
`javacArgs` for the non-public JDK modules required by `dep`.
:param mx.JavaProject dep: a Java project
:param dict exports: module exports for which ``--add-exports`` args
have already been added to `javacArgs`
:param string prefix: the prefix to be added to the ``--add-exports`` arg(s)
:param JDKConfig jdk: the JDK to be searched for concealed packages
:param observable_modules: only consider modules in this set if not None
"""
for module, packages in dep.get_concealed_imported_packages(jdk).items():
if observable_modules is not None and module not in observable_modules:
continue
if module in jdk_modules_overridden_on_classpath:
# If the classes in a module declaring the dependency are also
# resolvable on the class path, then do not export the module
# as the class path classes are more recent than the module classes
continue
for package in packages:
exportedPackages = exports.setdefault(module, set())
if package not in exportedPackages:
exportedPackages.add(package)
self.addModuleArg(javacArgs, prefix + '--add-exports', module + '/' + package + '=ALL-UNNAMED')
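            # Each exported package becomes a single javac option such as
            #   --add-exports=jdk.internal.vm.ci/jdk.vm.ci.meta=ALL-UNNAMED
            # (with a -J prefix when the export must reach the VM running the
            # annotation processors rather than the compilation itself).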
jmodsDir = None
if isinstance(self, ECJCompiler):
                # on ECJ and JDK >= 9, system modules cannot be accessed if the module path is not set
# see https://bugs.eclipse.org/bugs/show_bug.cgi?id=535552 for reference.
javacArgs.append('--module-path')
jmodsDir = join(self.jdk.home, 'jmods')
# If Graal is in the JDK we need to remove it to avoid conflicts with build artefacts
jmodsToRemove = ('jdk.internal.vm.compiler.jmod', 'jdk.internal.vm.compiler.management.jmod')
if any(exists(join(jmodsDir, jmod)) for jmod in jmodsToRemove):
# Use version and sha1 of source JDK's JAVA_HOME to ensure jmods copy is unique to source JDK
d = hashlib.sha1()
d.update(_encode(self.jdk.home))
jdkHomeSig = d.hexdigest()[0:10] # 10 digits of the sha1 is more than enough
jdkHomeMirror = ensure_dir_exists(join(primary_suite().get_output_root(), '.jdk{}_{}_ecj'.format(self.jdk.version, jdkHomeSig)))
jmodsCopyPath = join(jdkHomeMirror, 'jmods')
if not exists(jmodsCopyPath):
logv('The JDK contains Graal. Copying {} to {} and removing Graal to avoid conflicts in ECJ compilation.'.format(jmodsDir, jmodsCopyPath))
if not can_symlink():
shutil.copytree(jmodsDir, jmodsCopyPath)
for jmod in jmodsToRemove:
os.remove(join(jmodsCopyPath, jmod))
else:
ensure_dir_exists(jmodsCopyPath)
for name in os.listdir(jmodsDir):
if name not in jmodsToRemove:
os.symlink(join(jmodsDir, name), join(jmodsCopyPath, name))
jmodsDir = jmodsCopyPath
javacArgs.append(jmodsDir)
# on ECJ if the module path is set then the processors need to use the processor-module-path to be found
if processorPath:
javacArgs += ['--processor-module-path', processorPath, '-s', sourceGenDir]
required_modules = set()
if compliance >= '9':
exports = {}
compat = project.suite.getMxCompatibility()
if compat.enhanced_module_usage_info():
required_modules = set(getattr(project, 'requires', []))
required_modules.add('java.base')
else:
required_modules = None
entries = classpath_entries(project, includeSelf=False)
for e in entries:
e_module_name = e.get_declaring_module_name()
if e.isJdkLibrary():
if required_modules is not None and self.jdk.javaCompliance >= e.jdkStandardizedSince:
# this will not be on the classpath, and is needed from a JDK module
if not e_module_name:
abort('JDK library standardized since {} must have a "module" attribute'.format(e.jdkStandardizedSince), context=e)
required_modules.add(e_module_name)
else:
if e_module_name:
jdk_modules_overridden_on_classpath.add(e_module_name)
if required_modules and e_module_name in required_modules:
abort('Project must not specify {} in a "requires" attribute as it conflicts with the dependency {}'.format(e_module_name, e),
context=project)
elif e.isJavaProject():
addExportArgs(e, exports)
if required_modules is not None:
concealed = parse_requiresConcealed_attribute(self.jdk, getattr(project, 'requiresConcealed', None), {}, None, project)
required_modules.update((m for m in concealed if m not in jdk_modules_overridden_on_classpath))
addExportArgs(project, exports, '', self.jdk, required_modules)
root_modules = set(exports.keys())
if jmodsDir:
                        # on ECJ filter root modules already in the JDK otherwise we will get a duplicate module error when compiling
root_modules = set([m for m in root_modules if not os.path.exists(join(jmodsDir, m + '.jmod'))])
if required_modules:
root_modules.update((m for m in required_modules if m.startswith('jdk.incubator')))
if root_modules:
self.addModuleArg(javacArgs, '--add-modules', ','.join(root_modules))
if required_modules:
self.addModuleArg(javacArgs, '--limit-modules', ','.join(required_modules))
# this hack is exclusive to javac. on ECJ we copy the jmods directory to avoid this problem
# if the JVM happens to contain a compiler.
aps = project.annotation_processors()
if aps and isinstance(self, JavacCompiler):
# We want annotation processors to use classes on the class path
# instead of those in modules since the module classes may not
# be in exported packages and/or may have different signatures.
# Unfortunately, there's no VM option for hiding modules, only the
# --limit-modules option for restricting modules observability.
# We limit module observability to those required by javac and
# the module declaring sun.misc.Unsafe which is used by annotation
# processors such as JMH.
observable_modules = frozenset(['jdk.compiler', 'jdk.zipfs', 'jdk.unsupported'])
exports = {}
entries = classpath_entries(aps, preferProjects=True)
for e in entries:
e_module_name = e.get_declaring_module_name()
if e_module_name:
jdk_modules_overridden_on_classpath.add(e_module_name)
elif e.isJavaProject():
addExportArgs(e, exports, '-J', self.jdk, observable_modules)
# An annotation processor may have a dependency on other annotation
# processors. The latter might need extra exports.
entries = classpath_entries(aps, preferProjects=False)
for dep in entries:
if dep.isJARDistribution() and dep.definedAnnotationProcessors:
for apDep in dep.deps:
module_name = apDep.get_declaring_module_name()
if module_name:
jdk_modules_overridden_on_classpath.add(module_name)
elif apDep.isJavaProject():
addExportArgs(apDep, exports, '-J', self.jdk, observable_modules)
root_modules = set(exports.keys())
if required_modules:
root_modules.update((m for m in required_modules if m.startswith('jdk.incubator')))
if root_modules:
self.addModuleArg(javacArgs, '--add-modules', ','.join(root_modules))
if len(jdk_modules_overridden_on_classpath) != 0:
javacArgs.append('-J--limit-modules=' + ','.join(observable_modules))
return self.prepareJavacLike(project, javacArgs, disableApiRestrictions, warningsAsErrors, forceDeprecationAsWarning, showTasks, tempFiles, jnigenDir)
def addModuleArg(self, args, key, value):
        nyi('addModuleArg', self)
def prepareJavacLike(self, project, javacArgs, disableApiRestrictions, warningsAsErrors, forceDeprecationAsWarning, showTasks, tempFiles, jnigenDir):
nyi('buildJavacLike', self)
class JavacCompiler(JavacLikeCompiler):
def __init__(self, jdk, altJavac=None, extraJavacArgs=None):
JavacLikeCompiler.__init__(self, jdk, extraJavacArgs)
self.altJavac = altJavac
def name(self):
return 'javac(JDK {})'.format(self.jdk.javaCompliance)
def addModuleArg(self, args, key, value):
args.append(key + '=' + value)
def prepareJavacLike(self, project, javacArgs, disableApiRestrictions, warningsAsErrors, forceDeprecationAsWarning, showTasks, tempFiles, jnigenDir):
jdk = self.jdk
if jnigenDir is not None:
javacArgs += ['-h', jnigenDir]
lint = ['all', '-auxiliaryclass', '-processing', '-removal']
overrides = project.get_javac_lint_overrides()
if overrides:
if 'none' in overrides:
lint = ['none']
else:
lint += overrides
if lint != ['none']:
# https://blogs.oracle.com/darcy/new-javac-warning-for-setting-an-older-source-without-bootclasspath
# Disable the "bootstrap class path not set in conjunction with -source N" warning
# as we're relying on the Java compliance of project to correctly specify a JDK range
# providing the API required by the project.
lint += ['-options']
if forceDeprecationAsWarning:
lint += ['-deprecation']
knownLints = jdk.getKnownJavacLints()
if knownLints:
lint = [l for l in lint if l in knownLints]
if lint:
javacArgs.append('-Xlint:' + ','.join(lint))
if disableApiRestrictions:
javacArgs.append('-XDignore.symbol.file')
else:
if jdk.javaCompliance >= '9':
warn("Can not check all API restrictions on 9 (in particular sun.misc.Unsafe)")
if warningsAsErrors:
javacArgs.append('-Werror')
if showTasks:
abort('Showing task tags is not currently supported for javac')
javacArgs.append('-encoding')
javacArgs.append('UTF-8')
javacArgs.append('-Xmaxerrs')
javacArgs.append('10000')
return javacArgs
def compile(self, args):
javac = self.altJavac if self.altJavac else self.jdk.javac
cmd = [javac] + ['-J' + arg for arg in self.jdk.java_args] + args
run(cmd)
class JavacDaemonCompiler(JavacCompiler):
def __init__(self, jdk, extraJavacArgs=None):
JavacCompiler.__init__(self, jdk, None, extraJavacArgs)
def name(self):
return 'javac-daemon(JDK {})'.format(self.jdk.javaCompliance)
def compile(self, args):
nonJvmArgs = [a for a in args if not a.startswith('-J')]
return self.daemon.compile(nonJvmArgs)
def prepare_daemon(self, daemons, compileArgs):
jvmArgs = self.jdk.java_args + [a[2:] for a in compileArgs if a.startswith('-J')]
key = 'javac-daemon:' + self.jdk.java + ' '.join(jvmArgs)
self.daemon = daemons.get(key)
if not self.daemon:
self.daemon = JavacDaemon(self.jdk, jvmArgs)
daemons[key] = self.daemon
class Daemon:
def shutdown(self):
pass
class CompilerDaemon(Daemon):
def __init__(self, jdk, jvmArgs, mainClass, toolJar, buildArgs=None):
logv("Starting daemon for {} [{}]".format(jdk.java, ', '.join(jvmArgs)))
self.jdk = jdk
if not buildArgs:
buildArgs = []
build(buildArgs + ['--no-daemon', '--dependencies', 'com.oracle.mxtool.compilerserver'])
cpArgs = get_runtime_jvm_args(names=['com.oracle.mxtool.compilerserver'], jdk=jdk, cp_suffix=toolJar)
self.port = None
self.portRegex = re.compile(r'Started server on port ([0-9]+)')
# Start Java process asynchronously
verbose = ['-v'] if _opts.verbose else []
jobs = ['-j', str(cpu_count())]
args = [jdk.java] + jvmArgs + cpArgs + [mainClass] + verbose + jobs
preexec_fn, creationflags = _get_new_progress_group_args()
if _opts.verbose:
log(' '.join(map(pipes.quote, args)))
p = subprocess.Popen(args, preexec_fn=preexec_fn, creationflags=creationflags, stdout=subprocess.PIPE) #pylint: disable=subprocess-popen-preexec-fn
# scan stdout to capture the port number
pout = []
def redirect(stream):
for line in iter(stream.readline, b''):
line = _decode(line)
pout.append(line)
self._noticePort(line)
stream.close()
t = Thread(target=redirect, args=(p.stdout,))
t.daemon = True
t.start()
# Ensure the process is cleaned up when mx exits
_addSubprocess(p, args)
# wait 30 seconds for the Java process to launch and report the port number
retries = 0
while self.port is None:
retries = retries + 1
returncode = p.poll()
if returncode is not None:
raise RuntimeError('Error starting ' + self.name() + ': returncode=' + str(returncode) + '\n' + ''.join(pout))
if retries == 299:
warn('Killing ' + self.name() + ' after failing to see port number after nearly 30 seconds')
os.kill(p.pid, signal.SIGKILL)
time.sleep(1.0)
elif retries > 300:
raise RuntimeError('Error starting ' + self.name() + ': No port number was found in output after 30 seconds\n' + ''.join(pout))
else:
time.sleep(0.1)
self.connection = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
self.connection.connect(('127.0.0.1', self.port))
logv('[Started ' + str(self) + ']')
return
except socket.error as e:
logv('[Error starting ' + str(self) + ': ' + str(e) + ']')
raise e
def _noticePort(self, data):
logv(data.rstrip())
if self.port is None:
m = self.portRegex.match(data)
if m:
self.port = int(m.group(1))
def compile(self, compilerArgs):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect(('127.0.0.1', self.port))
logv(self.jdk.javac + ' ' + ' '.join(compilerArgs))
commandLine = u'\x00'.join(compilerArgs)
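        # Wire protocol as implemented here: the arguments are sent NUL-separated on a
        # single newline-terminated line and the daemon answers with one line holding the
        # compiler's exit code; an empty reply is treated as a crashed daemon.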
s.send((commandLine + '\n').encode('utf-8'))
f = s.makefile()
response = _unicode(f.readline())
if response == '':
# Compiler server process probably crashed
logv('[Compiler daemon process appears to have crashed]')
retcode = -1
else:
retcode = int(response)
s.close()
if retcode:
if _opts.verbose:
if _opts.very_verbose:
retcode = str(subprocess.CalledProcessError(retcode, self.jdk.javac + ' ' + ' '.join(compilerArgs)))
else:
log('[exit code: ' + str(retcode) + ']')
abort(retcode)
return retcode
def shutdown(self):
try:
self.connection.send('\n'.encode('utf8'))
self.connection.close()
logv('[Stopped ' + str(self) + ']')
except socket.error as e:
logv('Error stopping ' + str(self) + ': ' + str(e))
def __str__(self):
return self.name() + ' on port ' + str(self.port) + ' for ' + str(self.jdk)
class JavacDaemon(CompilerDaemon):
def __init__(self, jdk, jvmArgs):
CompilerDaemon.__init__(self, jdk, jvmArgs, 'com.oracle.mxtool.compilerserver.JavacDaemon', jdk.toolsjar, ['--force-javac'])
def name(self):
return 'javac-daemon'
class ECJCompiler(JavacLikeCompiler):
def __init__(self, jdk, jdtJar, extraJavacArgs=None):
JavacLikeCompiler.__init__(self, jdk, extraJavacArgs)
self.jdtJar = jdtJar
def name(self):
return 'ecj(JDK {})'.format(self.jdk.javaCompliance)
def addModuleArg(self, args, key, value):
args.append(key)
args.append(value)
def prepareJavacLike(self, project, javacArgs, disableApiRestrictions, warningsAsErrors, forceDeprecationAsWarning, showTasks, tempFiles, jnigenDir):
jdtArgs = javacArgs
jdtProperties = join(project.dir, '.settings', 'org.eclipse.jdt.core.prefs')
jdtPropertiesSources = project.eclipse_settings_sources()['org.eclipse.jdt.core.prefs']
if not exists(jdtProperties) or TimeStampFile(jdtProperties).isOlderThan(jdtPropertiesSources):
# Try to fix a missing or out of date properties file by running eclipseinit
project._eclipseinit()
if not exists(jdtProperties):
log('JDT properties file {0} not found'.format(jdtProperties))
else:
with open(jdtProperties) as fp:
origContent = fp.read()
content = origContent
if [ap for ap in project.declaredAnnotationProcessors if ap.isLibrary()]:
# unfortunately, the command line compiler doesn't let us ignore warnings for generated files only
content = content.replace('=warning', '=ignore')
elif warningsAsErrors:
content = content.replace('=warning', '=error')
if not showTasks:
content = content + '\norg.eclipse.jdt.core.compiler.problem.tasks=ignore'
if disableApiRestrictions:
content = content + '\norg.eclipse.jdt.core.compiler.problem.forbiddenReference=ignore'
content = content + '\norg.eclipse.jdt.core.compiler.problem.discouragedReference=ignore'
if forceDeprecationAsWarning:
content = content.replace('org.eclipse.jdt.core.compiler.problem.deprecation=error', 'org.eclipse.jdt.core.compiler.problem.deprecation=warning')
if origContent != content:
jdtPropertiesTmp = jdtProperties + '.tmp'
with open(jdtPropertiesTmp, 'w') as fp:
fp.write(content)
tempFiles.append(jdtPropertiesTmp)
jdtArgs += ['-properties', _cygpathU2W(jdtPropertiesTmp)]
else:
jdtArgs += ['-properties', _cygpathU2W(jdtProperties)]
if jnigenDir:
abort('Cannot use the "jniHeaders" flag with ECJ in project {}. Force javac to generate JNI headers.'.format(project.name), context=project)
return jdtArgs
def compile(self, jdtArgs):
run_java(['-jar', self.jdtJar] + jdtArgs, jdk=self.jdk)
class ECJDaemonCompiler(ECJCompiler):
def __init__(self, jdk, jdtJar, extraJavacArgs=None):
ECJCompiler.__init__(self, jdk, jdtJar, extraJavacArgs)
def name(self):
return 'ecj-daemon(JDK {})'.format(self.jdk.javaCompliance)
def compile(self, jdtArgs):
self.daemon.compile(jdtArgs)
def prepare_daemon(self, daemons, compileArgs):
jvmArgs = self.jdk.java_args
key = 'ecj-daemon:' + self.jdk.java + ' '.join(jvmArgs)
self.daemon = daemons.get(key)
if not self.daemon:
self.daemon = ECJDaemon(self.jdk, jvmArgs, self.jdtJar)
daemons[key] = self.daemon
class ECJDaemon(CompilerDaemon):
def __init__(self, jdk, jvmArgs, jdtJar):
CompilerDaemon.__init__(self, jdk, jvmArgs, 'com.oracle.mxtool.compilerserver.ECJDaemon', jdtJar)
def name(self):
return 'ecj-daemon-server(JDK {})'.format(self.jdk.javaCompliance)
### ~~~~~~~~~~~~~ Project
class AbstractNativeProject(Project):
def __init__(self, suite, name, subDir, srcDirs, deps, workingSets, d, theLicense=None, **kwargs):
context = 'project ' + name
self.buildDependencies = Suite._pop_list(kwargs, 'buildDependencies', context)
super(AbstractNativeProject, self).__init__(suite, name, subDir, srcDirs, deps, workingSets, d, theLicense,
**kwargs)
def isPlatformDependent(self):
return True
class NativeProject(AbstractNativeProject):
"""
A NativeProject is a Project containing native code. It is built using `make`. The `MX_CLASSPATH` variable will be set
to a classpath containing all JavaProject dependencies.
Additional attributes:
results: a list of result file names that will be packaged if the project is part of a distribution
headers: a list of source file names (typically header files) that will be packaged if the project is part of a distribution
output: the directory where the Makefile puts the `results`
    vpath: if `True`, make will be executed from the output root, with the `VPATH` environment variable set to the source directory;
if `False` or undefined, make will be executed from the source directory
buildEnv: a dictionary of custom environment variables that are passed to the `make` process
"""
def __init__(self, suite, name, subDir, srcDirs, deps, workingSets, results, output, d, theLicense=None, testProject=False, vpath=False, **kwArgs):
super(NativeProject, self).__init__(suite, name, subDir, srcDirs, deps, workingSets, d, theLicense, testProject=testProject, **kwArgs)
self.results = results
self.output = output
self.vpath = vpath
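    # A rough sketch of how such a project might be declared in a suite.py; keys other
    # than the attributes documented above are assumptions for illustration:
    #   "com.example.native" : {
    #       "native" : True,
    #       "results" : ["libexample.so"],
    #       "headers" : ["include/example.h"],
    #       "output" : "build",
    #       "vpath" : True,
    #       "buildEnv" : {"CFLAGS" : "-O2"},
    #   },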
def getBuildTask(self, args):
return NativeBuildTask(args, self)
def getOutput(self, replaceVar=mx_subst.results_substitutions):
if self.output:
return mx_subst.as_engine(replaceVar).substitute(self.output, dependency=self)
if self.vpath:
return self.get_output_root()
return None
def getResults(self, replaceVar=mx_subst.results_substitutions):
results = []
output = self.getOutput(replaceVar=replaceVar)
for rt in self.results:
r = mx_subst.as_engine(replaceVar).substitute(rt, dependency=self)
results.append(join(self.suite.dir, output, r))
return results
def getBuildEnv(self, replaceVar=mx_subst.path_substitutions):
ret = {}
if hasattr(self, 'buildEnv'):
for key, value in self.buildEnv.items():
ret[key] = replaceVar.substitute(value, dependency=self)
return ret
def getArchivableResults(self, use_relpath=True, single=False):
if single:
raise ValueError("single not supported")
output = self.getOutput()
output = join(self.suite.dir, output) if output else None
for r in self.getResults():
if output and use_relpath:
filename = os.path.relpath(r, output)
else:
filename = basename(r)
# Make debug-info files optional for distribution
if is_debug_lib_file(r) and not os.path.exists(r):
warn("File {} for archive {} does not exist.".format(filename, self.name))
else:
yield r, filename
if hasattr(self, "headers"):
srcdir = os.path.join(self.suite.dir, self.dir)
for h in self.headers:
if use_relpath:
filename = h
else:
filename = basename(h)
yield os.path.join(srcdir, h), filename
### ~~~~~~~~~~~~~ Build Tasks
class AbstractNativeBuildTask(ProjectBuildTask):
default_parallelism = 8
def __init__(self, args, project):
# Cap jobs to maximum of 8 by default. If a project wants more parallelism, it can explicitly set the
# "max_jobs" attribute. Setting jobs=cpu_count() would not allow any other tasks in parallel, now matter
# how much parallelism the build machine supports.
jobs = min(int(getattr(project, 'max_jobs', self.default_parallelism)), cpu_count())
super(AbstractNativeBuildTask, self).__init__(args, jobs, project)
def buildForbidden(self):
if not self.args.native:
return True
return super(AbstractNativeBuildTask, self).buildForbidden()
def cleanForbidden(self):
if not self.args.native:
return True
return super(AbstractNativeBuildTask, self).cleanForbidden()
def needsBuild(self, newestInput):
is_needed, reason = super(AbstractNativeBuildTask, self).needsBuild(newestInput)
if is_needed:
return True, reason
output = self.newestOutput() # pylint: disable=assignment-from-no-return
if output is None:
return True, None
return False, reason
class NativeBuildTask(AbstractNativeBuildTask):
def __init__(self, args, project):
super(NativeBuildTask, self).__init__(args, project)
if hasattr(project, 'single_job') or not project.suite.getMxCompatibility().useJobsForMakeByDefault():
self.parallelism = 1
elif (is_darwin() and is_continuous_integration()) and not _opts.cpu_count:
# work around darwin bug where make randomly fails in our CI (GR-6892) if compilation is too parallel
self.parallelism = 1
self._newestOutput = None
def __str__(self):
return 'Building {} with GNU Make'.format(self.subject.name)
def _build_run_args(self):
env = os.environ.copy()
all_deps = self.subject.canonical_deps()
if hasattr(self.subject, 'buildDependencies'):
all_deps = set(all_deps) | set(self.subject.buildDependencies)
javaDeps = [d for d in all_deps if isinstance(d, JavaProject)]
if len(javaDeps) > 0:
env['MX_CLASSPATH'] = classpath(javaDeps)
cmdline = mx_compdb.gmake_with_compdb_cmd(context=self.subject)
if _opts.verbose:
# The Makefiles should have logic to disable the @ sign
# so that all executed commands are visible.
cmdline += ["MX_VERBOSE=y"]
if hasattr(self.subject, "vpath") and self.subject.vpath:
env['VPATH'] = self.subject.dir
cwd = join(self.subject.suite.dir, self.subject.getOutput())
ensure_dir_exists(cwd)
cmdline += ['-f', join(self.subject.dir, 'Makefile')]
else:
cwd = self.subject.dir
if hasattr(self.subject, "makeTarget"):
cmdline += [self.subject.makeTarget]
if hasattr(self.subject, "getBuildEnv"):
env.update(self.subject.getBuildEnv())
if self.parallelism > 1:
cmdline += ['-j', str(self.parallelism)]
return cmdline, cwd, env
def build(self):
cmdline, cwd, env = self._build_run_args()
run(cmdline, cwd=cwd, env=env)
mx_compdb.merge_compdb(subject=self.subject, path=cwd)
self._newestOutput = None
def needsBuild(self, newestInput):
logv('Checking whether to build {} with GNU Make'.format(self.subject.name))
cmdline, cwd, env = self._build_run_args()
cmdline += ['-q']
if _opts.verbose:
# default out/err stream
ret_code = run(cmdline, cwd=cwd, env=env, nonZeroIsFatal=False)
else:
with open(os.devnull, 'w') as fnull:
# suppress out/err (redirect to null device)
ret_code = run(cmdline, cwd=cwd, env=env, nonZeroIsFatal=False, out=fnull, err=fnull)
if ret_code != 0:
return (True, "rebuild needed by GNU Make")
return (False, "up to date according to GNU Make")
def newestOutput(self):
if self._newestOutput is None:
results = self.subject.getResults()
self._newestOutput = None
for r in results:
ts = TimeStampFile(r, followSymlinks='newest')
if ts.exists():
if not self._newestOutput or ts.isNewerThan(self._newestOutput):
self._newestOutput = ts
else:
self._newestOutput = ts
break
return self._newestOutput
def clean(self, forBuild=False):
if not forBuild: # assume make can do incremental builds
if hasattr(self.subject, "vpath") and self.subject.vpath:
output = self.subject.getOutput()
if os.path.exists(output) and output != '.':
shutil.rmtree(output)
else:
env = os.environ.copy()
if hasattr(self.subject, "getBuildEnv"):
env.update(self.subject.getBuildEnv())
run([gmake_cmd(context=self.subject), 'clean'], cwd=self.subject.dir, env=env)
self._newestOutput = None
class Extractor(_with_metaclass(ABCMeta, object)):
def __init__(self, src): # pylint: disable=super-init-not-called
self.src = src
def extract(self, dst):
logv("Extracting {} to {}".format(self.src, dst))
with self._open() as ar:
logv("Sanity checking archive...")
if any((m for m in self._getnames(ar) if not self._is_sane_name(m, dst))):
abort("Refusing to create files outside of the destination folder.\n" +
"Reasons might be entries with absolute paths or paths pointing to the parent directory (starting with `..`).\n" +
"Archive: {} \nProblematic files:\n{}".format(self.src, "\n".join((m for m in self._getnames(ar) if not self._is_sane_name(m, dst)))
))
self._extractall(ar, dst)
@abstractmethod
def _open(self):
pass
@abstractmethod
def _getnames(self, ar):
pass
@abstractmethod
def _extractall(self, ar, dst):
pass
def _is_sane_name(self, m, dst):
return os.path.realpath(os.path.join(dst, m)).startswith(os.path.realpath(dst))
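# For example (hypothetical values): with dst='/tmp/out', a member named 'a/b.txt'
# resolves inside dst and is accepted, whereas '../evil.txt' or '/etc/passwd'
# resolve outside of dst and are rejected; this is what lets extract() refuse
# zip-slip style archives.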
@staticmethod
def create(src):
if src.endswith(".tar") or src.endswith(".tar.gz") or src.endswith(".tgz"):
return TarExtractor(src)
if src.endswith(".zip"):
return ZipExtractor(src)
abort("Don't know how to extract the archive: " + src)
class TarExtractor(Extractor):
def _open(self):
return tarfile.open(self.src)
def _getnames(self, ar):
return ar.getnames()
def _extractall(self, ar, dst):
return ar.extractall(dst)
class ZipExtractor(Extractor):
def _open(self):
return zipfile.ZipFile(self.src)
def _getnames(self, ar):
return ar.namelist()
def _extractall(self, ar, dst):
return ar.extractall(dst)
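# Minimal usage sketch (paths are hypothetical): create() picks the extractor from
# the file extension and extract() performs the sanity-checked unpacking.
#
#     Extractor.create('/path/to/library.tar.gz').extract('/path/to/destination')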
### ~~~~~~~~~~~~~ Library
class BaseLibrary(Dependency):
"""
A library that has no structure understood by mx, typically a jar file.
It is used "as is".
"""
def __init__(self, suite, name, optional, theLicense, **kwArgs):
Dependency.__init__(self, suite, name, theLicense, **kwArgs)
self.optional = optional
def _walk_deps_visit_edges(self, visited, edge, preVisit=None, visit=None, ignoredEdges=None, visitEdge=None):
pass
def resolveDeps(self):
licenseId = self.theLicense # pylint: disable=access-member-before-definition
# do not use suite's default license
if licenseId:
self.theLicense = get_license(licenseId, context=self)
def substVars(self, text):
"""
Returns the result of calling `text.format(**kwargs)` where kwargs consists of the instance variables of this object and their values.
"""
return text.format(**vars(self))
def substVarsList(self, textList):
"""
Returns the list formed by calling `self.substVars` on each string in `textList`.
"""
return [self.substVars(text) for text in textList]
@abstractmethod
def is_available(self):
"""
Used to check whether an optional library is available.
:rtype: bool
"""
pass
class _RewritableLibraryMixin:
def _should_generate_cache_path(self):
return not self.path and not self.optional
def _optionally_generate_cache_pathAttr(self, ext):
if self._should_generate_cache_path():
if not self.urls:
self.abort('Library without "path" attribute must have a non-empty "urls" list attribute or "maven" attribute')
if not self.sha1:
self.abort('Library without "path" attribute must have a non-empty "sha1" attribute')
self.path = _get_path_in_cache(self.name, self.sha1, self.urls, ext, sources=False)
def _optionally_generate_cache_sourcePathAttr(self):
if not self.sourcePath and self.sourceUrls:
if not self.sourceSha1:
self.abort('Library without "sourcePath" attribute but with non-empty "sourceUrls" attribute must have a non-empty "sourceSha1" attribute')
self.sourcePath = _get_path_in_cache(self.name, self.sourceSha1, self.sourceUrls, self.sourceExt, sources=True)
def _normalize_path(self, path):
if path:
# Accept forward slashes regardless of platform.
path = path.replace('/', os.sep)
# Relative paths refer to suite directory.
path = _make_absolute(path, self.suite.dir)
return path
def _check_hash_specified(self, path, attribute):
if not hasattr(self, attribute):
if exists(path):
self.abort('Missing "{0}" property for library {1}, add the following to the definition of {1}:\n{0}={2}'.format(attribute, self.name, sha1OfFile(path)))
else:
self.abort('Missing "{0}" property for library {1}'.format(attribute, self.name))
class ResourceLibrary(BaseLibrary, _RewritableLibraryMixin):
"""
A library that is just a resource and therefore not a `ClasspathDependency`.
"""
def __init__(self, suite, name, path, optional, urls, sha1, ext=None, **kwArgs):
BaseLibrary.__init__(self, suite, name, optional, None, **kwArgs)
# Perform URL and SHA1 rewriting before potentially generating cache path.
self.urls, self.sha1 = mx_urlrewrites._rewrite_urls_and_sha1(self.substVarsList(urls), sha1)
# Path can be generated from URL and SHA1 if needed.
self.ext = ext
self.path = self._normalize_path(path)
self._optionally_generate_cache_pathAttr(self.ext)
# TODO Note from refactoring.
# Was set here but not clear if any code should expect ResourceLibrary to have sourcePath.
self.sourcePath = None
def get_urls(self):
return self.urls
def get_path(self, resolve):
sha1path = self.path + '.sha1'
return download_file_with_sha1(self.name, self.path, self.urls, self.sha1, sha1path, resolve, not self.optional, ext=self.ext, canSymlink=True)
def getArchivableResults(self, use_relpath=True, single=False):
path = realpath(self.get_path(False))
yield path, _map_to_maven_dist_name(self.name) + '.' + get_file_extension(path)
def getBuildTask(self, args):
return LibraryDownloadTask(args, self)
def is_available(self):
if not self.path:
return False
return exists(self.get_path(True))
def _check_download_needed(self):
sha1path = self.path + '.sha1'
return not _check_file_with_sha1(self.path, self.urls, self.sha1, sha1path)
def _comparison_key(self):
return (self.sha1, self.name)
class PackedResourceLibrary(ResourceLibrary):
"""
A ResourceLibrary that comes in an archive and should be extracted after downloading.
"""
def __init__(self, *args, **kwargs):
super(PackedResourceLibrary, self).__init__(*args, **kwargs)
# Specifying preExtractedPath beats all other extraction logic.
if hasattr(self, 'preExtractedPath'):
if self.path:
self.abort('At most one of the "preExtractedPath" or "path" attributes should be used on a packed resource library')
self.extract_path = self._normalize_path(self.preExtractedPath)
# Absent preExtractedPath we want self.path to point to the archive and self.extract_path to point to the extracted content.
else:
# If we do not have attributes to generate cache paths
# then we have to be optional and use explicit paths.
if not self.urls or not self.sha1:
if self.optional:
self.extract_path = self.path
self.path = None
else:
self.abort('Non-optional packed resource must have both "urls" and "sha1" attributes')
else:
candidate_archive_path = _get_path_in_cache(self.name, self.sha1, self.urls, self.ext, sources=False)
candidate_extract_path = _get_path_in_cache(self.name, self.sha1, self.urls, '.extracted', sources=False)
if self.path == candidate_archive_path:
# The path attribute was generated.
# Use that path to point to the archive content so that extraction can rely on archive extension.
self.extract_path = candidate_extract_path
else:
# The path attribute was provided explicitly.
# Use that path to point to the extracted content and use the generated path to point to the archive.
self.extract_path = self.path
self.path = candidate_archive_path
def _should_generate_cache_path(self):
return super(PackedResourceLibrary, self)._should_generate_cache_path() and not hasattr(self, 'preExtractedPath')
def _check_extract_needed(self, dst, src):
if not os.path.exists(dst):
logvv("Destination does not exist")
logvv("Destination: " + dst)
return True
if getmtime(src) > getmtime(dst):
logvv("Destination older than source")
logvv("Destination: " + dst)
logvv("Source: " + src)
return True
return False
def is_available(self):
if not self.extract_path:
return False
return exists(self.get_path(True))
def getBuildTask(self, args):
if self.path:
return LibraryDownloadTask(args, self)
else:
# pre-extracted
return NoOpTask(self, args)
def get_path(self, resolve):
extract_path = _make_absolute(self.extract_path, self.suite.dir)
if self.path:
download_path = super(PackedResourceLibrary, self).get_path(resolve)
if resolve and self._check_extract_needed(extract_path, download_path):
with SafeDirectoryUpdater(extract_path, create=True) as sdu:
Extractor.create(download_path).extract(sdu.directory)
return extract_path
def _check_download_needed(self):
if not self.path:
return False
need_download = super(PackedResourceLibrary, self)._check_download_needed()
extract_path = _make_absolute(self.extract_path, self.suite.dir)
download_path = _make_absolute(self.path, self.suite.dir)
return need_download or self._check_extract_needed(extract_path, download_path)
class JreLibrary(BaseLibrary, ClasspathDependency):
"""
A library jar provided by the Java Runtime Environment (JRE).
This mechanism exists primarily to be able to support code
that may use functionality in one JRE (e.g., Oracle JRE)
that is not present in another JRE (e.g., OpenJDK). A
motivating example is the Java Flight Recorder library
found in the Oracle JRE.
"""
def __init__(self, suite, name, jar, optional, theLicense, **kwArgs):
BaseLibrary.__init__(self, suite, name, optional, theLicense, **kwArgs)
ClasspathDependency.__init__(self, **kwArgs)
self.jar = jar
def _comparison_key(self):
return self.jar
def is_available(self):
# This can not be answered without a JRE as context, see is_provided_by
return True
def is_provided_by(self, jdk):
"""
Determines if this library is provided by `jdk`.
:param JDKConfig jdk: the JDK to test
:return: whether this library is available in `jdk`
"""
return jdk.hasJarOnClasspath(self.jar)
def getBuildTask(self, args):
return NoOpTask(self, args)
def classpath_repr(self, jdk, resolve=True):
"""
Gets the absolute path of this library in `jdk`. This method will abort if this library is
not provided by `jdk`.
:param JDKConfig jdk: the JDK to test
:return: the absolute path of this library's jar file in `jdk`
"""
if not jdk:
abort('A JDK is required to resolve ' + self.name + ' to a path')
path = jdk.hasJarOnClasspath(self.jar)
if not path:
abort(self.name + ' is not provided by ' + str(jdk))
return path
def isJar(self):
return True
class JdkLibrary(BaseLibrary, ClasspathDependency):
"""
A library that will be provided by the JDK but may be absent.
Any project or normal library that depends on an optional missing library
will be removed from the global project and library registry.
:param Suite suite: the suite defining this library
:param str name: the name of this library
:param path: path relative to a JDK home directory where the jar file for this library is located
:param deps: the dependencies of this library (which can only be other `JdkLibrary`s)
:param bool optional: a missing non-optional library will cause mx to abort when resolving a reference to this library
:param str theLicense: the license under which this library can be redistributed
:param sourcePath: a path where the sources for this library are located. A relative path is resolved against a JDK.
:param JavaCompliance jdkStandardizedSince: the JDK version in which the resources represented by this library are automatically
available at compile and runtime without augmenting the class path. If not provided, ``1.2`` is used.
:param module: If this JAR has become a module since JDK 9, the name of the module that contains the same classes as the JAR used to.
"""
def __init__(self, suite, name, path, deps, optional, theLicense, sourcePath=None, jdkStandardizedSince=None, module=None, **kwArgs):
BaseLibrary.__init__(self, suite, name, optional, theLicense, **kwArgs)
ClasspathDependency.__init__(self, **kwArgs)
self.path = path.replace('/', os.sep)
self.sourcePath = sourcePath.replace('/', os.sep) if sourcePath else None
self.deps = deps
self.jdkStandardizedSince = jdkStandardizedSince if jdkStandardizedSince else JavaCompliance(1.2)
self.module = module
def resolveDeps(self):
"""
Resolves symbolic dependency references to be Dependency objects.
"""
BaseLibrary.resolveDeps(self)
self._resolveDepsHelper(self.deps)
for d in self.deps:
if not d.isJdkLibrary():
abort('"dependencies" attribute of a JDK library can only contain other JDK libraries: ' + d.name, context=self)
def _comparison_key(self):
return self.path
def get_jdk_path(self, jdk, path):
# Exploded JDKs don't have a jre directory.
if exists(join(jdk.home, path)):
return join(jdk.home, path)
else:
return join(jdk.home, 'jre', path)
def is_available(self):
# This can not be answered without a JDK as context, see is_provided_by
return True
def is_provided_by(self, jdk):
"""
Determines if this library is provided by `jdk`.
:param JDKConfig jdk: the JDK to test
"""
return jdk.javaCompliance >= self.jdkStandardizedSince or exists(self.get_jdk_path(jdk, self.path))
def getBuildTask(self, args):
return NoOpTask(self, args)
def classpath_repr(self, jdk, resolve=True):
"""
Gets the absolute path of this library in `jdk` or None if this library is available
on the default class path of `jdk`. This method will abort if this library is
not provided by `jdk`.
:param JDKConfig jdk: the JDK from which to retrieve this library's jar file
:return: the absolute path of this library's jar file in `jdk`
"""
if not jdk:
abort('A JDK is required to resolve ' + self.name)
if jdk.javaCompliance >= self.jdkStandardizedSince:
return None
path = self.get_jdk_path(jdk, self.path)
if not exists(path):
abort(self.name + ' is not provided by ' + str(jdk))
return path
def get_source_path(self, jdk):
"""
Gets the path where the sources for this library are located.
:param JDKConfig jdk: the JDK against which a relative path is resolved
:return: the absolute path where the sources of this library are located
"""
if self.sourcePath is None:
return None
if isabs(self.sourcePath):
return self.sourcePath
path = self.get_jdk_path(jdk, self.sourcePath)
if not exists(path) and jdk.javaCompliance >= self.jdkStandardizedSince:
return self.get_jdk_path(jdk, 'lib/src.zip')
return path
def isJar(self):
return True
def _walk_deps_visit_edges(self, visited, in_edge, preVisit=None, visit=None, ignoredEdges=None, visitEdge=None):
deps = [(DEP_STANDARD, self.deps)]
self._walk_deps_visit_edges_helper(deps, visited, in_edge, preVisit, visit, ignoredEdges, visitEdge)
def get_declaring_module_name(self):
return getattr(self, 'module')
class Library(BaseLibrary, ClasspathDependency, _RewritableLibraryMixin):
"""
A library that is provided (built) by some third-party and made available via a URL.
A Library may have dependencies on other Libraries as expressed by the "deps" field.
A Library can only depend on another Library, and not on a Project or Distribution.
Additional attributes are an SHA1 checksum, location of (assumed) matching sources.
A Library is effectively an "import" into the suite since, unlike a Project or Distribution
it is not built by the Suite.
N.B. Not obvious but a Library can be an annotationProcessor
"""
def __init__(self, suite, name, path, optional, urls, sha1, sourcePath, sourceUrls, sourceSha1, deps, theLicense, ignore=False, **kwArgs):
BaseLibrary.__init__(self, suite, name, optional, theLicense, **kwArgs)
ClasspathDependency.__init__(self, **kwArgs)
# Perform URL and SHA1 rewriting before potentially generating cache path.
self.urls, self.sha1 = mx_urlrewrites._rewrite_urls_and_sha1(self.substVarsList(urls), sha1)
self.sourceUrls, self.sourceSha1 = mx_urlrewrites._rewrite_urls_and_sha1(self.substVarsList(sourceUrls), sourceSha1)
# Path and sourcePath can be generated from URL and SHA1 if needed.
self.path = self._normalize_path(path)
self.sourcePath = self._normalize_path(sourcePath)
if self.path == self.sourcePath and not self.sourceSha1:
self.sourceSha1 = self.sha1
self._optionally_generate_cache_pathAttr(None)
self._optionally_generate_cache_sourcePathAttr()
self.deps = deps
self.ignore = ignore
if not optional and not ignore:
if not exists(self.path) and not self.urls:
self.abort('Non-optional library {0} must either exist at {1} or specify URL list from which it can be retrieved'.format(self.name, self.path))
self._check_hash_specified(self.path, 'sha1')
if self.sourcePath:
self._check_hash_specified(self.sourcePath, 'sourceSha1')
for url in self.urls:
if url.endswith('/') != self.path.endswith(os.sep):
self.abort('Path for dependency directory must have a URL ending with "/":\npath={0}\nurl={1}'.format(self.path, url))
def resolveDeps(self):
"""
Resolves symbolic dependency references to be Dependency objects.
"""
BaseLibrary.resolveDeps(self)
self._resolveDepsHelper(self.deps)
def _walk_deps_visit_edges(self, visited, in_edge, preVisit=None, visit=None, ignoredEdges=None, visitEdge=None):
deps = [(DEP_STANDARD, self.deps)]
self._walk_deps_visit_edges_helper(deps, visited, in_edge, preVisit, visit, ignoredEdges, visitEdge)
def _comparison_key(self):
return (self.sha1, self.name)
def get_urls(self):
return self.urls
def is_available(self):
if not self.path:
return False
return exists(self.get_path(True))
def get_path(self, resolve):
sha1path = self.path + '.sha1'
bootClassPathAgent = hasattr(self, 'bootClassPathAgent') and getattr(self, 'bootClassPathAgent').lower() == 'true'
return download_file_with_sha1(self.name, self.path, self.urls, self.sha1, sha1path, resolve, not self.optional, canSymlink=not bootClassPathAgent)
def _check_download_needed(self):
sha1Path = self.path + '.sha1'
if not _check_file_with_sha1(self.path, self.urls, self.sha1, sha1Path):
return True
if self.sourcePath:
sourceSha1Path = self.sourcePath + '.sha1'
if not _check_file_with_sha1(self.sourcePath, self.sourceUrls, self.sourceSha1, sourceSha1Path):
return True
return False
def get_source_path(self, resolve):
if self.sourcePath is None:
return None
sourceSha1Path = self.sourcePath + '.sha1'
return download_file_with_sha1(self.name, self.sourcePath, self.sourceUrls, self.sourceSha1, sourceSha1Path, resolve, len(self.sourceUrls) != 0, sources=True)
def classpath_repr(self, resolve=True):
path = self.get_path(resolve)
if path and (exists(path) or not resolve):
return path
return None
def getBuildTask(self, args):
return LibraryDownloadTask(args, self)
def getArchivableResults(self, use_relpath=True, single=False):
path = realpath(self.get_path(False))
yield path, _map_to_maven_dist_name(self.name) + '.' + get_file_extension(path)
if not single:
src_path = self.get_source_path(False)
if src_path:
src_path = realpath(src_path)
ext = get_file_extension(src_path)
if 'src' not in ext and 'source' not in ext:
ext = "src." + ext
src_filename = _map_to_maven_dist_name(self.name) + '.' + ext
yield src_path, src_filename
def defined_java_packages(self):
if not hasattr(self, '_defined_java_packages'):
self._defined_java_packages = set()
with zipfile.ZipFile(self.get_path(True), 'r') as zf:
for zi in zf.infolist():
if zi.filename.endswith('.class'):
self._defined_java_packages.add(posixpath.dirname(zi.filename).replace('/', '.'))
return self._defined_java_packages
class LibraryDownloadTask(BuildTask):
def __init__(self, args, lib):
BuildTask.__init__(self, lib, args, 1) # TODO use all CPUs to avoid output problems?
def __str__(self):
return "Downloading {}".format(self.subject.name)
def logBuild(self, reason=None):
pass
def logSkip(self, reason=None):
pass
def needsBuild(self, newestInput):
sup = BuildTask.needsBuild(self, newestInput)
if sup[0]:
return sup
return (self.subject._check_download_needed(), None)
def newestOutput(self):
return TimeStampFile(_make_absolute(self.subject.path, self.subject.suite.dir))
def build(self):
self.subject.get_path(resolve=True)
if hasattr(self.subject, 'get_source_path'):
self.subject.get_source_path(resolve=True)
def clean(self, forBuild=False):
abort('should not reach here')
def cleanForbidden(self):
return True
### ~~~~~~~~~~~~~ Version control
"""
Abstracts the operations of the version control systems
Most operations take a vcdir as the dir in which to execute the operation
Most operations abort on error unless abortOnError=False, and return True
or False for success/failure.
Potentially long running operations should log the command. If '-v' is set
'run' will log the actual VC command. If '-V' is set the output from
the command should be logged.
"""
class VC(_with_metaclass(ABCMeta, object)):
"""
base class for all supported Distributed Version Control abstractions
:ivar str kind: the VC type identifier
:ivar str proper_name: the long name descriptor of the VCS
"""
def __init__(self, kind, proper_name): # pylint: disable=super-init-not-called
self.kind = kind
self.proper_name = proper_name
@staticmethod
def is_valid_kind(kind):
"""
tests if the given VCS kind is valid or not
:param str kind: the VCS kind
:return: True if a valid VCS kind
:rtype: bool
"""
for vcs in _vc_systems:
if kind == vcs.kind:
return True
return False
@staticmethod
def get_vc(vcdir, abortOnError=True):
"""
Given that `vcdir` is a repository directory, attempt to determine
what kind of VCS it is managed by. Return None if it cannot be determined.
:param str vcdir: a valid path to a version controlled directory
:param bool abortOnError: if an error occurs, abort mx operations
:return: an instance of VC or None if it cannot be determined
:rtype: :class:`VC`
"""
for vcs in _vc_systems:
vcs.check()
if vcs.is_this_vc(vcdir):
return vcs
if abortOnError:
abort('cannot determine VC for ' + vcdir)
else:
return None
@staticmethod
def get_vc_root(directory, abortOnError=True):
"""
Attempt to determine what kind of VCS is associated with `directory`.
Return the VC and its root directory or (None, None) if it cannot be determined.
If `directory` is contained in multiple VCS, the one with the deepest nesting is returned.
:param str directory: a valid path to a potentially version controlled directory
:param bool abortOnError: if an error occurs, abort mx operations
:return: a tuple containing an instance of VC or None if it cannot be
determined followed by the root of the repository or None.
:rtype: :class:`VC`, str
"""
best_root = None
best_vc = None
for vcs in _vc_systems:
vcs.check()
root = vcs.root(directory, abortOnError=False)
if root is None:
continue
root = realpath(os.path.abspath(root))
if best_root is None or len(root) > len(best_root): # prefer more nested vcs roots
best_root = root
best_vc = vcs
if abortOnError and best_root is None:
abort('cannot determine VC and root for ' + directory)
return best_vc, best_root
def check(self, abortOnError=True):
"""
Lazily check whether a particular VC system is available.
Return None if fails and abortOnError=False
"""
abort("VC.check is not implemented")
def init(self, vcdir, abortOnError=True):
"""
Initialize 'vcdir' for vc control
"""
abort(self.kind + " init is not implemented")
def is_this_vc(self, vcdir):
"""
Check whether vcdir is managed by this vc.
Return None if not, True if so
"""
abort(self.kind + " is_this_vc is not implemented")
def metadir(self):
"""
Return name of metadata directory
"""
abort(self.kind + " metadir is not implemented")
def add(self, vcdir, path, abortOnError=True):
"""
Add path to repo
"""
abort(self.kind + " add is not implemented")
def commit(self, vcdir, msg, abortOnError=True):
"""
commit with msg
"""
abort(self.kind + " commit is not implemented")
def tip(self, vcdir, abortOnError=True):
"""
Get the most recent changeset for repo at `vcdir`.
:param str vcdir: a valid repository path
:param bool abortOnError: if True abort on error
:return: most recent changeset for specified repository,
None if failure and `abortOnError` is False
:rtype: str
"""
abort(self.kind + " tip is not implemented")
def parent(self, vcdir, abortOnError=True):
"""
Get the parent changeset of the working directory for repo at `vcdir`.
:param str vcdir: a valid repository path
:param bool abortOnError: if True abort on error
:return: most recent changeset for specified repository,
None if failure and `abortOnError` is False
:rtype: str
"""
abort(self.kind + " id is not implemented")
def parent_info(self, vcdir, abortOnError=True):
"""
Get the dict with common commit information.
The following fields are provided in the dict:
- author: name <e-mail> (best-effort, might only contain a name)
- author-ts: unix timestamp (int)
- committer: name <e-mail> (best-effort, might only contain a name)
- committer-ts: unix timestamp (int)
- description: Commit description
:param str vcdir: a valid repository path
:param bool abortOnError: if True abort on error
:return: dictionary with information key-value pairs
:rtype: dict
"""
abort(self.kind + " parent_info is not implemented")
def _sanitize_parent_info(self, info):
"""Utility method to sanitize the parent_info dictionary.
Converts integer fields to actual ints, and strips.
"""
def strip(field):
info[field] = info[field].strip()
def to_int(field):
info[field] = int(info[field].strip())
to_int("author-ts")
to_int("committer-ts")
strip("author")
strip("committer")
return info
def active_branch(self, vcdir, abortOnError=True):
"""
Returns the active branch of the repository
:param str vcdir: a valid repository path
:param bool abortOnError: if True abort on error
:return: name of the branch
:rtype: str
"""
abort(self.kind + " active_branch is not implemented")
def update_to_branch(self, vcdir, branch, abortOnError=True):
"""
Update to a branch and make it active.
:param str vcdir: a valid repository path
:param str branch: a branch name
:param bool abortOnError: if True abort on error
:return: True if update performed, False otherwise
"""
abort(self.kind + " update_to_branch is not implemented")
def is_release_from_tags(self, vcdir, prefix):
"""
Returns True if the release version derived from VC tags matches the pattern <number>(.<number>)*.
:param str vcdir: a valid repository path
:param str prefix: the prefix
:return: True if release
:rtype: bool
"""
_release_version = self.release_version_from_tags(vcdir=vcdir, prefix=prefix) #pylint: disable=assignment-from-no-return
return _release_version and re.match(r'^[0-9]+[0-9.]+$', _release_version)
def release_version_from_tags(self, vcdir, prefix, snapshotSuffix='dev', abortOnError=True):
"""
Returns a release version derived from VC tags that match the pattern <prefix>-<number>(.<number>)*
or None if no such tags exist.
:param str vcdir: a valid repository path
:param str prefix: the prefix
:param str snapshotSuffix: the snapshot suffix
:param bool abortOnError: if True abort on mx error
:return: a release version
:rtype: str
"""
abort(self.kind + " release_version_from_tags is not implemented")
def parent_tags(self, vcdir):
"""
Returns the tags of the parent revision.
:param str vcdir: a valid repository path
:rtype: list of str
"""
abort(self.kind + " parent_tags is not implemented")
@staticmethod
def _version_string_helper(current_revision, tag_revision, tag_version, snapshotSuffix):
def version_str(version_list):
return '.'.join((str(a) for a in version_list))
if current_revision == tag_revision:
return version_str(tag_version)
else:
next_version = list(tag_version)
next_version[-1] += 1
return version_str(next_version) + '-' + snapshotSuffix
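# Example of the scheme implemented above (revisions are hypothetical): if the
# current revision is the tagged one, tag_version [21, 3, 0] yields '21.3.0';
# otherwise the last component is bumped and the snapshot suffix appended,
# giving '21.3.1-dev'.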
@staticmethod
def _find_metadata_dir(start, name):
d = start
while len(d) != 0 and os.path.splitdrive(d)[1] != os.sep:
subdir = join(d, name)
if exists(subdir):
return subdir
d = dirname(d)
return None
def clone(self, url, dest=None, rev=None, abortOnError=True, **extra_args):
"""
Clone the repo at `url` to `dest` using `rev`
:param str url: the repository url
:param str dest: the path to destination, if None the destination is
chosen by the vcs
:param str rev: the desired revision, if None use tip
:param dict extra_args: for subclass-specific information in/out
:return: True if the operation is successful, False otherwise
:rtype: bool
"""
abort(self.kind + " clone is not implemented")
def _log_clone(self, url, dest=None, rev=None):
msg = 'Cloning ' + url
if rev:
msg += ' revision ' + rev
if dest:
msg += ' to ' + dest
msg += ' with ' + self.proper_name
log(msg)
def pull(self, vcdir, rev=None, update=False, abortOnError=True):
"""
Pull a given changeset (the head if `rev` is None), optionally updating
the working directory. Updating is only done if something was pulled.
If there were no new changesets or `rev` was already known locally,
no update is performed.
:param str vcdir: a valid repository path
:param str rev: the desired revision, if None use tip
:param bool abortOnError: if True abort on mx error
:return: True if the operation is successful, False otherwise
:rtype: bool
"""
abort(self.kind + " pull is not implemented")
def _log_pull(self, vcdir, rev):
msg = 'Pulling'
if rev:
msg += ' revision ' + rev
else:
msg += ' head updates'
msg += ' in ' + vcdir
msg += ' with ' + self.proper_name
log(msg)
def can_push(self, vcdir, strict=True):
"""
Check if `vcdir` can be pushed.
:param str vcdir: a valid repository path
:param bool strict: if set no uncommitted changes or unadded are allowed
:return: True if we can push, False otherwise
:rtype: bool
"""
def default_push(self, vcdir, abortOnError=True):
"""
get the default push target for this repo
:param str vcdir: a valid repository path
:param bool abortOnError: if True abort on mx error
:return: default push target for repo
:rtype: str
"""
abort(self.kind + " default_push is not implemented")
def default_pull(self, vcdir, abortOnError=True):
"""
get the default pull target for this repo
:param str vcdir: a valid repository path
:param bool abortOnError: if True abort on mx error
:return: default pull target for repo
:rtype: str
"""
abort(self.kind + " default_pull is not implemented")
def incoming(self, vcdir, abortOnError=True):
"""
list incoming changesets
:param str vcdir: a valid repository path
:param bool abortOnError: if True abort on mx error
:return: most recent changeset for specified repository,
None if failure and `abortOnError` is False
:rtype: str
"""
abort(self.kind + ": outgoing is not implemented")
def outgoing(self, vcdir, dest=None, abortOnError=True):
"""
list outgoing changesets to 'dest' or default-push if None
:param str vcdir: a valid repository path
:param bool abortOnError: if True abort on mx error
:return: most recent changeset for specified repository,
None if failure and `abortOnError` is False
:rtype: str
"""
abort(self.kind + ": outgoing is not implemented")
def push(self, vcdir, dest=None, rev=None, abortOnError=False):
"""
Push `vcdir` at rev `rev` to default if `dest`
is None, else push to `dest`.
:param str vcdir: a valid repository path
:param str rev: the desired revision
:param str dest: the path to destination
:param bool abortOnError: if True abort on mx error
:return: True on success, False otherwise
:rtype: bool
"""
abort(self.kind + ": push is not implemented")
def _log_push(self, vcdir, dest, rev):
msg = 'Pushing changes'
if rev:
msg += ' revision ' + rev
msg += ' from ' + vcdir
if dest:
msg += ' to ' + dest
else:
msg += ' to default'
msg += ' with ' + self.proper_name
log(msg)
def update(self, vcdir, rev=None, mayPull=False, clean=False, abortOnError=False):
"""
update the `vcdir` working directory.
If `rev` is not specified, update to the tip of the current branch.
If `rev` is specified, `mayPull` controls whether a pull will be attempted if
`rev` can not be found locally.
If `clean` is True, uncommitted changes will be discarded (no backup!).
:param str vcdir: a valid repository path
:param str rev: the desired revision
:param bool mayPull: flag to control whether to pull or not
:param bool clean: discard uncommitted changes without backing up
:param bool abortOnError: if True abort on mx error
:return: True on success, False otherwise
:rtype: bool
"""
abort(self.kind + " update is not implemented")
def isDirty(self, vcdir, abortOnError=True):
"""
check whether the working directory is dirty
:param str vcdir: a valid repository path
:param bool abortOnError: if True abort on mx error
:return: True if the working directory is dirty, False otherwise
:rtype: bool
"""
abort(self.kind + " isDirty is not implemented")
def status(self, vcdir, abortOnError=True):
"""
report the status of the repository
:param str vcdir: a valid repository path
:param bool abortOnError: if True abort on mx error
:return: True on success, False otherwise
:rtype: bool
"""
abort(self.kind + " status is not implemented")
def locate(self, vcdir, patterns=None, abortOnError=True):
"""
Return a list of paths under vc control that match `patterns`
:param str vcdir: a valid repository path
:param patterns: a list of patterns
:type patterns: str or None or list
:param bool abortOnError: if True abort on mx error
:return: a list of paths under vc control
:rtype: list
"""
abort(self.kind + " locate is not implemented")
def bookmark(self, vcdir, name, rev, abortOnError=True):
"""
Place a bookmark at a given revision
:param str vcdir: a valid repository path
:param str name: the name of the bookmark
:param str rev: the desired revision
:param bool abortOnError: if True abort on mx error
:return: True on success, False otherwise
:rtype: bool
"""
abort(self.kind + " bookmark is not implemented")
def latest(self, vcdir, rev1, rev2, abortOnError=True):
"""
Returns the latest of 2 revisions.
The revisions should be related in the DAG.
:param str vcdir: a valid repository path
:param str rev1: the first revision
:param str rev2: the second revision
:param bool abortOnError: if True abort on mx error
:return: the latest of the 2 revisions
:rtype: str or None
"""
abort(self.kind + " latest is not implemented")
def exists(self, vcdir, rev):
"""
Check if a given revision exists in the repository.
:param str vcdir: a valid repository path
:param str rev: the revision to check for
:return: True if revision exists, False otherwise
:rtype: bool
"""
abort(self.kind + " exists is not implemented")
def root(self, directory, abortOnError=True):
"""
Returns the path to the root of the repository that contains `directory`.
:param str directory: a path to a directory contained in a repository.
:param bool abortOnError: if True abort on mx error
:return: The path to the repository's root
:rtype: str or None
"""
abort(self.kind + " root is not implemented")
class OutputCapture:
def __init__(self):
self.data = ""
def __call__(self, data):
self.data += data
def __repr__(self):
return self.data
class LinesOutputCapture:
def __init__(self):
self.lines = []
def __call__(self, data):
self.lines.append(data.rstrip())
def __repr__(self):
return os.linesep.join(self.lines)
class TeeOutputCapture:
def __init__(self, underlying):
self.underlying = underlying
def __call__(self, data):
log(data.rstrip())
self.underlying(data)
def __repr__(self):
if isinstance(self.underlying, (OutputCapture, LinesOutputCapture)):
return repr(self.underlying)
return object.__repr__(self)
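# Usage sketch for the capture helpers above (the command is only an example):
# pass an instance as the 'out' callback of run() and read the collected text
# afterwards.
#
#     out = OutputCapture()
#     run(['git', 'status'], nonZeroIsFatal=False, out=out)
#     log(out.data)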
class HgConfig(VC):
has_hg = None
"""
Encapsulates access to Mercurial (hg)
"""
def __init__(self):
VC.__init__(self, 'hg', 'Mercurial')
self.missing = 'no hg executable found'
def check(self, abortOnError=True):
# Mercurial does lazy checking before use of the hg command itself
return self
def check_for_hg(self, abortOnError=True):
if HgConfig.has_hg is None:
try:
_check_output_str(['hg'])
HgConfig.has_hg = True
except OSError:
HgConfig.has_hg = False
if not HgConfig.has_hg:
if abortOnError:
abort(self.missing)
return self if HgConfig.has_hg else None
def run(self, *args, **kwargs):
# Ensure hg exists before executing the command
self.check_for_hg()
return run(*args, **kwargs)
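# Note: update_to_branch and parent_info below call self.hg_command, which is not
# defined elsewhere in this class. The helper below is a hedged sketch that mirrors
# GitConfig.git_command further down: it prefixes the arguments with 'hg -R <vcdir>',
# captures the output and returns the captured stdout (or None on error). Its exact
# signature and return value are assumptions, not taken from the original source.
def hg_command(self, vcdir, args, abortOnError=False, quiet=True):
args = ['hg', '-R', vcdir] + args
if not quiet:
print(' '.join(map(pipes.quote, args)))
out = OutputCapture()
err = OutputCapture()
rc = self.run(args, nonZeroIsFatal=False, out=out, err=err)
if rc in (0, 1):
return out.data
if abortOnError:
abort("Running '{}' returned '{}'.\nStdout:\n{}Stderr:\n{}".format(' '.join(map(pipes.quote, args)), rc, out.data, err.data))
return None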
def init(self, vcdir, abortOnError=True):
return self.run(['hg', 'init', vcdir], nonZeroIsFatal=abortOnError) == 0
def is_this_vc(self, vcdir):
hgdir = join(vcdir, self.metadir())
return os.path.isdir(hgdir)
def active_branch(self, vcdir, abortOnError=True):
out = OutputCapture()
cmd = ['hg', 'bookmarks']
rc = self.run(cmd, nonZeroIsFatal=False, cwd=vcdir, out=out)
if rc == 0:
for line in out.data.splitlines():
if line.startswith(' * '):
return line[3:].split(" ")[0]
if abortOnError:
abort('no active hg bookmark found')
return None
def update_to_branch(self, vcdir, branch, abortOnError=True):
cmd = ['update', branch]
return self.hg_command(vcdir, cmd, abortOnError=abortOnError) == 0
def add(self, vcdir, path, abortOnError=True):
return self.run(['hg', '-q', '-R', vcdir, 'add', path]) == 0
def commit(self, vcdir, msg, abortOnError=True):
return self.run(['hg', '-R', vcdir, 'commit', '-m', msg]) == 0
def tip(self, vcdir, abortOnError=True):
self.check_for_hg()
# We don't use run because this can be called very early before _opts is set
try:
return _check_output_str(['hg', 'tip', '-R', vcdir, '--template', '{node}'])
except subprocess.CalledProcessError:
if abortOnError:
abort('hg tip failed')
else:
return None
def parent(self, vcdir, abortOnError=True):
self.check_for_hg()
# We don't use run because this can be called very early before _opts is set
try:
out = _check_output_str(['hg', '-R', vcdir, 'parents', '--template', '{node}\n'])
parents = out.rstrip('\n').split('\n')
if len(parents) != 1:
if abortOnError:
abort('hg parents returned {} parents (expected 1)'.format(len(parents)))
return None
return parents[0]
except subprocess.CalledProcessError:
if abortOnError:
abort('hg parents failed')
else:
return None
def parent_info(self, vcdir, abortOnError=True):
out = self.hg_command(vcdir, ["log", "-r", ".", "--template", "{author}|||{date|hgdate}"], abortOnError=abortOnError)
author, date = out.split("|||")
ts, _ = date.split(" ")
return self._sanitize_parent_info({
"author": author,
"author-ts": ts,
"committer": author,
"committer-ts": ts,
})
def release_version_from_tags(self, vcdir, prefix, snapshotSuffix='dev', abortOnError=True):
prefix = prefix + '-'
try:
tagged_ids_out = _check_output_str(['hg', '-R', vcdir, 'log', '--rev', 'ancestors(.) and tag()', '--template', '{tags},{rev}\n'])
tagged_ids = [x.split(',') for x in tagged_ids_out.split('\n') if x]
current_id = _check_output_str(['hg', '-R', vcdir, 'log', '--template', '{rev}\n', '--rev', '.']).strip()
except subprocess.CalledProcessError as e:
if abortOnError:
abort('hg tags or hg tip failed: ' + str(e))
else:
return None
if tagged_ids and current_id:
tag_re = re.compile(r"^{0}[0-9]+\.[0-9]+$".format(prefix))
tagged_ids = [(_first((tag for tag in tags.split(' ') if tag_re.match(tag))), revid) for tags, revid in tagged_ids]
tagged_ids = [(tag, revid) for tag, revid in tagged_ids if tag]
version_ids = [([int(x) for x in tag[len(prefix):].split('.')], revid) for tag, revid in tagged_ids]
version_ids = sorted(version_ids, key=lambda e: e[0], reverse=True)
most_recent_tag_version, most_recent_tag_id = version_ids[0]
return VC._version_string_helper(current_id, most_recent_tag_id, most_recent_tag_version, snapshotSuffix)
return None
def parent_tags(self, vcdir):
try:
_tags = _check_output_str(['hg', '-R', vcdir, 'log', '--template', '{tags}', '--rev', '.']).strip().split(' ')
return [tag for tag in _tags if tag != 'tip']
except subprocess.CalledProcessError as e:
abort('hg log failed: ' + str(e))
def metadir(self):
return '.hg'
def clone(self, url, dest=None, rev=None, abortOnError=True, **extra_args):
cmd = ['hg', 'clone']
if rev:
cmd.append('-r')
cmd.append(rev)
cmd.append(url)
if dest:
cmd.append(dest)
self._log_clone(url, dest, rev)
out = OutputCapture()
rc = self.run(cmd, nonZeroIsFatal=abortOnError, out=out)
logvv(out.data)
return rc == 0
def incoming(self, vcdir, abortOnError=True):
out = OutputCapture()
rc = self.run(['hg', '-R', vcdir, 'incoming'], nonZeroIsFatal=False, out=out)
if rc in (0, 1):
return out.data
else:
if abortOnError:
abort('incoming returned ' + str(rc))
return None
def outgoing(self, vcdir, dest=None, abortOnError=True):
out = OutputCapture()
cmd = ['hg', '-R', vcdir, 'outgoing']
if dest:
cmd.append(dest)
rc = self.run(cmd, nonZeroIsFatal=False, out=out)
if rc in (0, 1):
return out.data
else:
if abortOnError:
abort('outgoing returned ' + str(rc))
return None
def pull(self, vcdir, rev=None, update=False, abortOnError=True):
cmd = ['hg', 'pull', '-R', vcdir]
if rev:
cmd.append('-r')
cmd.append(rev)
if update:
cmd.append('-u')
self._log_pull(vcdir, rev)
out = OutputCapture()
rc = self.run(cmd, nonZeroIsFatal=abortOnError, out=out)
logvv(out.data)
return rc == 0
def can_push(self, vcdir, strict=True, abortOnError=True):
out = OutputCapture()
rc = self.run(['hg', '-R', vcdir, 'status'], nonZeroIsFatal=abortOnError, out=out)
if rc == 0:
output = out.data
if strict:
return output == ''
else:
if len(output) > 0:
for line in output.split('\n'):
if len(line) > 0 and not line.startswith('?'):
return False
return True
else:
return False
def _path(self, vcdir, name, abortOnError=True):
out = OutputCapture()
rc = self.run(['hg', '-R', vcdir, 'paths'], nonZeroIsFatal=abortOnError, out=out)
if rc == 0:
output = out.data
prefix = name + ' = '
for line in output.split(os.linesep):
if line.startswith(prefix):
return line[len(prefix):]
if abortOnError:
abort("no '{}' path for repository {}".format(name, vcdir))
return None
def default_push(self, vcdir, abortOnError=True):
push = self._path(vcdir, 'default-push', abortOnError=False)
if push:
return push
return self.default_pull(vcdir, abortOnError=abortOnError)
def default_pull(self, vcdir, abortOnError=True):
return self._path(vcdir, 'default', abortOnError=abortOnError)
def push(self, vcdir, dest=None, rev=None, abortOnError=False):
cmd = ['hg', '-R', vcdir, 'push']
if rev:
cmd.append('-r')
cmd.append(rev)
if dest:
cmd.append(dest)
self._log_push(vcdir, dest, rev)
out = OutputCapture()
rc = self.run(cmd, nonZeroIsFatal=abortOnError, out=out)
logvv(out.data)
return rc == 0
def update(self, vcdir, rev=None, mayPull=False, clean=False, abortOnError=False):
if rev and mayPull and not self.exists(vcdir, rev):
self.pull(vcdir, rev=rev, update=False, abortOnError=abortOnError)
cmd = ['hg', '-R', vcdir, 'update']
if rev:
cmd += ['-r', rev]
if clean:
cmd += ['-C']
return self.run(cmd, nonZeroIsFatal=abortOnError) == 0
def locate(self, vcdir, patterns=None, abortOnError=True):
if patterns is None:
patterns = []
elif not isinstance(patterns, list):
patterns = [patterns]
out = LinesOutputCapture()
rc = self.run(['hg', 'locate', '-R', vcdir] + patterns, out=out, nonZeroIsFatal=False)
if rc == 1:
# hg locate returns 1 if no matches were found
return []
elif rc == 0:
return out.lines
else:
if abortOnError:
abort('locate returned: ' + str(rc))
else:
return None
def isDirty(self, vcdir, abortOnError=True):
self.check_for_hg()
try:
return len(_check_output_str(['hg', 'status', '-q', '-R', vcdir])) > 0
except subprocess.CalledProcessError:
if abortOnError:
abort('failed to get status')
else:
return None
def status(self, vcdir, abortOnError=True):
cmd = ['hg', '-R', vcdir, 'status']
return self.run(cmd, nonZeroIsFatal=abortOnError) == 0
def bookmark(self, vcdir, name, rev, abortOnError=True):
ret = run(['hg', '-R', vcdir, 'bookmark', '-r', rev, '-i', '-f', name], nonZeroIsFatal=False)
if ret != 0:
logging = abort if abortOnError else warn
logging("Failed to create bookmark {0} at revision {1} in {2}".format(name, rev, vcdir))
def latest(self, vcdir, rev1, rev2, abortOnError=True):
#hg log -r 'heads(ancestors(26030a079b91) and ancestors(6245feb71195))' --template '{node}\n'
self.check_for_hg()
try:
revs = [rev1, rev2]
revsetIntersectAncestors = ' or '.join(('ancestors({})'.format(rev) for rev in revs))
revset = 'heads({})'.format(revsetIntersectAncestors)
out = _check_output_str(['hg', '-R', vcdir, 'log', '-r', revset, '--template', '{node}\n'])
parents = out.rstrip('\n').split('\n')
if len(parents) != 1:
if abortOnError:
abort('hg log returned {} possible latest (expected 1)'.format(len(parents)))
return None
return parents[0]
except subprocess.CalledProcessError:
if abortOnError:
abort('latest failed')
else:
return None
def exists(self, vcdir, rev):
self.check_for_hg()
try:
sentinel = 'exists'
out = _check_output_str(['hg', '-R', vcdir, 'log', '-r', 'present({})'.format(rev), '--template', sentinel])
return sentinel in out
except subprocess.CalledProcessError:
abort('exists failed')
def root(self, directory, abortOnError=True):
if VC._find_metadata_dir(directory, '.hg'):
if self.check_for_hg(abortOnError=True):
try:
out = _check_output_str(['hg', 'root'], cwd=directory, stderr=subprocess.STDOUT)
return out.strip()
except subprocess.CalledProcessError:
if abortOnError:
abort('`hg root` failed')
elif abortOnError:
abort('No .hg directory')
return None
class GitConfig(VC):
has_git = None
"""
Encapsulates access to Git (git)
"""
def __init__(self):
VC.__init__(self, 'git', 'Git')
self.missing = 'No Git executable found. You must install Git in order to proceed!'
self.object_cache_mode = get_env('MX_GIT_CACHE') or None
if self.object_cache_mode not in [None, 'reference', 'dissociated', 'refcache']:
abort("MX_GIT_CACHE was '{}' expected '', 'reference', 'dissociated' or 'refcache'")
def check(self, abortOnError=True):
return self
def check_for_git(self, abortOnError=True):
if GitConfig.has_git is None:
try:
_check_output_str(['git', '--version'])
GitConfig.has_git = True
except OSError:
GitConfig.has_git = False
if not GitConfig.has_git:
if abortOnError:
abort(self.missing)
return self if GitConfig.has_git else None
def run(self, *args, **kwargs):
# Ensure git exists before executing the command
self.check_for_git()
return run(*args, **kwargs)
def init(self, vcdir, abortOnError=True, bare=False):
cmd = ['git', 'init']
if bare:
cmd.append('--bare')
cmd.append(vcdir)
return self.run(cmd, nonZeroIsFatal=abortOnError) == 0
def is_this_vc(self, vcdir):
gitdir = join(vcdir, self.metadir())
# check for existence to also cover git submodules
return os.path.exists(gitdir)
def git_command(self, vcdir, args, abortOnError=False, quiet=True):
args = ['git', '--no-pager'] + args
if not quiet:
print(' '.join(map(pipes.quote, args)))
out = OutputCapture()
err = OutputCapture()
rc = self.run(args, cwd=vcdir, nonZeroIsFatal=False, out=out, err=err)
if rc in (0, 1):
return out.data
else:
if abortOnError:
abort("Running '{}' in '{}' returned '{}'.\nStdout:\n{}Stderr:\n{}".format(' '.join(map(pipes.quote, args)), vcdir, rc, out.data, err.data))
return None
def add(self, vcdir, path, abortOnError=True):
# git add does not support quiet mode, so we capture the output instead ...
out = OutputCapture()
return self.run(['git', 'add', path], cwd=vcdir, out=out) == 0
def commit(self, vcdir, msg, abortOnError=True):
return self.run(['git', 'commit', '-a', '-m', msg], cwd=vcdir) == 0
def tip(self, vcdir, abortOnError=True):
"""
Get the most recent changeset for repo at `vcdir`.
:param str vcdir: a valid repository path
:param bool abortOnError: if True abort on mx error
:return: most recent changeset for specified repository,
None if failure and `abortOnError` is False
:rtype: str
"""
self.check_for_git()
# We don't use run because this can be called very early before _opts is set
try:
return _check_output_str(['git', 'rev-list', 'HEAD', '-1'], cwd=vcdir)
except subprocess.CalledProcessError:
if abortOnError:
abort('git rev-list HEAD failed')
else:
return None
def parent(self, vcdir, abortOnError=True):
"""
Get the parent changeset of the working directory for repo at `vcdir`.
:param str vcdir: a valid repository path
:param bool abortOnError: if True abort on mx error
:return: most recent changeset for specified repository,
None if failure and `abortOnError` is False
:rtype: str
"""
self.check_for_git()
# We don't use run because this can be called very early before _opts is set
if exists(join(vcdir, self.metadir(), 'MERGE_HEAD')):
if abortOnError:
abort('More than one parent exists during merge')
return None
try:
out = _check_output_str(['git', 'show', '--pretty=format:%H', "-s", 'HEAD'], cwd=vcdir)
return out.strip()
except subprocess.CalledProcessError:
if abortOnError:
abort('git show failed')
else:
return None
def parent_info(self, vcdir, abortOnError=True):
out = self.git_command(vcdir, ["show", "-s", "--format=%an <%ae>|||%at|||%cn <%ce>|||%ct|||%s", "HEAD"], abortOnError=abortOnError)
author, author_ts, committer, committer_ts, description = out.split("|||")
return self._sanitize_parent_info({
"author": author,
"author-ts": author_ts,
"committer": committer,
"committer-ts": committer_ts,
"description": description,
})
def _tags(self, vcdir, prefix, abortOnError=True):
"""
Get the list of tags starting with `prefix` in the repository at `vcdir` that are ancestors
of the current HEAD.
:param str vcdir: a valid repository path
:param str prefix: the prefix used to filter the tags
:param bool abortOnError: if True abort on mx error
:rtype: list of str
"""
_tags_prefix = 'tag: '
try:
tags_out = _check_output_str(['git', 'log', '--simplify-by-decoration', '--pretty=format:%d', 'HEAD'], cwd=vcdir)
tags_out = tags_out.strip()
tags = []
for line in tags_out.split('\n'):
line = line.strip()
if not line:
continue
assert line.startswith('(') and line.endswith(')'), "Unexpected format: " + line
search = _tags_prefix + prefix
for decoration in line[1:-1].split(', '):
if decoration.startswith(search):
tags.append(decoration[len(_tags_prefix):])
return tags
except subprocess.CalledProcessError as e:
if abortOnError:
abort('git tag failed: ' + str(e))
else:
return None
def _commitish_revision(self, vcdir, commitish, abortOnError=True):
"""
Get the commit hash for a commit-ish specifier.
:param str vcdir: a valid repository path
:param str commitish: a commit-ish specifier
:param bool abortOnError: if True abort on mx error
:rtype: str
"""
try:
if not commitish.endswith('^{commit}'):
commitish += '^{commit}'
rev = _check_output_str(['git', 'show', '-s', '--format=%H', commitish], cwd=vcdir)
res = rev.strip()
assert re.match(r'[0-9a-f]{40}', res) is not None, 'output is not a commit hash: ' + res
return res
except subprocess.CalledProcessError as e:
if abortOnError:
abort('git show failed: ' + str(e))
else:
return None
def _latest_revision(self, vcdir, abortOnError=True):
return self._commitish_revision(vcdir, 'HEAD', abortOnError=abortOnError)
def release_version_from_tags(self, vcdir, prefix, snapshotSuffix='dev', abortOnError=True):
"""
Returns a release version derived from VC tags that match the pattern <prefix>-<number>(.<number>)*
or None if no such tags exist.
:param str vcdir: a valid repository path
:param str prefix: the prefix
:param str snapshotSuffix: the snapshot suffix
:param bool abortOnError: if True abort on mx error
:return: a release version
:rtype: str
"""
tag_prefix = prefix + '-'
v_re = re.compile("^" + re.escape(tag_prefix) + r"\d+(?:\.\d+)*$")
matching_tags = [t for t in self._tags(vcdir, tag_prefix, abortOnError=abortOnError) if v_re.match(t)]
if matching_tags:
latest_rev = self._latest_revision(vcdir, abortOnError=abortOnError)
if latest_rev:
matching_versions = [[int(x) for x in tag[len(tag_prefix):].split('.')] for tag in matching_tags]
matching_versions = sorted(matching_versions, reverse=True)
most_recent_version = matching_versions[0]
most_recent_tag = tag_prefix + '.'.join((str(x) for x in most_recent_version))
most_recent_tag_revision = self._commitish_revision(vcdir, most_recent_tag)
return VC._version_string_helper(latest_rev, most_recent_tag_revision, most_recent_version, snapshotSuffix)
return None
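# Illustrative example (tag names are hypothetical): with prefix 'example' and an
# ancestor tagged 'example-21.1', this returns '21.1' when HEAD is the tagged commit
# and '21.2-dev' (with the default snapshotSuffix) once HEAD has moved past the tag.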
def parent_tags(self, vcdir):
try:
return _check_output_str(['git', 'tag', '--list', '--points-at', 'HEAD'], cwd=vcdir).strip().split('\r\n')
except subprocess.CalledProcessError as e:
abort('git tag failed: ' + str(e))
@classmethod
def _head_to_ref(cls, head_name):
return 'refs/heads/{0}'.format(head_name)
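# e.g. _head_to_ref('master') -> 'refs/heads/master'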
@classmethod
def set_branch(cls, vcdir, branch_name, branch_commit='HEAD', with_remote=True):
"""
        Sets branch_name to branch_commit. If with_remote is True (the default), the
        change is also propagated to origin, but only if the given branch_commit is
        ahead of its remote counterpart (if one exists).
        :param vcdir: the local git repository directory
        :param branch_name: the name the branch should have
        :param branch_commit: the commit id the branch should point to
        :param with_remote: if True (default) the change is propagated to origin
        :return: 0 if setting the branch was successful
"""
run(['git', 'branch', '--no-track', '--force', branch_name, branch_commit], cwd=vcdir)
if not with_remote:
return 0
# guaranteed to fail if branch_commit is behind its remote counterpart
return run(['git', 'push', 'origin', cls._head_to_ref(branch_name)], nonZeroIsFatal=False, cwd=vcdir)
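    # Usage sketch (illustrative; repository path is a placeholder): pin a branch to
    # the current HEAD and push it to origin. A non-zero return code typically means
    # the remote branch is already ahead, because the push above is not forced.
    #   rc = GitConfig.set_branch('/path/to/repo', 'binary')
    #   rc = GitConfig.set_branch('/path/to/repo', 'binary', 'HEAD~1', with_remote=False)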
@classmethod
def get_matching_branches(cls, repository, brefs, vcdir=None):
"""
Get dict of branch_name, commit_id entries for branches that match given brefs pattern.
        If vcdir is given, the command is run as if it started in vcdir (this allows
        using git remotes defined in the local repository).
:param repository: either URL of git repo or remote that is defined in the local repo
:param brefs: branch name or branch pattern
:param vcdir: local repo directory
:return: dict of branch_name, commit_id entries
"""
command = ['git']
if vcdir:
command += ['-C', vcdir]
command += ['ls-remote', repository, cls._head_to_ref(brefs)]
result = dict()
try:
head_ref_prefix_length = len(cls._head_to_ref(''))
for line in _check_output_str(command).splitlines():
commit_id, branch_name = line.split('\t')
result[branch_name[head_ref_prefix_length:]] = commit_id
except subprocess.CalledProcessError:
pass
return result
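    # Usage sketch (illustrative; remote and branch names are placeholders): list the
    # commits that all 'binary_*' branches of a remote point to, resolving the remote
    # through a local checkout.
    #   branches = GitConfig.get_matching_branches('origin', 'binary_*', vcdir='/path/to/repo')
    #   # e.g. {'binary_linux-amd64': '4f2e89a0...', 'binary_darwin-amd64': '9b1c0d2e...'}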
@classmethod
def get_branch_remote(cls, remote_url, branch_name):
"""
        Get the commit_id that the branch given by remote_url and branch_name points to.
:param remote_url: the URL of the git repo that contains the branch
:param branch_name: the name of the branch whose commit we are interested in
        :return: the commit_id the branch points to, or None
"""
branches = cls.get_matching_branches(remote_url, branch_name)
if len(branches) != 1:
return None
return next(iter(branches.values()))
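    # Usage sketch (illustrative; URL and branch name are placeholders): resolve the
    # commit a single remote branch points to, or None if the branch does not exist.
    #   commit = GitConfig.get_branch_remote('https://example.org/repo.git', 'binary')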
def metadir(self):
return '.git'
def _local_cache_repo(self):
cache_path = get_env('MX_GIT_CACHE_DIR') or join(dot_mx_dir(), 'git-cache')
if not exists(cache_path) or len(os.listdir(cache_path)) == 0:
self.init(cache_path, bare=True)
return cache_path
def _locked_cmd(self, repo, cmd, read_lock=False):
use_lock = self.object_cache_mode == 'refcache' and flock_cmd() is not None
if use_lock:
lock_cmd = [flock_cmd()]
if read_lock:
lock_cmd.append("-s")
lock_cmd.append(join(repo, 'lock'))
cmd = lock_cmd + cmd
return cmd
def _clone(self, url, dest=None, branch=None, rev=None, abortOnError=True, **extra_args):
hashed_url = hashlib.sha1(_encode(url)).hexdigest()
cmd = ['git', 'clone']
if rev and self.object_cache_mode == 'refcache' and GitConfig._is_hash(rev):
cache = self._local_cache_repo()
if not self.exists(cache, rev):
log("Fetch from " + url + " into cache " + cache)
self._fetch(cache, url, ['+refs/heads/*:refs/remotes/' + hashed_url + '/*'], prune=True, lock=True, include_tags=False)
cmd += ['--no-checkout', '--shared', '--origin', 'cache',
'-c', 'gc.auto=0',
'-c', 'remote.cache.fetch=+refs/remotes/' + hashed_url + '/*:refs/remotes/cache/*',
'-c', 'remote.origin.url=' + url,
'-c', 'remote.origin.fetch=+refs/heads/*:refs/remotes/origin/*', cache]
else:
if branch:
cmd += ['--branch', branch]
if self.object_cache_mode:
cache = self._local_cache_repo()
log("Fetch from " + url + " into cache " + cache)
self._fetch(cache, url, '+refs/heads/*:refs/remotes/' + hashed_url + '/*', prune=True, lock=True)
cmd += ['--reference', cache]
if self.object_cache_mode == 'dissociated':
cmd += ['--dissociate']
cmd.append(url)
if dest:
cmd.append(dest)
self._log_clone(url, dest, rev)
out = OutputCapture()
if self.object_cache_mode:
cmd = self._locked_cmd(self._local_cache_repo(), cmd, read_lock=True)
rc = self.run(cmd, nonZeroIsFatal=abortOnError, out=out)
logvv(out.data)
return rc == 0
def _reset_rev(self, rev, dest=None, abortOnError=True, **extra_args):
cmd = ['git']
cwd = None if dest is None else dest
cmd.extend(['reset', '--hard', rev])
out = OutputCapture()
rc = self.run(cmd, nonZeroIsFatal=abortOnError, cwd=cwd, out=out)
logvv(out.data)
return rc == 0
hash_re = re.compile(r"^[0-9a-f]{7,40}$")
@staticmethod
def _is_hash(rev):
return rev and bool(GitConfig.hash_re.match(rev))
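    # Illustrative examples of the hash check used by clone() below to distinguish
    # revisions from branch names (values are made up):
    #   GitConfig._is_hash('4f2e89a')                                   # True: abbreviated hash
    #   GitConfig._is_hash('4f2e89a0c1d2e3f4a5b6c7d8e9f0a1b2c3d4e5f6')  # True: full hash
    #   GitConfig._is_hash('master')                                    # False: treated as a branch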
def clone(self, url, dest=None, rev='master', abortOnError=True, **extra_args):
"""
Clone the repo at `url` to `dest` using `rev`
:param str url: the repository url
:param str dest: the path to destination, if None the destination is
chosen by the vcs
:param str rev: the desired revision, if None use tip
:param dict extra_args: for subclass-specific information in/out
:return: True if the operation is successful, False otherwise
:rtype: bool
"""
# TODO: speedup git clone
# git clone git://source.winehq.org/git/wine.git ~/wine-git --depth 1
# downsides: This parameter will have the effect of preventing you from
# cloning it or fetching from it, and other repositories will be unable
# to push to you, and you won't be able to push to other repositories.
branch = None if GitConfig._is_hash(rev) else rev
success = self._clone(url, dest=dest, abortOnError=abortOnError, branch=branch, rev=rev, **extra_args)
if success and rev and GitConfig._is_hash(rev):
success = self._reset_rev(rev, dest=dest, abortOnError=abortOnError, **extra_args)
if not success:
# TODO: should the cloned repo be removed from disk if the reset op failed?
log('reset revision failed, removing {0}'.format(dest))
shutil.rmtree(os.path.abspath(dest))
return success
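    # Usage sketch (illustrative; URL, paths and revision are placeholders):
    #   vc = GitConfig()
    #   vc.clone('https://example.org/repo.git', dest='/tmp/repo')                  # default branch 'master'
    #   vc.clone('https://example.org/repo.git', dest='/tmp/repo2', rev='4f2e89a')  # detach at a specific commit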
def _fetch(self, vcdir, repository=None, refspec=None, abortOnError=True, prune=False, lock=False, include_tags=True):
try:
cmd = ['git', 'fetch']
if prune:
cmd.append('--prune')
if not include_tags:
cmd.append('--no-tags')
if repository:
cmd.append(repository)
if refspec:
if isinstance(refspec, list):
cmd += refspec
else:
cmd.append(refspec)
if lock:
cmd = self._locked_cmd(vcdir, cmd)
logvv(' '.join(map(pipes.quote, cmd)))
return subprocess.check_call(cmd, cwd=vcdir)
except subprocess.CalledProcessError:
if abortOnError:
abort('git fetch failed')
else:
return None
def _log_changes(self, vcdir, path=None, incoming=True, abortOnError=True):
out = OutputCapture()
if incoming:
cmd = ['git', 'log', '..origin/master']
else:
cmd = ['git', 'log', 'origin/master..']
if path:
cmd.extend(['--', path])
rc = self.run(cmd, nonZeroIsFatal=False, cwd=vcdir, out=out)
if rc in (0, 1):
return out.data
else:
if abortOnError:
abort('{0} returned {1}'.format(
'incoming' if incoming else 'outgoing', str(rc)))
return None
def active_branch(self, vcdir, abortOnError=True):
out = OutputCapture()
cmd = ['git', 'symbolic-ref', '--short', '--quiet', 'HEAD']
rc = self.run(cmd, nonZeroIsFatal=abortOnError, cwd=vcdir, out=out)
if rc != 0:
return None
else:
return out.data.rstrip('\r\n')
def update_to_branch(self, vcdir, branch, abortOnError=True):
cmd = ['git', 'checkout', branch, '--']
return self.run(cmd, nonZeroIsFatal=abortOnError, cwd=vcdir) == 0
def incoming(self, vcdir, abortOnError=True):
"""
list incoming changesets
:param str vcdir: a valid repository path
:param bool abortOnError: if True abort on mx error
        :return: the log of incoming changesets for the specified repository, None if failure and `abortOnError` is False
:rtype: str
"""
rc = self._fetch(vcdir, abortOnError=abortOnError)
if rc == 0:
return self._log_changes(vcdir, incoming=True, abortOnError=abortOnError)
else:
if abortOnError:
abort('incoming returned ' + str(rc))
return None
def outgoing(self, vcdir, dest=None, abortOnError=True):
"""
        list outgoing changesets to 'dest' or default-push if None
:param str vcdir: a valid repository path
:param bool abortOnError: if True abort on mx error
        :return: the log of outgoing changesets for the specified repository,
            None if failure and `abortOnError` is False
:rtype: str
"""
rc = self._fetch(vcdir, abortOnError=abortOnError)
if rc == 0:
return self._log_changes(vcdir, path=dest, incoming=False, abortOnError=abortOnError)
else:
if abortOnError:
abort('outgoing returned ' + str(rc))
return None
def pull(self, vcdir, rev=None, update=False, abortOnError=True):
"""
Pull a given changeset (the head if `rev` is None), optionally updating
the working directory. Updating is only done if something was pulled.
If there were no new changesets or `rev` was already known locally,
no update is performed.
:param str vcdir: a valid repository path
:param str rev: the desired revision, if None use tip
:param bool abortOnError: if True abort on mx error
:return: True if the operation is successful, False otherwise
:rtype: bool
"""
if update and not rev:
cmd = ['git', 'pull']
self._log_pull(vcdir, rev)
out = OutputCapture()
rc = self.run(cmd, nonZeroIsFatal=abortOnError, cwd=vcdir, out=out)
logvv(out.data)
return rc == 0
else:
rc = self._fetch(vcdir, abortOnError=abortOnError)
if rc == 0:
if rev and update:
return self.update(vcdir, rev=rev, mayPull=False, clean=False, abortOnError=abortOnError)
else:
if abortOnError:
abort('fetch returned ' + str(rc))
return False
def can_push(self, vcdir, strict=True, abortOnError=True):
"""
Check if `vcdir` can be pushed.
:param str vcdir: a valid repository path
        :param bool strict: if True, no uncommitted changes and no untracked files are allowed
:return: True if we can push, False otherwise
:rtype: bool
"""
out = OutputCapture()
rc = self.run(['git', 'status', '--porcelain'], cwd=vcdir, nonZeroIsFatal=abortOnError, out=out)
if rc == 0:
output = out.data
if strict:
return output == ''
else:
if len(output) > 0:
for line in output.split('\n'):
if len(line) > 0 and not line.startswith('??'):
return False
return True
else:
return False
def _branch_remote(self, vcdir, branch, abortOnError=True):
out = OutputCapture()
rc = self.run(['git', 'config', '--get', 'branch.' + branch + '.remote'], cwd=vcdir, nonZeroIsFatal=abortOnError, out=out)
if rc == 0:
return out.data.rstrip('\r\n')
assert not abortOnError
return None
def _remote_url(self, vcdir, remote, push=False, abortOnError=True):
if is_windows():
cmd = ['git', 'ls-remote', '--get-url']
else:
cmd = ['git', 'remote', 'get-url']
if push:
cmd += ['--push']
cmd += [remote]
out = OutputCapture()
err = OutputCapture()
rc = self.run(cmd, cwd=vcdir, nonZeroIsFatal=False, out=out, err=err)
if rc == 0:
return out.data.rstrip('\r\n')
else:
log("git version doesn't support 'get-url', retrieving value from config instead.")
config_name = 'remote.{}.{}url'.format(remote, "push" if push is True else "")
cmd = ['git', 'config', config_name]
out = OutputCapture()
err = OutputCapture()
rc = self.run(cmd, cwd=vcdir, nonZeroIsFatal=False, out=out, err=err)
if rc == 0:
return out.data.rstrip('\r\n')
elif push is True:
non_push_config_name = 'remote.{}.url'.format(remote)
log("git config {} isn't defined. Attempting with {}".format(config_name, non_push_config_name))
cmd = ['git', 'config', non_push_config_name]
out = OutputCapture()
err = OutputCapture()
rc = self.run(cmd, cwd=vcdir, nonZeroIsFatal=False, out=out, err=err)
if rc == 0:
return out.data.rstrip('\r\n')
else:
log(err)
if abortOnError:
abort("Failed to retrieve the remote URL")
return None
def _path(self, vcdir, name, abortOnError=True):
branch = self.active_branch(vcdir, abortOnError=False)
if not branch:
branch = 'master'
remote = self._branch_remote(vcdir, branch, abortOnError=False)
if not remote and branch != 'master':
remote = self._branch_remote(vcdir, 'master', abortOnError=False)
if not remote:
remote = 'origin'
return self._remote_url(vcdir, remote, name == 'push', abortOnError=abortOnError)
def default_push(self, vcdir, abortOnError=True):
"""
get the default push target for this repo
:param str vcdir: a valid repository path
:param bool abortOnError: if True abort on mx error
:return: default push target for repo
:rtype: str
"""
push = self._path(vcdir, 'push', abortOnError=False)
if push:
return push
return self.default_pull(vcdir, abortOnError=abortOnError)
def default_pull(self, vcdir, abortOnError=True):
"""
get the default pull target for this repo
:param str vcdir: a valid repository path
:param bool abortOnError: if True abort on mx error
:return: default pull target for repo
:rtype: str
"""
return self._path(vcdir, 'fetch', abortOnError=abortOnError)
def push(self, vcdir, dest=None, rev=None, abortOnError=False):
"""
Push `vcdir` at rev `rev` to default if `dest`
is None, else push to `dest`.
:param str vcdir: a valid repository path
:param str rev: the desired revision
:param str dest: the path to destination
:param bool abortOnError: if True abort on mx error
:return: True on success, False otherwise
:rtype: bool
"""
cmd = ['git', 'push']
cmd.append(dest if dest else 'origin')
cmd.append('{0}master'.format('{0}:'.format(rev) if rev else ''))
self._log_push(vcdir, dest, rev)
out = OutputCapture()
rc = self.run(cmd, cwd=vcdir, nonZeroIsFatal=abortOnError, out=out)
logvv(out.data)
return rc == 0
def update(self, vcdir, rev=None, mayPull=False, clean=False, abortOnError=False):
"""
update the `vcdir` working directory.
If `rev` is not specified, update to the tip of the current branch.
If `rev` is specified, `mayPull` controls whether a pull will be attempted if
`rev` can not be found locally.
If `clean` is True, uncommitted changes will be discarded (no backup!).
:param str vcdir: a valid repository path
:param str rev: the desired revision
        :param bool mayPull: flag to control whether to pull or not
:param bool clean: discard uncommitted changes without backing up
:param bool abortOnError: if True abort on mx error
:return: True on success, False otherwise
:rtype: bool
"""
if rev and mayPull and not self.exists(vcdir, rev):
self.pull(vcdir, rev=rev, update=False, abortOnError=abortOnError)
if not self.exists(vcdir, rev):
abort('Fetch of %s succeeded\nbut did not contain requested revision %s.\nCheck that the suite.py repository location is mentioned by \'git remote -v\'' % (vcdir, rev))
cmd = ['git', 'checkout']
if clean:
cmd.append('-f')
if rev:
cmd.extend(['--detach', rev])
if not _opts.verbose:
cmd.append('-q')
else:
cmd.extend(['master', '--'])
return self.run(cmd, cwd=vcdir, nonZeroIsFatal=abortOnError) == 0
def locate(self, vcdir, patterns=None, abortOnError=True):
"""
Return a list of paths under vc control that match `patterns`
:param str vcdir: a valid repository path
:param patterns: a list of patterns
:type patterns: str or list or None
:param bool abortOnError: if True abort on mx error
:return: a list of paths under vc control
:rtype: list
"""
if patterns is None:
patterns = []
elif not isinstance(patterns, list):
patterns = [patterns]
out = LinesOutputCapture()
err = OutputCapture()
rc = self.run(['git', 'ls-files'] + patterns, cwd=vcdir, out=out, err=err, nonZeroIsFatal=False)
if rc == 0:
return out.lines
else:
if abortOnError:
abort('locate returned: {}\n{}'.format(rc, out.data))
else:
return None
def isDirty(self, vcdir, abortOnError=True):
"""
check whether the working directory is dirty
:param str vcdir: a valid repository path
:param bool abortOnError: if True abort on mx error
        :return: True if the working directory is dirty, False otherwise
:rtype: bool
"""
self.check_for_git()
try:
output = _check_output_str(['git', 'status', '--porcelain', '--untracked-files=no'], cwd=vcdir)
return len(output.strip()) > 0
except subprocess.CalledProcessError:
if abortOnError:
abort('failed to get status')
else:
return None
def status(self, vcdir, abortOnError=True):
"""
report the status of the repository
:param str vcdir: a valid repository path
:param bool abortOnError: if True abort on mx error
:return: True on success, False otherwise
:rtype: bool
"""
return run(['git', 'status'], cwd=vcdir, nonZeroIsFatal=abortOnError) == 0
def bookmark(self, vcdir, name, rev, abortOnError=True):
"""
Place a bookmark at a given revision
:param str vcdir: a valid repository path
:param str name: the name of the bookmark
:param str rev: the desired revision
:param bool abortOnError: if True abort on mx error
:return: True on success, False otherwise
:rtype: bool
"""
return run(['git', 'branch', '-f', name, rev], cwd=vcdir, nonZeroIsFatal=abortOnError) == 0
def latest(self, vcdir, rev1, rev2, abortOnError=True):
"""
Returns the latest of 2 revisions (in chronological order).
The revisions should be related in the DAG.
:param str vcdir: a valid repository path
:param str rev1: the first revision
:param str rev2: the second revision
:param bool abortOnError: if True abort on mx error
:return: the latest of the 2 revisions
:rtype: str or None
"""
self.check_for_git()
try:
out = _check_output_str(['git', 'rev-list', '-n', '1', '--date-order', rev1, rev2], cwd=vcdir)
changesets = out.strip().split('\n')
if len(changesets) != 1:
if abortOnError:
abort('git rev-list returned {0} possible latest (expected 1)'.format(len(changesets)))
return None
return changesets[0]
except subprocess.CalledProcessError:
if abortOnError:
abort('latest failed')
else:
return None
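    # Usage sketch (illustrative; hashes are placeholders): pick the newer of two
    # related revisions, e.g. to decide whether a checkout already contains a
    # required commit.
    #   newer = GitConfig().latest('/path/to/repo', '4f2e89a', '9b1c0d2')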
def exists(self, vcdir, rev):
"""
Check if a given revision exists in the repository.
:param str vcdir: a valid repository path
        :param str rev: the revision to check
:return: True if revision exists, False otherwise
:rtype: bool
"""
self.check_for_git()
try:
_check_output_str(['git', 'cat-file', '-e', rev], cwd=vcdir)
return True
except subprocess.CalledProcessError:
return False
def root(self, directory, abortOnError=True):
if VC._find_metadata_dir(directory, '.git'):
if self.check_for_git(abortOnError=True):
try:
out = _check_output_str(['git', 'rev-parse', '--show-toplevel'], cwd=directory, stderr=subprocess.STDOUT)
return out.strip()
except subprocess.CalledProcessError:
if abortOnError:
abort('`git rev-parse --show-toplevel` (root) failed')
elif abortOnError:
abort('No .git directory')
return None
class BinaryVC(VC):
"""
    Emulates a VC system for binary suites as far as possible, in particular pull/tip.
"""
def __init__(self):
VC.__init__(self, 'binary', 'MX Binary')
def check(self, abortOnError=True):
return True
def is_this_vc(self, vcdir):
try:
return self.parent(vcdir, abortOnError=False)
except IOError:
return False
def clone(self, url, dest=None, rev=None, abortOnError=True, **extra_args):
"""
Downloads the ``mx-suitename.jar`` file. The caller is responsible for downloading
the suite distributions. The actual version downloaded is written to the file
``mx-suitename.jar.<version>``.
:param extra_args: Additional args that must include `suite_name` which is a string
denoting the suite name and `result` which is a dict for output values. If this
        method returns True, then there will be an `adj_version` entry in this dict
containing the actual (adjusted) version
:return: True if the clone was successful, False otherwise
:rtype: bool
"""
assert dest
suite_name = extra_args['suite_name']
metadata = self.Metadata(suite_name, url, None, None)
if not rev:
rev = self._tip(metadata)
metadata.snapshotVersion = '{0}-SNAPSHOT'.format(rev)
mxname = _mx_binary_distribution_root(suite_name)
self._log_clone("{}/{}/{}".format(url, _mavenGroupId(suite_name).replace('.', '/'), mxname), dest, rev)
mx_jar_path = join(dest, _mx_binary_distribution_jar(suite_name))
if not self._pull_artifact(metadata, _mavenGroupId(suite_name), mxname, mxname, mx_jar_path, abortOnVersionError=abortOnError):
return False
run([get_jdk(tag=DEFAULT_JDK_TAG).jar, 'xf', mx_jar_path], cwd=dest)
self._writeMetadata(dest, metadata)
return True
def _pull_artifact(self, metadata, groupId, artifactId, name, path, sourcePath=None, abortOnVersionError=True, extension='jar'):
repo = MavenRepo(metadata.repourl)
snapshot = repo.getSnapshot(groupId, artifactId, metadata.snapshotVersion)
if not snapshot:
if abortOnVersionError:
url = repo.getSnapshotUrl(groupId, artifactId, metadata.snapshotVersion)
abort('Version {} not found for {}:{} ({})'.format(metadata.snapshotVersion, groupId, artifactId, url))
return False
build = snapshot.getCurrentSnapshotBuild()
metadata.snapshotTimestamp = snapshot.currentTime
try:
(jar_url, jar_sha_url) = build.getSubArtifact(extension)
except MavenSnapshotArtifact.NonUniqueSubArtifactException:
raise abort('Multiple {}s found for {} in snapshot {} in repository {}'.format(extension, name, build.version, repo.repourl))
download_file_with_sha1(artifactId, path, [jar_url], _hashFromUrl(jar_sha_url), path + '.sha1', resolve=True, mustExist=True, sources=False)
if sourcePath:
try:
(source_url, source_sha_url) = build.getSubArtifactByClassifier('sources')
except MavenSnapshotArtifact.NonUniqueSubArtifactException:
raise abort('Multiple source artifacts found for {} in snapshot {} in repository {}'.format(name, build.version, repo.repourl))
download_file_with_sha1(artifactId + '_sources', sourcePath, [source_url], _hashFromUrl(source_sha_url), sourcePath + '.sha1', resolve=True, mustExist=True, sources=True)
return True
class Metadata:
def __init__(self, suiteName, repourl, snapshotVersion, snapshotTimestamp):
self.suiteName = suiteName
self.repourl = repourl
self.snapshotVersion = snapshotVersion
self.snapshotTimestamp = snapshotTimestamp
def _writeMetadata(self, vcdir, metadata):
with open(join(vcdir, _mx_binary_distribution_version(metadata.suiteName)), 'w') as f:
f.write("{0},{1},{2}".format(metadata.repourl, metadata.snapshotVersion, metadata.snapshotTimestamp))
def _readMetadata(self, vcdir):
suiteName = basename(vcdir)
with open(join(vcdir, _mx_binary_distribution_version(suiteName))) as f:
parts = f.read().split(',')
if len(parts) == 2:
# Older versions of the persisted metadata do not contain the snapshot timestamp.
repourl, snapshotVersion = parts
snapshotTimestamp = None
else:
repourl, snapshotVersion, snapshotTimestamp = parts
return self.Metadata(suiteName, repourl, snapshotVersion, snapshotTimestamp)
def getDistribution(self, vcdir, distribution):
suiteName = basename(vcdir)
reason = distribution.needsUpdate(TimeStampFile(join(vcdir, _mx_binary_distribution_version(suiteName)), followSymlinks=False))
if not reason:
return
log('Updating {} [{}]'.format(distribution, reason))
metadata = self._readMetadata(vcdir)
artifactId = distribution.maven_artifact_id()
groupId = distribution.maven_group_id()
path = distribution.path[:-len(distribution.localExtension())] + distribution.remoteExtension()
if distribution.isJARDistribution():
sourcesPath = distribution.sourcesPath
else:
sourcesPath = None
with SafeFileCreation(path, companion_patterns=["{path}.sha1"]) as sfc, SafeFileCreation(sourcesPath, companion_patterns=["{path}.sha1"]) as sourceSfc:
self._pull_artifact(metadata, groupId, artifactId, distribution.remoteName(), sfc.tmpPath, sourcePath=sourceSfc.tmpPath, extension=distribution.remoteExtension())
final_path = distribution.postPull(sfc.tmpPath)
if final_path:
os.rename(final_path, distribution.path)
assert exists(distribution.path)
distribution.notify_updated()
def pull(self, vcdir, rev=None, update=True, abortOnError=True):
if not update:
return False # TODO or True?
metadata = self._readMetadata(vcdir)
if not rev:
rev = self._tip(metadata)
if rev == self._id(metadata):
return False
metadata.snapshotVersion = '{0}-SNAPSHOT'.format(rev)
tmpdir = tempfile.mkdtemp()
mxname = _mx_binary_distribution_root(metadata.suiteName)
tmpmxjar = join(tmpdir, mxname + '.jar')
if not self._pull_artifact(metadata, _mavenGroupId(metadata.suiteName), mxname, mxname, tmpmxjar, abortOnVersionError=abortOnError):
shutil.rmtree(tmpdir)
return False
# pull the new version and update 'working directory'
# i.e. delete first as everything will change
shutil.rmtree(vcdir)
mx_jar_path = join(vcdir, _mx_binary_distribution_jar(metadata.suiteName))
ensure_dir_exists(dirname(mx_jar_path))
shutil.copy2(tmpmxjar, mx_jar_path)
shutil.rmtree(tmpdir)
run([get_jdk(tag=DEFAULT_JDK_TAG).jar, 'xf', mx_jar_path], cwd=vcdir)
self._writeMetadata(vcdir, metadata)
return True
def update(self, vcdir, rev=None, mayPull=False, clean=False, abortOnError=False):
return self.pull(vcdir=vcdir, rev=rev, update=True, abortOnError=abortOnError)
def tip(self, vcdir, abortOnError=True):
        return self._tip(self._readMetadata(vcdir))
def _tip(self, metadata):
repo = MavenRepo(metadata.repourl)
warn("Using `tip` on a binary suite is unreliable.")
latestSnapshotversion = repo.getArtifactVersions(_mavenGroupId(metadata.suiteName), _mx_binary_distribution_root(metadata.suiteName)).latestVersion
assert latestSnapshotversion.endswith('-SNAPSHOT')
return latestSnapshotversion[:-len('-SNAPSHOT')]
def default_pull(self, vcdir, abortOnError=True):
return self._readMetadata(vcdir).repourl
def parent(self, vcdir, abortOnError=True):
return self._id(self._readMetadata(vcdir))
def parent_info(self, vcdir, abortOnError=True):
def decode(ts):
if ts is None:
return 0
yyyy = int(ts[0:4])
mm = int(ts[4:6])
dd = int(ts[6:8])
hh = int(ts[9:11])
mi = int(ts[11:13])
ss = int(ts[13:15])
return (datetime(yyyy, mm, dd, hh, mi, ss) - datetime(1970, 1, 1)).total_seconds()
metadata = self._readMetadata(vcdir)
timestamp = decode(metadata.snapshotTimestamp)
return {
"author": "<unknown>",
"author-ts": timestamp,
"committer": "<unknown>",
"committer-ts": timestamp,
}
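    # Illustrative example of the timestamp decoding above: Maven snapshot metadata
    # uses the 'yyyyMMdd.HHmmss' form, so a snapshotTimestamp of '20240101.123456'
    # decodes to 2024-01-01 12:34:56 UTC expressed as seconds since the Unix epoch.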
def _id(self, metadata):
assert metadata.snapshotVersion.endswith('-SNAPSHOT')
return metadata.snapshotVersion[:-len('-SNAPSHOT')]
def isDirty(self, abortOnError=True):
# a binary repo can not be dirty
return False
def status(self, abortOnError=True):
# a binary repo has nothing to report
return True
def root(self, directory, abortOnError=True):
if abortOnError:
abort("A binary VC has no 'root'")
def active_branch(self, vcdir, abortOnError=True):
if abortOnError:
abort("A binary VC has no active branch")
def update_to_branch(self, vcdir, branch, abortOnError=True):
if abortOnError:
abort("A binary VC has no branch")
return False
### Maven, _private
def _map_to_maven_dist_name(name):
return name.lower().replace('_', '-')
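# Illustrative example (distribution name is made up):
#   _map_to_maven_dist_name('MX_TOOLS')  # -> 'mx-tools'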
class MavenArtifactVersions:
def __init__(self, latestVersion, releaseVersion, versions):
self.latestVersion = latestVersion
self.releaseVersion = releaseVersion
self.versions = versions
class MavenSnapshotBuilds:
def __init__(self, currentTime, currentBuildNumber, snapshots):
self.currentTime = currentTime
self.currentBuildNumber = currentBuildNumber
self.snapshots = snapshots
def getCurrentSnapshotBuild(self):
return self.snapshots[(self.currentTime, self.currentBuildNumber)]
class MavenSnapshotArtifact:
def __init__(self, groupId, artifactId, version, snapshotBuildVersion, repo):
self.groupId = groupId
self.artifactId = artifactId
self.version = version
self.snapshotBuildVersion = snapshotBuildVersion
self.subArtifacts = []
self.repo = repo
class SubArtifact:
def __init__(self, extension, classifier):
self.extension = extension
self.classifier = classifier
def __repr__(self):
return str(self)
def __str__(self):
return "{0}.{1}".format(self.classifier, self.extension) if self.classifier else self.extension
def addSubArtifact(self, extension, classifier):
self.subArtifacts.append(self.SubArtifact(extension, classifier))
class NonUniqueSubArtifactException(Exception):
pass
def _getUniqueSubArtifact(self, criterion):
filtered = [sub for sub in self.subArtifacts if criterion(sub.extension, sub.classifier)]
if len(filtered) == 0:
return None
if len(filtered) > 1:
raise self.NonUniqueSubArtifactException()
sub = filtered[0]
if sub.classifier:
url = "{url}/{group}/{artifact}/{version}/{artifact}-{snapshotBuildVersion}-{classifier}.{extension}".format(
url=self.repo.repourl,
group=self.groupId.replace('.', '/'),
artifact=self.artifactId,
version=self.version,
snapshotBuildVersion=self.snapshotBuildVersion,
classifier=sub.classifier,
extension=sub.extension)
else:
url = "{url}/{group}/{artifact}/{version}/{artifact}-{snapshotBuildVersion}.{extension}".format(
url=self.repo.repourl,
group=self.groupId.replace('.', '/'),
artifact=self.artifactId,
version=self.version,
snapshotBuildVersion=self.snapshotBuildVersion,
extension=sub.extension)
return url, url + '.sha1'
def getSubArtifact(self, extension, classifier=None):
return self._getUniqueSubArtifact(lambda e, c: e == extension and c == classifier)
def getSubArtifactByClassifier(self, classifier):
return self._getUniqueSubArtifact(lambda e, c: c == classifier)
def __repr__(self):
return str(self)
def __str__(self):
return "{0}:{1}:{2}-SNAPSHOT".format(self.groupId, self.artifactId, self.snapshotBuildVersion)
class MavenRepo:
def __init__(self, repourl):
self.repourl = repourl
self.artifactDescs = {}
def getArtifactVersions(self, groupId, artifactId):
metadataUrl = "{0}/{1}/{2}/maven-metadata.xml".format(self.repourl, groupId.replace('.', '/'), artifactId)
logv('Retrieving and parsing {0}'.format(metadataUrl))
try:
metadataFile = _urlopen(metadataUrl, timeout=10)
except _urllib_error.HTTPError as e:
_suggest_http_proxy_error(e)
abort('Error while retrieving metadata for {}:{}: {}'.format(groupId, artifactId, str(e)))
try:
tree = etreeParse(metadataFile)
root = tree.getroot()
assert root.tag == 'metadata'
assert root.find('groupId').text == groupId
assert root.find('artifactId').text == artifactId
versioning = root.find('versioning')
latest = versioning.find('latest')
release = versioning.find('release')
versions = versioning.find('versions')
versionStrings = [v.text for v in versions.iter('version')]
            releaseVersionString = release.text if release is not None and release.text else None
            if latest is not None and latest.text:
latestVersionString = latest.text
else:
logv('Element \'latest\' not specified in metadata. Fallback: Find latest via \'versions\'.')
latestVersionString = None
for version_str in reversed(versionStrings):
snapshot_metadataUrl = self.getSnapshotUrl(groupId, artifactId, version_str)
try:
snapshot_metadataFile = _urlopen(snapshot_metadataUrl, timeout=10)
except _urllib_error.HTTPError as e:
                        logv('Version {0} not accessible. Try previous snapshot.'.format(version_str))
snapshot_metadataFile = None
if snapshot_metadataFile:
logv('Using version {0} as latestVersionString.'.format(version_str))
latestVersionString = version_str
snapshot_metadataFile.close()
break
return MavenArtifactVersions(latestVersionString, releaseVersionString, versionStrings)
except _urllib_error.URLError as e:
abort('Error while retrieving versions for {0}:{1}: {2}'.format(groupId, artifactId, str(e)))
finally:
if metadataFile:
metadataFile.close()
def getSnapshotUrl(self, groupId, artifactId, version):
return "{0}/{1}/{2}/{3}/maven-metadata.xml".format(self.repourl, groupId.replace('.', '/'), artifactId, version)
def getSnapshot(self, groupId, artifactId, version):
assert version.endswith('-SNAPSHOT')
metadataUrl = self.getSnapshotUrl(groupId, artifactId, version)
logv('Retrieving and parsing {0}'.format(metadataUrl))
try:
metadataFile = _urlopen(metadataUrl, timeout=10)
except _urllib_error.URLError as e:
if isinstance(e, _urllib_error.HTTPError) and e.code == 404:
return None
_suggest_http_proxy_error(e)
abort('Error while retrieving snapshot for {}:{}:{}: {}'.format(groupId, artifactId, version, str(e)))
try:
tree = etreeParse(metadataFile)
root = tree.getroot()
assert root.tag == 'metadata'
assert root.find('groupId').text == groupId
assert root.find('artifactId').text == artifactId
assert root.find('version').text == version
versioning = root.find('versioning')
snapshot = versioning.find('snapshot')
snapshotVersions = versioning.find('snapshotVersions')
currentSnapshotTime = snapshot.find('timestamp').text
currentSnapshotBuildElement = snapshot.find('buildNumber')
currentSnapshotBuildNumber = int(currentSnapshotBuildElement.text) if currentSnapshotBuildElement is not None else 0
versionPrefix = version[:-len('-SNAPSHOT')] + '-'
prefixLen = len(versionPrefix)
snapshots = {}
for snapshotVersion in snapshotVersions.iter('snapshotVersion'):
fullVersion = snapshotVersion.find('value').text
separatorIndex = fullVersion.index('-', prefixLen)
timeStamp = fullVersion[prefixLen:separatorIndex]
buildNumber = int(fullVersion[separatorIndex+1:])
extension = snapshotVersion.find('extension').text
classifier = snapshotVersion.find('classifier')
classifierString = None
if classifier is not None and len(classifier.text) > 0:
classifierString = classifier.text
artifact = snapshots.setdefault((timeStamp, buildNumber), MavenSnapshotArtifact(groupId, artifactId, version, fullVersion, self))
artifact.addSubArtifact(extension, classifierString)
return MavenSnapshotBuilds(currentSnapshotTime, currentSnapshotBuildNumber, snapshots)
finally:
if metadataFile:
metadataFile.close()
_maven_local_repository = None
def maven_local_repository(): # pylint: disable=invalid-name
global _maven_local_repository
if not _maven_local_repository:
class _MavenLocalRepository(Repository):
"""This singleton class represents mavens local repository (usually under ~/.m2/repository)"""
def __init__(self):
try:
res = {'lines': '', 'xml': False, 'total_output': ''}
def xml_settings_grabber(line):
res['total_output'] += line
if not res['xml'] and not res['lines'] and line.startswith('<settings '):
res['xml'] = True
if res['xml']:
res['lines'] += line
if line.startswith('</settings>'):
res['xml'] = False
run_maven(['help:effective-settings'], out=xml_settings_grabber)
dom = minidomParseString(res['lines'])
local_repo = dom.getElementsByTagName('localRepository')[0].firstChild.data
url = 'file://' + local_repo
except BaseException as e:
ls = os.linesep
raise abort('Unable to determine maven local repository URL{}Caused by: {}{}Output:{}{}'.format(ls, repr(e), ls, ls, res['total_output']))
Repository.__init__(self, suite('mx'), 'maven local repository', url, url, [])
def resolveLicenses(self):
return True
_maven_local_repository = _MavenLocalRepository()
return _maven_local_repository
def maven_download_urls(groupId, artifactId, version, classifier=None, baseURL=None):
if baseURL is None:
baseURLs = _mavenRepoBaseURLs
else:
baseURLs = [baseURL]
args = {
'groupId': groupId.replace('.', '/'),
'artifactId': artifactId,
'version': version,
'classifier' : '-{0}'.format(classifier) if classifier else ''
}
return ["{base}{groupId}/{artifactId}/{version}/{artifactId}-{version}{classifier}.jar".format(base=base, **args) for base in baseURLs]
### ~~~~~~~~~~~~~ Maven, _private
def _mavenGroupId(suite):
if isinstance(suite, Suite):
group_id = suite._get_early_suite_dict_property('groupId')
if group_id:
return group_id
name = suite.name
else:
assert isinstance(suite, str)
name = suite
return 'com.oracle.' + _map_to_maven_dist_name(name)
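# Illustrative example (suite name is made up): a suite without an explicit 'groupId'
# attribute gets a derived group id.
#   _mavenGroupId('my_suite')  # -> 'com.oracle.my-suite'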
def _genPom(dist, versionGetter, validateMetadata='none'):
"""
:type dist: Distribution
"""
groupId = dist.maven_group_id()
artifactId = dist.maven_artifact_id()
version = versionGetter(dist.suite)
pom = XMLDoc()
pom.open('project', attributes={
'xmlns': "http://maven.apache.org/POM/4.0.0",
'xmlns:xsi': "http://www.w3.org/2001/XMLSchema-instance",
'xsi:schemaLocation': "http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd"
})
pom.element('modelVersion', data="4.0.0")
pom.element('groupId', data=groupId)
pom.element('artifactId', data=artifactId)
pom.element('version', data=version)
if dist.remoteExtension() != 'jar':
pom.element('packaging', data=dist.remoteExtension())
if dist.suite.url:
pom.element('url', data=dist.suite.url)
elif validateMetadata != 'none':
if 'suite-url' in dist.suite.getMxCompatibility().supportedMavenMetadata() or validateMetadata == 'full':
abort("Suite {} is missing the 'url' attribute".format(dist.suite.name))
warn("Suite {}'s version is too old to contain the 'url' attribute".format(dist.suite.name))
acronyms = ['API', 'DSL', 'SL', 'TCK']
name = ' '.join((t if t in acronyms else t.lower().capitalize() for t in dist.name.split('_')))
pom.element('name', data=name)
if hasattr(dist, 'description'):
pom.element('description', data=dist.description)
elif validateMetadata != 'none':
if 'dist-description' in dist.suite.getMxCompatibility().supportedMavenMetadata() or validateMetadata == 'full':
dist.abort("Distribution is missing the 'description' attribute")
dist.warn("Distribution's suite version is too old to have the 'description' attribute")
if dist.suite.developer:
pom.open('developers')
pom.open('developer')
def _addDevAttr(name, default=None):
if name in dist.suite.developer:
value = dist.suite.developer[name]
else:
value = default
if value:
pom.element(name, data=value)
elif validateMetadata != 'none':
abort("Suite {}'s developer metadata is missing the '{}' attribute".format(dist.suite.name, name))
_addDevAttr('name')
_addDevAttr('email')
_addDevAttr('organization')
_addDevAttr('organizationUrl', dist.suite.url)
pom.close('developer')
pom.close('developers')
elif validateMetadata != 'none':
if 'suite-developer' in dist.suite.getMxCompatibility().supportedMavenMetadata() or validateMetadata == 'full':
abort("Suite {} is missing the 'developer' attribute".format(dist.suite.name))
warn("Suite {}'s version is too old to contain the 'developer' attribute".format(dist.suite.name))
if dist.theLicense:
pom.open('licenses')
for distLicense in dist.theLicense:
pom.open('license')
pom.element('name', data=distLicense.fullname)
pom.element('url', data=distLicense.url)
pom.close('license')
pom.close('licenses')
elif validateMetadata != 'none':
if dist.suite.getMxCompatibility().supportsLicenses() or validateMetadata == 'full':
dist.abort("Distribution is missing 'license' attribute")
dist.warn("Distribution's suite version is too old to have the 'license' attribute")
directDistDeps = [d for d in dist.deps if d.isDistribution()]
directLibDeps = dist.excludedLibs
if directDistDeps or directLibDeps:
pom.open('dependencies')
for dep in directDistDeps:
if dep.suite.internal:
warn("_genPom({}): ignoring internal dependency {}".format(dist, dep))
continue
if validateMetadata != 'none' and not getattr(dep, 'maven', False):
if validateMetadata == 'full':
dist.abort("Distribution depends on non-maven distribution {}".format(dep))
dist.warn("Distribution depends on non-maven distribution {}".format(dep))
for platform in dep.platforms:
pom.open('dependency')
pom.element('groupId', data=dep.maven_group_id())
pom.element('artifactId', data=dep.maven_artifact_id(platform=platform))
dep_version = versionGetter(dep.suite)
if validateMetadata != 'none' and 'SNAPSHOT' in dep_version and 'SNAPSHOT' not in version:
if validateMetadata == 'full':
dist.abort("non-snapshot distribution depends on snapshot distribution {}".format(dep))
dist.warn("non-snapshot distribution depends on snapshot distribution {}".format(dep))
pom.element('version', data=dep_version)
if dep.remoteExtension() != 'jar':
pom.element('type', data=dep.remoteExtension())
pom.close('dependency')
for l in directLibDeps:
if (l.isJdkLibrary() or l.isJreLibrary()) and l.is_provided_by(get_jdk()) and l.is_provided_by(get_jdk(dist.maxJavaCompliance())):
continue
if hasattr(l, 'maven'):
mavenMetaData = l.maven
pom.open('dependency')
pom.element('groupId', data=mavenMetaData['groupId'])
pom.element('artifactId', data=mavenMetaData['artifactId'])
pom.element('version', data=mavenMetaData['version'])
if dist.suite.getMxCompatibility().mavenSupportsClassifier():
if 'suffix' in mavenMetaData:
l.abort('The use of "suffix" as maven metadata is not supported in this version. Use "classifier" instead.')
if 'classifier' in mavenMetaData:
pom.element('classifier', data=mavenMetaData['classifier'])
else:
if 'suffix' in mavenMetaData:
pom.element('classifier', data=mavenMetaData['suffix'])
pom.close('dependency')
elif validateMetadata != 'none':
if 'library-coordinates' in dist.suite.getMxCompatibility().supportedMavenMetadata() or validateMetadata == 'full':
l.abort("Library is missing maven metadata")
l.warn("Library's suite version is too old to have maven metadata")
pom.close('dependencies')
if dist.suite.vc:
pom.open('scm')
scm = dist.suite.scm_metadata(abortOnError=validateMetadata != 'none')
pom.element('connection', data='scm:{}:{}'.format(dist.suite.vc.kind, scm.read))
if scm.read != scm.write or validateMetadata == 'full':
pom.element('developerConnection', data='scm:{}:{}'.format(dist.suite.vc.kind, scm.write))
pom.element('url', data=scm.url)
pom.close('scm')
elif validateMetadata == 'full':
abort("Suite {} is not in a vcs repository, as a result 'scm' attribute cannot be generated for it".format(dist.suite.name))
pom.close('project')
return pom.xml(indent=' ', newl='\n')
def _tmpPomFile(dist, versionGetter, validateMetadata='none'):
tmp = tempfile.NamedTemporaryFile('w', suffix='.pom', delete=False)
tmp.write(_genPom(dist, versionGetter, validateMetadata))
tmp.close()
return tmp.name
def _deploy_binary_maven(suite, artifactId, groupId, filePath, version, repo,
srcPath=None,
description=None,
settingsXml=None,
extension='jar',
dryRun=False,
pomFile=None,
gpg=False,
keyid=None,
javadocPath=None,
extraFiles=None):
"""
:type extraFiles: list[(str, str, str)]
"""
assert exists(filePath), filePath
assert not srcPath or exists(srcPath), srcPath
cmd = ['--batch-mode']
if not _opts.verbose:
cmd.append('--quiet')
if _opts.verbose:
cmd.append('--errors')
if _opts.very_verbose:
cmd.append('--debug')
if settingsXml:
cmd += ['-s', settingsXml]
if repo != maven_local_repository():
cmd += [
'-DrepositoryId=' + repo.get_maven_id(),
'-Durl=' + repo.get_url(version)
]
if gpg:
cmd += ['gpg:sign-and-deploy-file']
else:
cmd += ['deploy:deploy-file']
if keyid:
cmd += ['-Dgpg.keyname=' + keyid]
else:
cmd += ['install:install-file']
if gpg or keyid:
abort('Artifact signing not supported for ' + repo.name)
cmd += [
'-DgroupId=' + groupId,
'-DartifactId=' + artifactId,
'-Dversion=' + version,
'-Dfile=' + filePath,
'-Dpackaging=' + extension,
'-DretryFailedDeploymentCount=10'
]
if pomFile:
cmd.append('-DpomFile=' + pomFile)
else:
cmd.append('-DgeneratePom=true')
if srcPath:
cmd.append('-Dsources=' + srcPath)
if javadocPath:
cmd.append('-Djavadoc=' + javadocPath)
if description:
cmd.append('-Ddescription=' + description)
if extraFiles:
cmd.append('-Dfiles=' + ','.join(ef[0] for ef in extraFiles))
cmd.append('-Dclassifiers=' + ','.join(ef[1] for ef in extraFiles))
cmd.append('-Dtypes=' + ','.join(ef[2] for ef in extraFiles))
action = 'Installing' if repo == maven_local_repository() else 'Deploying'
log('{} {}:{}...'.format(action, groupId, artifactId))
if dryRun:
logv(' '.join((pipes.quote(t) for t in cmd)))
else:
run_maven(cmd)
def _deploy_skip_existing(args, dists, version, repo):
if args.skip_existing:
non_existing_dists = []
for dist in dists:
if version.endswith('-SNAPSHOT'):
metadata_append = '-local' if repo == maven_local_repository() else ''
metadata_url = '{0}/{1}/{2}/{3}/maven-metadata{4}.xml'.format(repo.get_url(version), dist.maven_group_id().replace('.', '/'), dist.maven_artifact_id(), version, metadata_append)
else:
metadata_url = '{0}/{1}/{2}/{3}/'.format(repo.get_url(version), dist.maven_group_id().replace('.', '/'), dist.maven_artifact_id(), version)
if download_file_exists([metadata_url]):
log('Skip existing {}:{}'.format(dist.maven_group_id(), dist.maven_artifact_id()))
else:
non_existing_dists.append(dist)
return non_existing_dists
else:
return dists
def deploy_binary(args):
"""deploy binaries for the primary suite to remote maven repository
All binaries must be built first using ``mx build``.
"""
parser = ArgumentParser(prog='mx deploy-binary')
    parser.add_argument('-s', '--settings', action='store', help='Path to settings.xml file used for Maven')
parser.add_argument('-n', '--dry-run', action='store_true', help='Dry run that only prints the action a normal run would perform without actually deploying anything')
parser.add_argument('--only', action='store', help='Limit deployment to these distributions')
parser.add_argument('--platform-dependent', action='store_true', help='Limit deployment to platform dependent distributions only')
parser.add_argument('--all-suites', action='store_true', help='Deploy suite and the distributions it depends on in other suites')
parser.add_argument('--skip-existing', action='store_true', help='Do not deploy distributions if already in repository')
    parser.add_argument('repository_id', metavar='repository-id', nargs='?', action='store', help='Repository ID used for binary deploy. If none is given, Maven\'s local repository is used instead.')
parser.add_argument('url', metavar='repository-url', nargs='?', action='store', help='Repository URL used for binary deploy. If no url is given, the repository-id is looked up in suite.py')
args = parser.parse_args(args)
if args.all_suites:
_suites = suites()
else:
_suites = primary_or_specific_suites()
for s in _suites:
if s.isSourceSuite():
_deploy_binary(args, s)
def _deploy_binary(args, suite):
if not suite.getMxCompatibility().supportsLicenses():
log("Not deploying '{0}' because licenses aren't defined".format(suite.name))
return
if not suite.getMxCompatibility().supportsRepositories():
log("Not deploying '{0}' because repositories aren't defined".format(suite.name))
return
if not suite.vc:
abort('Current suite has no version control')
_mvn.check()
def versionGetter(suite):
return '{0}-SNAPSHOT'.format(suite.vc.parent(suite.vc_dir))
dists = suite.dists
if args.only:
only = args.only.split(',')
dists = [d for d in dists if d.name in only or d.qualifiedName() in only]
if args.platform_dependent:
dists = [d for d in dists if d.platformDependent]
mxMetaName = _mx_binary_distribution_root(suite.name)
suite.create_mx_binary_distribution_jar()
mxMetaJar = suite.mx_binary_distribution_jar_path()
assert exists(mxMetaJar)
if args.all_suites:
dists = [d for d in dists if d.exists()]
for dist in dists:
if not dist.exists():
abort("'{0}' is not built, run 'mx build' first".format(dist.name))
platform_dependence = any(d.platformDependent for d in dists)
if args.url:
repo = Repository(None, args.repository_id, args.url, args.url, repository(args.repository_id).licenses)
elif args.repository_id:
if not suite.getMxCompatibility().supportsRepositories():
abort("Repositories are not supported in {}'s suite version".format(suite.name))
repo = repository(args.repository_id)
else:
repo = maven_local_repository()
version = versionGetter(suite)
if not args.only:
action = 'Installing' if repo == maven_local_repository() else 'Deploying'
log('{} suite {} version {}'.format(action, suite.name, version))
dists = _deploy_skip_existing(args, dists, version, repo)
if not dists:
return
_maven_deploy_dists(dists, versionGetter, repo, args.settings, dryRun=args.dry_run, deployMapFiles=True)
if not args.platform_dependent and not args.only:
_deploy_binary_maven(suite, _map_to_maven_dist_name(mxMetaName), _mavenGroupId(suite.name), mxMetaJar, version, repo, settingsXml=args.settings, dryRun=args.dry_run)
if not args.all_suites and suite == primary_suite() and suite.vc.kind == 'git' and suite.vc.active_branch(suite.vc_dir) == 'master':
deploy_branch_name = 'binary'
platform_dependent_base = deploy_branch_name + '_'
binary_deployed_ref = platform_dependent_base + Distribution.platformName() if platform_dependence else deploy_branch_name
deployed_rev = suite.version()
assert deployed_rev == suite.vc.parent(suite.vc_dir), 'Version mismatch: suite.version() != suite.vc.parent(suite.vc_dir)'
def try_remote_branch_update(branch_name):
deploy_item_msg = "'{0}'-branch to {1}".format(branch_name, deployed_rev)
log("On master branch: Try setting " + deploy_item_msg)
retcode = GitConfig.set_branch(suite.vc_dir, branch_name, deployed_rev)
if retcode:
log("Updating " + deploy_item_msg + " failed (probably more recent deployment)")
else:
log("Successfully updated " + deploy_item_msg)
try_remote_branch_update(binary_deployed_ref)
if platform_dependence:
log("Suite has platform_dependence: Update " + deploy_branch_name)
platform_dependent_branches = GitConfig.get_matching_branches('origin', platform_dependent_base + '*', vcdir=suite.vc_dir)
not_on_same_rev = [(branch_name, commit_id) for branch_name, commit_id in platform_dependent_branches.items() if commit_id != deployed_rev]
if len(not_on_same_rev):
log("Skip " + deploy_branch_name + " update! The following branches are not yet on " + deployed_rev + ":")
for branch_name, commit_id in not_on_same_rev:
log(" " + branch_name + " --> " + commit_id)
else:
try_remote_branch_update(deploy_branch_name)
def _maven_deploy_dists(dists, versionGetter, repo, settingsXml,
dryRun=False,
validateMetadata='none',
gpg=False,
keyid=None,
generateJavadoc=False,
deployMapFiles=False,
deployRepoMetadata=False):
if repo != maven_local_repository():
# Non-local deployment requires license checking
for dist in dists:
if not dist.theLicense:
                abort('Distributions without a license are not cleared for upload to {}: cannot upload {}'.format(repo.name, dist.name))
for distLicense in dist.theLicense:
if distLicense not in repo.licenses:
                    abort('Distributions under the {} license are not cleared for upload to {}: cannot upload {}'.format(distLicense.name, repo.name, dist.name))
if deployRepoMetadata:
repo_metadata_xml = XMLDoc()
repo_metadata_xml.open('suite-revisions')
for s_ in suites():
if s_.vc:
commit_timestamp = s_.vc.parent_info(s_.vc_dir)['committer-ts']
repo_metadata_xml.element('suite', attributes={
"name": s_.name,
"revision": s_.vc.parent(s_.vc_dir),
"date": datetime.utcfromtimestamp(commit_timestamp).isoformat(),
"kind": s_.vc.kind
})
for d_ in dists:
for extra_data_tag, extra_data_attributes in d_.extra_suite_revisions_data():
repo_metadata_xml.element(extra_data_tag, attributes=extra_data_attributes)
repo_metadata_xml.close('suite-revisions')
repo_metadata_fd, repo_metadata_name = mkstemp(suffix='.xml', text=True)
repo_metadata = repo_metadata_xml.xml(indent=' ', newl='\n')
if _opts.very_verbose or (dryRun and _opts.verbose):
log(repo_metadata)
with os.fdopen(repo_metadata_fd, 'w') as f:
f.write(repo_metadata)
else:
repo_metadata_name = None
for dist in dists:
for platform in dist.platforms:
if dist.maven_artifact_id() != dist.maven_artifact_id(platform):
full_maven_name = "{}:{}".format(dist.maven_group_id(), dist.maven_artifact_id(platform))
if repo == maven_local_repository():
log("Installing dummy {}".format(full_maven_name))
                    # Allow installing local dummy platform-dependent artifacts for other platforms
foreign_platform_dummy_tarball = tempfile.NamedTemporaryFile('w', suffix='.tar.gz', delete=False)
foreign_platform_dummy_tarball.close()
with Archiver(foreign_platform_dummy_tarball.name, kind='tgz') as arc:
arc.add_str("Dummy artifact {} for local maven install\n".format(full_maven_name), full_maven_name + ".README", None)
_deploy_binary_maven(dist.suite, dist.maven_artifact_id(platform), dist.maven_group_id(), foreign_platform_dummy_tarball.name, versionGetter(dist.suite), repo, settingsXml=settingsXml, extension=dist.remoteExtension(), dryRun=dryRun)
os.unlink(foreign_platform_dummy_tarball.name)
else:
logv("Skip deploying {}".format(full_maven_name))
else:
pomFile = _tmpPomFile(dist, versionGetter, validateMetadata)
if _opts.very_verbose or (dryRun and _opts.verbose):
with open(pomFile) as f:
log(f.read())
if dist.isJARDistribution():
javadocPath = None
if generateJavadoc:
projects = [p for p in dist.archived_deps() if p.isJavaProject()]
tmpDir = tempfile.mkdtemp(prefix='mx-javadoc')
javadocArgs = ['--base', tmpDir, '--unified', '--projects', ','.join((p.name for p in projects))]
if dist.javadocType == 'implementation':
javadocArgs += ['--implementation']
else:
assert dist.javadocType == 'api'
if dist.allowsJavadocWarnings:
javadocArgs += ['--allow-warnings']
javadoc(javadocArgs, includeDeps=False, mayBuild=False, quietForNoPackages=True)
tmpJavadocJar = tempfile.NamedTemporaryFile('w', suffix='.jar', delete=False)
tmpJavadocJar.close()
javadocPath = tmpJavadocJar.name
emptyJavadoc = True
with zipfile.ZipFile(javadocPath, 'w', compression=zipfile.ZIP_DEFLATED) as arc:
javadocDir = join(tmpDir, 'javadoc')
for (dirpath, _, filenames) in os.walk(javadocDir):
for filename in filenames:
emptyJavadoc = False
src = join(dirpath, filename)
dst = os.path.relpath(src, javadocDir)
arc.write(src, dst)
shutil.rmtree(tmpDir)
if emptyJavadoc:
if validateMetadata == 'full' and dist.suite.getMxCompatibility().validate_maven_javadoc():
raise abort("Missing javadoc for {}".format(dist.name))
javadocPath = None
warn('Javadoc for {0} was empty'.format(dist.name))
extraFiles = []
if deployMapFiles and dist.is_stripped():
extraFiles.append((dist.strip_mapping_file(), 'proguard', 'map'))
if repo_metadata_name:
extraFiles.append((repo_metadata_name, 'suite-revisions', 'xml'))
jar_to_deploy = dist.path
if isinstance(dist.maven, dict):
deployment_module_info = dist.maven.get('moduleInfo')
if deployment_module_info:
jdk = get_jdk(dist.maxJavaCompliance())
if jdk.javaCompliance <= '1.8':
warn('Distribution with "moduleInfo" sub-attribute of the "maven" attribute deployed with JAVA_HOME <= 8', context=dist)
else:
jmd = as_java_module(dist, jdk)
if not jmd.alternatives:
abort('"moduleInfo" sub-attribute of the "maven" attribute specified but distribution does not contain any "moduleInfo:*" attributes', context=dist)
alt_jmd = jmd.alternatives.get(deployment_module_info)
if not alt_jmd:
abort('"moduleInfo" sub-attribute of the "maven" attribute specifies non-existing "moduleInfo:{}" attribute'.format(deployment_module_info), context=dist)
jar_to_deploy = alt_jmd.jarpath
pushed_file = dist.prePush(jar_to_deploy)
pushed_src_file = dist.prePush(dist.sourcesPath)
_deploy_binary_maven(dist.suite, dist.maven_artifact_id(), dist.maven_group_id(), pushed_file, versionGetter(dist.suite), repo,
srcPath=pushed_src_file,
settingsXml=settingsXml,
extension=dist.remoteExtension(),
dryRun=dryRun,
pomFile=pomFile,
gpg=gpg, keyid=keyid,
javadocPath=javadocPath,
extraFiles=extraFiles)
if pushed_file != jar_to_deploy:
os.unlink(pushed_file)
if pushed_src_file != dist.sourcesPath:
os.unlink(pushed_src_file)
if javadocPath:
os.unlink(javadocPath)
elif dist.isTARDistribution() or dist.isZIPDistribution():
extraFiles = []
if repo_metadata_name:
extraFiles.append((repo_metadata_name, 'suite-revisions', 'xml'))
_deploy_binary_maven(dist.suite, dist.maven_artifact_id(), dist.maven_group_id(), dist.prePush(dist.path), versionGetter(dist.suite), repo,
settingsXml=settingsXml,
extension=dist.remoteExtension(),
dryRun=dryRun,
pomFile=pomFile,
gpg=gpg, keyid=keyid,
extraFiles=extraFiles)
else:
abort_or_warn('Unsupported distribution: ' + dist.name, dist.suite.getMxCompatibility().maven_deploy_unsupported_is_error())
os.unlink(pomFile)
if repo_metadata_name:
os.unlink(repo_metadata_name)
def _dist_matcher(dist, tags, all_distributions, only, skip, all_distribution_types):
maven = getattr(dist, 'maven', False)
if tags is not None:
maven_tag = 'default'
if isinstance(maven, dict) and 'tag' in maven:
maven_tag = maven['tag']
if maven_tag not in tags:
return False
if all_distributions:
return True
if not dist.isJARDistribution() and not all_distribution_types:
return False
if only is not None:
return any(fnmatch.fnmatch(dist.name, o) or fnmatch.fnmatch(dist.qualifiedName(), o) for o in only)
if skip is not None and any(fnmatch.fnmatch(dist.name, s) or fnmatch.fnmatch(dist.qualifiedName(), s) for s in skip):
return False
return getattr(dist, 'maven', False) and not dist.is_test_distribution()
def maven_deploy(args):
"""deploy jars for the primary suite to remote maven repository
All binaries must be built first using 'mx build'.
"""
parser = ArgumentParser(prog='mx maven-deploy')
    parser.add_argument('-s', '--settings', action='store', help='Path to settings.xml file used for Maven')
parser.add_argument('-n', '--dry-run', action='store_true', help='Dry run that only prints the action a normal run would perform without actually deploying anything')
parser.add_argument('--all-suites', action='store_true', help='Deploy suite and the distributions it depends on in other suites')
parser.add_argument('--only', action='store', help='Comma-separated list of globs of distributions to be deployed')
parser.add_argument('--skip', action='store', help='Comma-separated list of globs of distributions not to be deployed')
parser.add_argument('--skip-existing', action='store_true', help='Do not deploy distributions if already in repository')
parser.add_argument('--validate', help='Validate that maven metadata is complete enough for publication', default='compat', choices=['none', 'compat', 'full'])
parser.add_argument('--suppress-javadoc', action='store_true', help='Suppress javadoc generation and deployment')
parser.add_argument('--all-distribution-types', help='Include all distribution types. By default, only JAR distributions are included', action='store_true')
parser.add_argument('--all-distributions', help='Include all distributions, regardless of the maven flags.', action='store_true')
parser.add_argument('--version-string', action='store', help='Provide custom version string for deployment')
parser.add_argument('--licenses', help='Comma-separated list of licenses that are cleared for upload. Only used if no url is given. Otherwise licenses are looked up in suite.py', default='')
parser.add_argument('--gpg', action='store_true', help='Sign files with gpg before deploying')
parser.add_argument('--gpg-keyid', help='GPG keyid to use when signing files (implies --gpg)', default=None)
parser.add_argument('--tags', help='Comma-separated list of tags to match in the maven metadata of the distribution. When left unspecified, no filtering is done. The default tag is \'default\'', default=None)
parser.add_argument('--with-suite-revisions-metadata', help='Deploy suite revisions metadata file', action='store_true')
parser.add_argument('repository_id', metavar='repository-id', nargs='?', action='store', help='Repository ID used for Maven deploy')
parser.add_argument('url', metavar='repository-url', nargs='?', action='store', help='Repository URL used for Maven deploy, if no url is given, the repository-id is looked up in suite.py')
args = parser.parse_args(args)
if args.gpg_keyid and not args.gpg:
args.gpg = True
logv('Implicitly setting gpg to true since a keyid was specified')
_mvn.check()
def versionGetter(suite):
if args.version_string:
return args.version_string
return suite.release_version(snapshotSuffix='SNAPSHOT')
if args.all_suites:
_suites = suites()
else:
_suites = primary_or_specific_suites()
tags = args.tags.split(',') if args.tags is not None else None
only = args.only.split(',') if args.only is not None else None
skip = args.skip.split(',') if args.skip is not None else None
has_deployed_dist = False
for s in _suites:
dists = [d for d in s.dists if _dist_matcher(d, tags, args.all_distributions, only, skip, args.all_distribution_types)]
if args.url:
licenses = get_license(args.licenses.split(','))
repo = Repository(None, args.repository_id, args.url, args.url, licenses)
elif args.repository_id:
if not s.getMxCompatibility().supportsRepositories():
abort("Repositories are not supported in {}'s suite version".format(s.name))
repo = repository(args.repository_id)
else:
repo = maven_local_repository()
dists = _deploy_skip_existing(args, dists, versionGetter(s), repo)
if not dists and not args.all_suites:
warn("No distribution to deploy in " + s.name)
continue
for dist in dists:
if not dist.exists():
abort("'{0}' is not built, run 'mx build' first".format(dist.name))
generateJavadoc = None if args.suppress_javadoc else s.getMxCompatibility().mavenDeployJavadoc()
action = 'Installing' if repo == maven_local_repository() else 'Deploying'
log('{} {} distributions for version {}'.format(action, s.name, versionGetter(s)))
_maven_deploy_dists(dists, versionGetter, repo, args.settings,
dryRun=args.dry_run,
validateMetadata=args.validate,
gpg=args.gpg,
keyid=args.gpg_keyid,
generateJavadoc=generateJavadoc,
deployRepoMetadata=args.with_suite_revisions_metadata)
has_deployed_dist = True
if not has_deployed_dist:
abort("No distribution was deployed!")
def maven_url(args):
_artifact_url(args, 'mx maven-url', 'mx maven-deploy', lambda s: s.release_version('SNAPSHOT'))
def binary_url(args):
def snapshot_version(suite):
if suite.vc:
            return '{0}-SNAPSHOT'.format(suite.vc.parent(suite.vc_dir))
else:
abort('binary_url requires suite to be under a vcs repository')
_artifact_url(args, 'mx binary-url', 'mx deploy-binary', snapshot_version)
def _artifact_url(args, prog, deploy_prog, snapshot_version_fun):
parser = ArgumentParser(prog=prog)
parser.add_argument('repository_id', action='store', help='Repository name')
parser.add_argument('dist_name', action='store', help='Distribution name')
parser.add_argument('--no-sha1', action='store_false', dest='sha1', help='Do not display the URL of the .sha1 file')
args = parser.parse_args(args)
repo = repository(args.repository_id)
dist = distribution(args.dist_name)
group_id = dist.maven_group_id()
artifact_id = dist.maven_artifact_id()
snapshot_version = snapshot_version_fun(dist.suite)
extension = dist.remoteExtension()
maven_repo = MavenRepo(repo.get_url(snapshot_version))
snapshot = maven_repo.getSnapshot(group_id, artifact_id, snapshot_version)
if not snapshot:
url = maven_repo.getSnapshotUrl(group_id, artifact_id, snapshot_version)
abort('Version {} not found for {}:{} ({})\nNote that the binary must have been deployed with `{}`'.format(snapshot_version, group_id, artifact_id, url, deploy_prog))
build = snapshot.getCurrentSnapshotBuild()
try:
url, sha1_url = build.getSubArtifact(extension)
print(url)
if args.sha1:
print(sha1_url)
except MavenSnapshotArtifact.NonUniqueSubArtifactException:
abort('Multiple {}s found for {} in snapshot {} in repository {}'.format(extension, dist.remoteName(), build.version, maven_repo.repourl))
class MavenConfig:
def __init__(self):
self.has_maven = None
self.missing = 'no mvn executable found'
def check(self, abortOnError=True):
if self.has_maven is None:
try:
run_maven(['--version'], out=lambda e: None)
self.has_maven = True
except OSError:
self.has_maven = False
warn(self.missing)
if not self.has_maven:
if abortOnError:
abort(self.missing)
else:
warn(self.missing)
return self if self.has_maven else None
### ~~~~~~~~~~~~~ VC, SCM
class SCMMetadata(object):
def __init__(self, url, read, write):
self.url = url
self.read = read
self.write = write
_dynamic_imports = None
def get_dynamic_imports():
"""
:return: a list of tuples (suite_name, in_subdir)
:rtype: (str, bool)
"""
global _dynamic_imports
if _dynamic_imports is None:
dynamic_imports_from_env = get_env('DYNAMIC_IMPORTS')
dynamic_imports = dynamic_imports_from_env.split(',') if dynamic_imports_from_env else []
if _opts.dynamic_imports:
for opt in _opts.dynamic_imports:
dynamic_imports += opt.split(',')
else:
env_dynamic_imports = os.environ.get('DEFAULT_DYNAMIC_IMPORTS')
if env_dynamic_imports:
dynamic_imports += env_dynamic_imports.split(',')
_dynamic_imports = []
for dynamic_import in dynamic_imports:
idx = dynamic_import.find('/')
if idx < 0:
_dynamic_imports.append((dynamic_import, False))
else:
_dynamic_imports.append((dynamic_import[idx + 1:], True))
return _dynamic_imports
### ~~~~~~~~~~~~~ XML
class XMLElement(xml.dom.minidom.Element):
def writexml(self, writer, indent="", addindent="", newl=""):
writer.write(indent + "<" + self.tagName)
attrs = self._get_attributes()
a_names = sorted(attrs.keys())
for a_name in a_names:
writer.write(" %s=\"" % a_name)
xml.dom.minidom._write_data(writer, attrs[a_name].value)
writer.write("\"")
if self.childNodes:
if not self.ownerDocument.padTextNodeWithoutSiblings and len(self.childNodes) == 1 and isinstance(self.childNodes[0], xml.dom.minidom.Text):
# if the only child of an Element node is a Text node, then the
# text is printed without any indentation or new line padding
writer.write(">")
self.childNodes[0].writexml(writer)
writer.write("</%s>%s" % (self.tagName, newl))
else:
writer.write(">%s" % (newl))
for node in self.childNodes:
node.writexml(writer, indent + addindent, addindent, newl)
writer.write("%s</%s>%s" % (indent, self.tagName, newl))
else:
writer.write("/>%s" % (newl))
class XMLDoc(xml.dom.minidom.Document):
def __init__(self):
xml.dom.minidom.Document.__init__(self)
self.current = self
self.padTextNodeWithoutSiblings = False
def createElement(self, tagName):
# overwritten to create XMLElement
e = XMLElement(tagName)
e.ownerDocument = self
return e
def comment(self, txt):
self.current.appendChild(self.createComment(txt))
def open(self, tag, attributes=None, data=None):
if attributes is None:
attributes = {}
element = self.createElement(tag)
for key, value in attributes.items():
element.setAttribute(key, value)
self.current.appendChild(element)
self.current = element
if data is not None:
element.appendChild(self.createTextNode(data))
return self
def close(self, tag):
assert self.current != self
assert tag == self.current.tagName, str(tag) + ' != ' + self.current.tagName
self.current = self.current.parentNode
return self
def element(self, tag, attributes=None, data=None):
if attributes is None:
attributes = {}
return self.open(tag, attributes, data).close(tag)
def xml(self, indent='', newl='', escape=False, standalone=None):
assert self.current == self
result = _decode(self.toprettyxml(indent, newl, encoding="UTF-8"))
if not result.startswith('<?xml'):
# include xml tag if it's not already included
result = '<?xml version="1.0" encoding="UTF-8"?>\n' + result
if escape:
            entities = {'"': "&quot;", "'": "&apos;", '\n': "&#10;"}
result = xml.sax.saxutils.escape(result, entities)
if standalone is not None:
result = result.replace('encoding="UTF-8"?>', 'encoding="UTF-8" standalone="' + str(standalone) + '"?>')
return result
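# Illustrative usage sketch for the XMLDoc builder above (hypothetical element names,
# not executed here):
#
#   doc = XMLDoc()
#   doc.open('project', attributes={'name': 'example'})
#   doc.element('description', data='demo')   # open + close in a single call
#   doc.close('project')
#   content = doc.xml(indent='  ', newl='\n')
#
# Elements whose only child is a text node are written on one line
# (see XMLElement.writexml above).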
mx_subst.results_substitutions.register_no_arg('os', get_os)
def get_opts():
"""
Gets the parsed command line options.
"""
assert _argParser.parsed is True
return _opts
### ~~~~~~~~~~~~~ Project
def projects_from_names(projectNames):
"""
Get the list of projects corresponding to projectNames; all projects if None
"""
if projectNames is None:
return projects()
else:
return [project(name) for name in projectNames]
def projects(opt_limit_to_suite=False, limit_to_primary=False):
"""
Get the list of all loaded projects limited by --suite option if opt_limit_to_suite == True and by primary suite if limit_to_primary == True
"""
sortedProjects = sorted((p for p in _projects.values() if not p.suite.internal))
if opt_limit_to_suite:
sortedProjects = _dependencies_opt_limit_to_suites(sortedProjects)
if limit_to_primary:
sortedProjects = _dependencies_limited_to_suites(sortedProjects, [primary_suite().name])
return sortedProjects
def projects_opt_limit_to_suites():
"""
Get the list of all loaded projects optionally limited by --suite option
"""
return projects(opt_limit_to_suite=True)
def _dependencies_limited_to_suites(deps, suites):
result = []
for d in deps:
s = d.suite
if s.name in suites:
result.append(d)
return result
def _dependencies_opt_limit_to_suites(deps):
if not _opts.specific_suites:
return deps
else:
return _dependencies_limited_to_suites(deps, _opts.specific_suites)
def annotation_processors():
"""
Gets the list of all projects that are part of an annotation processor.
"""
global _annotationProcessorProjects
if _annotationProcessorProjects is None:
aps = set()
for p in projects():
if p.isJavaProject():
for ap in p.annotation_processors():
if ap.isJARDistribution():
for d in ap.archived_deps():
if d.isProject():
aps.add(d)
_annotationProcessorProjects = list(aps)
return _annotationProcessorProjects
def get_license(names, fatalIfMissing=True, context=None):
def get_single_licence(name):
if isinstance(name, License):
return name
_, name = splitqualname(name)
l = _licenses.get(name)
if l is None and fatalIfMissing:
abort('license named ' + name + ' not found', context=context)
return l
if isinstance(names, str):
names = [names]
return [get_single_licence(name) for name in names]
def repository(name, fatalIfMissing=True, context=None):
""" :rtype: Repository"""
_, name = splitqualname(name)
r = _repositories.get(name)
if r is None and fatalIfMissing:
abort('repository named ' + name + ' not found among ' + str(list(_repositories.keys())), context=context)
return r
def splitqualname(name):
pname = name.partition(":")
if pname[0] != name:
return pname[0], pname[2]
else:
return None, name
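# Example (sketch, illustrative names): splitqualname separates an optional suite
# qualifier from a name:
#   splitqualname('compiler:GRAAL')  # -> ('compiler', 'GRAAL')
#   splitqualname('GRAAL')           # -> (None, 'GRAAL')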
def _patchTemplateString(s, args, context):
def _replaceVar(m):
groupName = m.group(1)
        if groupName not in args:
abort("Unknown parameter {}".format(groupName), context=context)
return args[groupName]
return re.sub(r'<(.+?)>', _replaceVar, s)
### Distribution
def instantiatedDistributionName(name, args, context):
return _patchTemplateString(name, args, context).upper()
def reInstantiateDistribution(templateName, oldArgs, newArgs):
_, name = splitqualname(templateName)
context = "Template distribution " + name
t = _distTemplates.get(name)
if t is None:
abort('Distribution template named ' + name + ' not found', context=context)
oldName = instantiatedDistributionName(t.name, oldArgs, context)
oldDist = t.suite._unload_unregister_distribution(oldName)
newDist = instantiateDistribution(templateName, newArgs)
newDist.update_listeners.update(oldDist.update_listeners)
def instantiateDistribution(templateName, args, fatalIfMissing=True, context=None):
_, name = splitqualname(templateName)
if not context:
context = "Template distribution " + name
t = _distTemplates.get(name)
if t is None and fatalIfMissing:
abort('Distribution template named ' + name + ' not found', context=context)
missingParams = [p for p in t.parameters if p not in args]
if missingParams:
abort('Missing parameters while instantiating distribution template ' + t.name + ': ' + ', '.join(missingParams), context=t)
def _patch(v):
if isinstance(v, str):
return _patchTemplateString(v, args, context)
elif isinstance(v, dict):
return {kk: _patch(vv) for kk, vv in v.items()}
elif isinstance(v, list):
return [_patch(e) for e in v]
else:
return v
d = t.suite._load_distribution(instantiatedDistributionName(t.name, args, context), _patch(t.attrs))
if d is None and fatalIfMissing:
abort('distribution template ' + t.name + ' could not be instantiated with ' + str(args), context=t)
t.suite._register_distribution(d)
d.resolveDeps()
d.post_init()
return d
def _get_reasons_dep_was_removed(name, indent):
"""
Gets the causality chain for the dependency named `name` being removed.
Returns None if no dependency named `name` was removed.
"""
reason = _removedDeps.get(name)
if reason:
if isinstance(reason, tuple):
primary, secondary = reason
else:
primary = reason
secondary = []
causes = []
r = _get_reasons_dep_was_removed(primary, indent + 1)
if r:
causes.append('{}{} was removed because {} was removed:'.format(' ' * indent, name, primary))
causes.extend(r)
else:
causes.append((' ' * indent) + primary + (':' if secondary else ''))
for s in secondary:
r = _get_reasons_dep_was_removed(s, indent + 1)
if r:
causes.extend(r)
else:
causes.append((' ' * indent) + s)
return causes
return None
def _missing_dep_message(depName, depType):
reasons = _get_reasons_dep_was_removed(depName, 1)
if reasons:
return '{} named {} was removed:\n{}'.format(depType, depName, '\n'.join(reasons))
return '{} named {} was not found'.format(depType, depName)
def distribution(name, fatalIfMissing=True, context=None):
"""
Get the distribution for a given name. This will abort if the named distribution does
not exist and 'fatalIfMissing' is true.
:rtype: Distribution
"""
_, name = splitqualname(name)
d = _dists.get(name)
if d is None and fatalIfMissing:
abort(_missing_dep_message(name, 'distribution'), context=context)
return d
def dependency(name, fatalIfMissing=True, context=None):
"""
    Get the project, library or dependency for a given name. This will abort if no
    dependency exists for 'name' and 'fatalIfMissing' is true.
"""
if isinstance(name, Dependency):
return name
suite_name, name = splitqualname(name)
if suite_name:
# reference to a distribution or library from a suite
referencedSuite = suite(suite_name, context=context)
if referencedSuite:
d = referencedSuite.dependency(name, fatalIfMissing=False, context=context)
if d:
return d
else:
if fatalIfMissing:
abort('cannot resolve ' + name + ' as a dependency defined by ' + suite_name, context=context)
return None
d = _projects.get(name)
if d is None:
d = _libs.get(name)
if d is None:
d = _jreLibs.get(name)
if d is None:
d = _jdkLibs.get(name)
if d is None:
d = _dists.get(name)
if d is None and fatalIfMissing:
if hasattr(_opts, 'ignored_projects') and name in _opts.ignored_projects:
abort('dependency named ' + name + ' is ignored', context=context)
abort(_missing_dep_message(name, 'dependency'), context=context)
return d
def project(name, fatalIfMissing=True, context=None):
"""
Get the project for a given name. This will abort if the named project does
not exist and 'fatalIfMissing' is true.
:rtype: Project
"""
_, name = splitqualname(name)
p = _projects.get(name)
if p is None and fatalIfMissing:
if name in _opts.ignored_projects:
abort('project named ' + name + ' is ignored', context=context)
abort(_missing_dep_message(name, 'project'), context=context)
return p
def library(name, fatalIfMissing=True, context=None):
"""
Gets the library for a given name. This will abort if the named library does
not exist and 'fatalIfMissing' is true.
As a convenience, if 'fatalIfMissing' is False, optional libraries that are not
available are not returned ('None' is returned instead).
:rtype: BaseLibrary
"""
l = _libs.get(name) or _jreLibs.get(name) or _jdkLibs.get(name)
if l is None and fatalIfMissing:
if _projects.get(name):
abort(name + ' is a project, not a library', context=context)
raise abort(_missing_dep_message(name, 'library'), context=context)
if not fatalIfMissing and l and l.optional and not l.is_available():
return None
return l
def classpath_entries(names=None, includeSelf=True, preferProjects=False, excludes=None):
"""
Gets the transitive set of dependencies that need to be on the class path
given the root set of projects and distributions in `names`.
:param names: a Dependency, str or list containing Dependency/str objects
:type names: list or Dependency or str
:param bool includeSelf: whether to include any of the dependencies in `names` in the returned list
:param bool preferProjects: for a JARDistribution dependency, specifies whether to include
            it in the returned list (False) or to instead put its constituent dependencies on
            the returned list (True)
:return: a list of Dependency objects representing the transitive set of dependencies that should
be on the class path for something depending on `names`
:rtype: list[ClasspathDependency]
"""
if names is None:
roots = set(dependencies())
else:
if isinstance(names, str):
names = [names]
elif isinstance(names, Dependency):
names = [names]
roots = [dependency(n) for n in names]
invalid = [d for d in roots if not isinstance(d, ClasspathDependency)]
if invalid:
abort('class path roots must be classpath dependencies: ' + str(invalid))
if not roots:
return []
if excludes is None:
excludes = []
else:
if isinstance(excludes, str):
excludes = [excludes]
elif isinstance(excludes, Dependency):
excludes = [excludes]
excludes = [dependency(n) for n in excludes]
assert len(set(roots) & set(excludes)) == 0
cpEntries = []
def _preVisit(dst, edge):
if not isinstance(dst, ClasspathDependency):
return False
if dst in excludes:
return False
if edge and edge.src.isLayoutJARDistribution():
return False
if dst in roots:
return True
if edge and edge.src.isJARDistribution() and edge.kind == DEP_STANDARD:
if isinstance(edge.src.suite, BinarySuite) or not preferProjects:
return dst.isJARDistribution()
else:
return dst.isProject()
return True
def _visit(dep, edge):
if preferProjects and dep.isJARDistribution() and not isinstance(dep.suite, BinarySuite):
return
if not includeSelf and dep in roots:
return
cpEntries.append(dep)
walk_deps(roots=roots, visit=_visit, preVisit=_preVisit, ignoredEdges=[DEP_ANNOTATION_PROCESSOR, DEP_BUILD])
return cpEntries
def _entries_to_classpath(cpEntries, resolve=True, includeBootClasspath=False, jdk=None, unique=False, ignoreStripped=False, cp_prefix=None, cp_suffix=None):
cp = []
jdk = jdk or get_jdk()
bcp_str = jdk.bootclasspath()
bcp = bcp_str.split(os.pathsep) if bcp_str else []
def _filecmp(a, b):
if not exists(a) or not exists(b):
return a == b
return filecmp.cmp(a, b)
def _appendUnique(cp_addition):
for new_path in cp_addition.split(os.pathsep):
if (not unique or not any((_filecmp(d, new_path) for d in cp))) \
and (includeBootClasspath or not any((_filecmp(d, new_path) for d in bcp))):
cp.append(new_path)
if includeBootClasspath:
if bcp_str:
_appendUnique(bcp_str)
if _opts.cp_prefix is not None:
_appendUnique(_opts.cp_prefix)
if cp_prefix is not None:
_appendUnique(cp_prefix)
for dep in cpEntries:
if dep.isJdkLibrary() or dep.isJreLibrary():
cp_repr = dep.classpath_repr(jdk, resolve=resolve)
elif dep.isJARDistribution() and ignoreStripped:
cp_repr = dep.original_path()
else:
cp_repr = dep.classpath_repr(resolve)
if cp_repr:
_appendUnique(cp_repr)
if cp_suffix is not None:
_appendUnique(cp_suffix)
if _opts.cp_suffix is not None:
_appendUnique(_opts.cp_suffix)
return os.pathsep.join(cp)
def classpath(names=None, resolve=True, includeSelf=True, includeBootClasspath=False, preferProjects=False, jdk=None, unique=False, ignoreStripped=False):
"""
Get the class path for a list of named projects and distributions, resolving each entry in the
path (e.g. downloading a missing library) if 'resolve' is true. If 'names' is None,
then all registered dependencies are used.
"""
cpEntries = classpath_entries(names=names, includeSelf=includeSelf, preferProjects=preferProjects)
return _entries_to_classpath(cpEntries=cpEntries, resolve=resolve, includeBootClasspath=includeBootClasspath, jdk=jdk, unique=unique, ignoreStripped=ignoreStripped)
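# Usage sketch (hypothetical distribution and class names): build a class path string
# for a distribution and its transitive classpath dependencies, then run a program on it:
#
#   cp = classpath('MY_DIST')
#   run_java(['-cp', cp, 'com.example.Main'])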
def get_runtime_jvm_args(names=None, cp_prefix=None, cp_suffix=None, jdk=None, exclude_names=None):
"""
Get the VM arguments (e.g. classpath and system properties) for a list of named projects and
distributions. If 'names' is None, then all registered dependencies are used. 'exclude_names'
can be used to transitively exclude dependencies from the final classpath result.
"""
cpEntries = classpath_entries(names=names)
if exclude_names:
for excludeEntry in classpath_entries(names=exclude_names):
if excludeEntry in cpEntries:
cpEntries.remove(excludeEntry)
ret = ["-cp", _separatedCygpathU2W(_entries_to_classpath(cpEntries, cp_prefix=cp_prefix, cp_suffix=cp_suffix, jdk=jdk))]
def add_props(d):
if hasattr(d, "getJavaProperties"):
for key, value in sorted(d.getJavaProperties().items()):
ret.append("-D" + key + "=" + value)
for dep in cpEntries:
add_props(dep)
# also look through the individual projects inside all distributions on the classpath
if dep.isDistribution():
for project in dep.archived_deps():
add_props(project)
return ret
def classpath_walk(names=None, resolve=True, includeSelf=True, includeBootClasspath=False, jdk=None):
"""
Walks the resources available in a given classpath, yielding a tuple for each resource
where the first member of the tuple is a directory path or ZipFile object for a
classpath entry and the second member is the qualified path of the resource relative
to the classpath entry.
"""
cp = classpath(names, resolve, includeSelf, includeBootClasspath, jdk=jdk)
for entry in cp.split(os.pathsep):
if not exists(entry):
continue
if isdir(entry):
for root, dirs, files in os.walk(entry):
for d in dirs:
entryPath = join(root[len(entry) + 1:], d)
yield entry, entryPath
for f in files:
entryPath = join(root[len(entry) + 1:], f)
yield entry, entryPath
elif entry.endswith('.jar') or entry.endswith('.zip'):
with zipfile.ZipFile(entry, 'r') as zf:
for zi in zf.infolist():
entryPath = zi.filename
yield zf, entryPath
def read_annotation_processors(path):
r"""
    Reads the META-INF/services/javax.annotation.processing.Processor file found
in the directory or zip file located at 'path'. Returns the list of lines
in the file or None if the file does not exist at 'path'.
From http://docs.oracle.com/javase/8/docs/api/java/util/ServiceLoader.html:
A service provider is identified by placing a provider-configuration file in
the resource directory META-INF/services. The file's name is the fully-qualified
binary name of the service's type. The file contains a list of fully-qualified
binary names of concrete provider classes, one per line. Space and tab
characters surrounding each name, as well as blank lines, are ignored.
The comment character is '#' ('\u0023', NUMBER SIGN); on each line all characters
following the first comment character are ignored. The file must be encoded in UTF-8.
"""
def parse(fp):
lines = []
for line in fp:
line = line.split('#')[0].strip()
if line:
lines.append(line)
return lines
if exists(path):
name = 'META-INF/services/javax.annotation.processing.Processor'
if isdir(path):
configFile = join(path, name.replace('/', os.sep))
if exists(configFile):
with open(configFile) as fp:
return parse(fp)
else:
assert path.endswith('.jar') or path.endswith('.zip'), path
with zipfile.ZipFile(path, 'r') as zf:
if name in zf.namelist():
with zf.open(name) as fp:
return parse(fp)
return None
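# Example of the parsed file format (sketch, hypothetical processor name): a jar whose
# META-INF/services/javax.annotation.processing.Processor contains
#
#   # comments and blank lines are ignored
#   com.example.MyProcessor
#
# makes read_annotation_processors(path_to_jar) return ['com.example.MyProcessor'].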
def dependencies(opt_limit_to_suite=False):
"""
    Gets an iterable over all the registered dependencies. If changes are made to the registered
    dependencies during iteration, the behavior of the iterator is undefined. If `opt_limit_to_suite`
    is true, the dependencies are limited to those of the suites named by the --suite option.
"""
it = itertools.chain(_projects.values(), _libs.values(), _dists.values(), _jdkLibs.values(), _jreLibs.values())
if opt_limit_to_suite and _opts.specific_suites:
it = filter(lambda d: d.suite.name in _opts.specific_suites, it)
return it
def defaultDependencies(opt_limit_to_suite=False):
"""
Returns a tuple of removed non-default dependencies (i.e., attribute `defaultBuild=False`) and default dependencies.
"""
deps = []
removedDeps = []
for d in dependencies(opt_limit_to_suite):
if hasattr(d, "defaultBuild"):
if d.defaultBuild is False:
removedDeps.append(d)
elif d.defaultBuild is True:
deps.append(d)
else:
abort('Unsupported value "{}" {} for entry {}. The only supported values are boolean True or False.'.format(d.defaultBuild, type(d.defaultBuild), d.name))
else:
deps.append(d)
return removedDeps, deps
def walk_deps(roots=None, preVisit=None, visit=None, ignoredEdges=None, visitEdge=None):
"""
Walks a spanning tree of the dependency graph. The first time a dependency `dep` is seen, if the
`preVisit` function is None or returns a true condition, then the unvisited dependencies of `dep` are
walked. Once all the dependencies of `dep` have been visited and `visit` is not None,
it is applied with the same arguments as for `preVisit` and the return value is ignored.
Note that `visit` is not called if `preVisit` returns a false condition.
:param roots: from which to start traversing. If None, then `dependencies()` is used
:param preVisit: None or a function called the first time a `Dependency` in the graph is seen.
The arguments passed to this function are the `Dependency` being pre-visited and
a `DepEdge` object representing the last edge in the path of dependencies walked
           to arrive at the `Dependency`.
:param visit: None or a function with same signature as `preVisit`.
:param ignoredEdges: an iterable of values from `DEP_KINDS` specifying edge types to be ignored in the traversal.
If None, then `[DEP_ANNOTATION_PROCESSOR, DEP_EXCLUDED, DEP_BUILD]` will be used.
:param visitEdge: None or a function called for every out edge of a node in the traversed graph.
The arguments passed to this function are the source `Dependency` of the edge, the destination
`Dependency` and a `DepEdge` value for the edge that can also be used to trace the path from
a traversal root to the edge.
"""
visited = set()
for dep in dependencies() if not roots else roots:
dep.walk_deps(preVisit, visit, visited, ignoredEdges, visitEdge)
def sorted_dists():
"""
Gets distributions sorted such that each distribution comes after
any distributions it depends upon.
"""
dists = []
def add_dist(dist):
        if dist not in dists:
for dep in dist.deps:
if dep.isDistribution():
add_dist(dep)
            if dist not in dists:
dists.append(dist)
for d in _dists.values():
add_dist(d)
return dists
def distributions(opt_limit_to_suite=False):
sorted_dists = sorted((d for d in _dists.values() if not d.suite.internal))
if opt_limit_to_suite:
sorted_dists = _dependencies_opt_limit_to_suites(sorted_dists)
return sorted_dists
#: The HotSpot options that have an argument following them on the command line
_VM_OPTS_SPACE_SEPARATED_ARG = ['-mp', '-modulepath', '-limitmods', '-addmods', '-upgrademodulepath', '-m',
'--module-path', '--limit-modules', '--add-modules', '--upgrade-module-path',
'--module', '--module-source-path', '--add-exports', '--add-reads',
'--patch-module', '--boot-class-path', '--source-path']
def extract_VM_args(args, useDoubleDash=False, allowClasspath=False, defaultAllVMArgs=True):
"""
Partitions `args` into a leading sequence of HotSpot VM options and the rest. If
`useDoubleDash` then `args` is partitioned by the first instance of "--". If
not `allowClasspath` then mx aborts if "-cp" or "-classpath" is in `args`.
"""
for i in range(len(args)):
if useDoubleDash:
if args[i] == '--':
vmArgs = args[:i]
remainder = args[i + 1:]
return vmArgs, remainder
else:
if not args[i].startswith('-'):
if i != 0 and (args[i - 1] == '-cp' or args[i - 1] == '-classpath'):
if not allowClasspath:
abort('Cannot supply explicit class path option')
else:
continue
if i != 0 and (args[i - 1] in _VM_OPTS_SPACE_SEPARATED_ARG):
continue
vmArgs = args[:i]
remainder = args[i:]
return vmArgs, remainder
if defaultAllVMArgs:
return args, []
else:
return [], args
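# Example (sketch, hypothetical arguments): with useDoubleDash=False the leading '-'
# options are taken as VM arguments and the first non-option token starts the
# application arguments:
#
#   extract_VM_args(['-Xmx1g', '-ea', 'com.example.Main', '--flag'])
#   # -> (['-Xmx1g', '-ea'], ['com.example.Main', '--flag'])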
def _format_commands():
msg = '\navailable commands:\n'
commands = _mx_commands.commands()
sorted_commands = sorted([k for k in commands.keys() if ':' not in k]) + sorted([k for k in commands.keys() if ':' in k])
msg += _mx_commands.list_commands(sorted_commands)
return msg + '\n'
### ~~~~~~~~~~~~~ JDK
"""
A factory for creating JDKConfig objects.
"""
class JDKFactory:
def getJDKConfig(self):
nyi('getJDKConfig', self)
def description(self):
nyi('description', self)
### ~~~~~~~~~~~~~ Debugging
def is_debug_lib_file(fn):
return fn.endswith(add_debug_lib_suffix(""))
class DisableJavaDebugging(object):
""" Utility for temporarily disabling java remote debugging.
    Should be used in conjunction with the ``with`` keyword, e.g.
```
with DisableJavaDebugging():
# call to JDKConfig.run_java
```
"""
_disabled = False
def __enter__(self):
self.old = DisableJavaDebugging._disabled
DisableJavaDebugging._disabled = True
def __exit__(self, t, value, traceback):
DisableJavaDebugging._disabled = self.old
class DisableJavaDebuggging(DisableJavaDebugging):
def __init__(self, *args, **kwargs):
super(DisableJavaDebuggging, self).__init__(*args, **kwargs)
if primary_suite().getMxCompatibility().excludeDisableJavaDebuggging():
abort('Class DisableJavaDebuggging is deleted in version 5.68.0 as it is misspelled.')
def is_debug_disabled():
return DisableJavaDebugging._disabled
### JDK
def addJDKFactory(tag, compliance, factory):
assert tag != DEFAULT_JDK_TAG
complianceMap = _jdkFactories.setdefault(tag, {})
complianceMap[compliance] = factory
def _getJDKFactory(tag, versionCheck):
if tag not in _jdkFactories:
return None
complianceMap = _jdkFactories[tag]
for compliance in sorted(complianceMap.keys(), reverse=True):
if not versionCheck or versionCheck(VersionSpec(str(compliance))):
return complianceMap[compliance]
return None
"""
A namedtuple for the result of get_jdk_option().
"""
TagCompliance = namedtuple('TagCompliance', ['tag', 'compliance'])
_jdk_option = None
def get_jdk_option():
"""
Gets the tag and compliance (as a TagCompliance object) derived from the --jdk option.
If the --jdk option was not specified, both fields of the returned tuple are None.
"""
global _jdk_option
if _jdk_option is None:
option = _opts.jdk
if not option:
option = os.environ.get('DEFAULT_JDK')
if not option:
jdktag = None
jdkCompliance = None
else:
tag_compliance = option.split(':')
if len(tag_compliance) == 1:
if len(tag_compliance[0]) > 0:
if tag_compliance[0][0].isdigit():
jdktag = None
jdkCompliance = JavaCompliance(tag_compliance[0])
else:
jdktag = tag_compliance[0]
jdkCompliance = None
else:
jdktag = None
jdkCompliance = None
else:
if len(tag_compliance) != 2 or not tag_compliance[0] or not tag_compliance[1]:
abort('Could not parse --jdk argument \'{}\' (should be of the form "[tag:]compliance")'.format(option))
jdktag = tag_compliance[0]
try:
jdkCompliance = JavaCompliance(tag_compliance[1])
except AssertionError as e:
raise abort('Could not parse --jdk argument \'{}\' (should be of the form "[tag:]compliance")\n{}'.format(option, e))
if jdktag and jdktag != DEFAULT_JDK_TAG:
factory = _getJDKFactory(jdktag, jdkCompliance._exact_match if jdkCompliance else None)
if not factory:
if len(_jdkFactories) == 0:
abort("No JDK providers available")
available = []
for t, m in _jdkFactories.items():
for c in m:
available.append('{}:{}'.format(t, c))
abort("No provider for '{}:{}' JDK (available: {})".format(jdktag, jdkCompliance if jdkCompliance else '*', ', '.join(available)))
_jdk_option = TagCompliance(jdktag, jdkCompliance)
return _jdk_option
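# Examples of accepted --jdk values (sketch; the tag name is illustrative):
#   --jdk 11            selects by compliance only
#   --jdk labsjdk       selects by tag only
#   --jdk labsjdk:11    selects by tag and compliance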
DEFAULT_JDK_TAG = 'default'
_jdks_cache = {}
_canceled_jdk_requests = set()
def get_jdk(versionCheck=None, purpose=None, cancel=None, versionDescription=None, tag=None, **kwargs):
"""
Get a JDKConfig object matching the provided criteria.
The JDK is selected by consulting the --jdk option, the --java-home option,
the JAVA_HOME environment variable, the --extra-java-homes option and the
EXTRA_JAVA_HOMES environment variable in that order.
"""
cache_key = (versionCheck, tag)
if cache_key in _jdks_cache:
return _jdks_cache.get(cache_key)
# Precedence for JDK to use:
# 1. --jdk option value
# 2. JDK specified by set_java_command_default_jdk_tag
# 3. JDK selected by DEFAULT_JDK_TAG tag
default_query = versionCheck is None and tag is None
if tag is None:
jdkOpt = get_jdk_option()
if versionCheck is None and jdkOpt.compliance:
versionCheck, versionDescription = jdkOpt.compliance.as_version_check()
tag = jdkOpt.tag if jdkOpt.tag else DEFAULT_JDK_TAG
defaultJdk = default_query and not purpose
# Backwards compatibility support
if kwargs:
assert len(kwargs) == 1 and 'defaultJdk' in kwargs, 'unsupported arguments: ' + str(kwargs)
defaultJdk = kwargs['defaultJdk']
# interpret string and compliance as compliance check
if isinstance(versionCheck, str):
versionCheck = JavaCompliance(versionCheck)
if isinstance(versionCheck, JavaCompliance):
versionCheck, versionDescription = versionCheck.as_version_check()
if tag != DEFAULT_JDK_TAG:
factory = _getJDKFactory(tag, versionCheck)
if factory:
jdk = factory.getJDKConfig()
if jdk.tag is not None:
assert jdk.tag == tag
else:
jdk.tag = tag
else:
jdk = None
if jdk is not None or default_query:
_jdks_cache[cache_key] = jdk
return jdk
global _default_java_home, _extra_java_homes
if cancel and (versionDescription, purpose) in _canceled_jdk_requests:
return None
def abort_not_found():
msg = 'Could not find a JDK'
if versionDescription:
msg += ' ' + versionDescription
if purpose:
msg += ' for ' + purpose
import select_jdk
available = _filtered_jdk_configs(select_jdk.find_system_jdks(), versionCheck)
if available:
msg += '\nThe following JDKs are available:\n ' + '\n '.join(sorted([jdk.home for jdk in available]))
msg += '\nSpecify one with the --java-home or --extra-java-homes option or with the JAVA_HOME or EXTRA_JAVA_HOMES environment variable.'
p = _findPrimarySuiteMxDir()
if p:
msg += '\nOr run `{}/select_jdk.py -p {}` to set and persist these variables in {}.'.format(dirname(__file__), dirname(p), join(p, 'env'))
else:
msg += '\nOr run `{}/select_jdk.py` to set these variables.'.format(dirname(__file__))
abort(msg)
if defaultJdk:
if not _default_java_home:
_default_java_home = _find_jdk(versionCheck=versionCheck, versionDescription=versionDescription)
if not _default_java_home:
if not cancel:
abort_not_found()
assert versionDescription or purpose
_canceled_jdk_requests.add((versionDescription, purpose))
_jdks_cache[cache_key] = _default_java_home
return _default_java_home
existing_java_homes = _extra_java_homes
if _default_java_home:
existing_java_homes.append(_default_java_home)
for jdk in existing_java_homes:
if not versionCheck or versionCheck(jdk.version):
_jdks_cache[cache_key] = jdk
return jdk
jdk = _find_jdk(versionCheck=versionCheck, versionDescription=versionDescription)
if jdk:
assert jdk not in _extra_java_homes
_extra_java_homes = _sorted_unique_jdk_configs(_extra_java_homes + [jdk])
elif not cancel:
abort_not_found()
else:
assert versionDescription or purpose
_canceled_jdk_requests.add((versionDescription, purpose))
_jdks_cache[cache_key] = jdk
return jdk
_warned_about_ignoring_extra_jdks = False
def _find_jdk(versionCheck=None, versionDescription=None):
"""
Selects a JDK and returns a JDKConfig object representing it.
The selection is attempted from the --java-home option, the JAVA_HOME
environment variable, the --extra-java-homes option and the EXTRA_JAVA_HOMES
environment variable in that order.
:param versionCheck: a predicate to be applied when making the selection
    :param versionDescription: a description of `versionCheck` (e.g. ">= 1.8 and < 1.8.0u20 or >= 1.8.0u40")
:return: the JDK selected or None
"""
assert (versionDescription and versionCheck) or (not versionDescription and not versionCheck)
if not versionCheck:
versionCheck = lambda _: True
candidateJdks = []
source = ''
if _opts and _opts.java_home:
candidateJdks.append(_opts.java_home)
source = '--java-home'
elif os.environ.get('JAVA_HOME'):
candidateJdks.append(os.environ.get('JAVA_HOME'))
source = 'JAVA_HOME'
if candidateJdks:
result = _filtered_jdk_configs(candidateJdks, versionCheck, missingIsError=True, source=source)
if result:
if source == '--java-home' and os.environ.get('JAVA_HOME'):
os.environ['JAVA_HOME'] = _opts.java_home
return result[0]
javaHomeCandidateJdks = candidateJdks
candidateJdks = []
if _opts.extra_java_homes:
candidateJdks += _opts.extra_java_homes.split(os.pathsep)
source = '--extra-java-homes'
elif os.environ.get('EXTRA_JAVA_HOMES'):
candidateJdks += os.environ.get('EXTRA_JAVA_HOMES').split(os.pathsep)
source = 'EXTRA_JAVA_HOMES'
if candidateJdks:
if _use_exploded_build():
            # Extra JDKs are ignored when building against an exploded JDK image; warn about this (at most once).
global _warned_about_ignoring_extra_jdks
if not _warned_about_ignoring_extra_jdks:
if javaHomeCandidateJdks != candidateJdks:
warn('Ignoring JDKs specified by {} since MX_BUILD_EXPLODED=true'.format(source))
_warned_about_ignoring_extra_jdks = True
else:
result = _filtered_jdk_configs(candidateJdks, versionCheck, missingIsError=False, source=source)
if result:
return result[0]
return None
_all_jdks = None
def _get_all_jdks():
global _all_jdks
if _all_jdks is None:
if _opts and _opts.java_home:
jdks = _filtered_jdk_configs([_opts.java_home], versionCheck=None, missingIsError=True, source='--java-home')
elif os.environ.get('JAVA_HOME'):
jdks = _filtered_jdk_configs([os.environ.get('JAVA_HOME')], versionCheck=None, missingIsError=True, source='JAVA_HOME')
else:
jdks = []
if _opts.extra_java_homes:
jdks.extend(_filtered_jdk_configs(_opts.extra_java_homes.split(os.pathsep), versionCheck=None, missingIsError=False, source='--extra-java-homes'))
elif os.environ.get('EXTRA_JAVA_HOMES'):
jdks.extend(_filtered_jdk_configs(os.environ.get('EXTRA_JAVA_HOMES').split(os.pathsep), versionCheck=None, missingIsError=False, source='EXTRA_JAVA_HOMES'))
_all_jdks = jdks
return _all_jdks
def _sorted_unique_jdk_configs(configs):
path_seen = set()
unique_configs = [c for c in configs if c.home not in path_seen and not path_seen.add(c.home)]
def _compare_configs(c1, c2):
if c1 == _default_java_home:
if c2 != _default_java_home:
return 1
elif c2 == _default_java_home:
return -1
if c1 in _extra_java_homes:
if c2 not in _extra_java_homes:
return 1
elif c2 in _extra_java_homes:
return -1
return VersionSpec.__cmp__(c1.version, c2.version)
return sorted(unique_configs, key=cmp_to_key(_compare_configs), reverse=True)
def is_interactive():
if is_continuous_integration():
return False
return not sys.stdin.closed and sys.stdin.isatty()
_probed_JDKs = {}
def is_quiet():
return _opts.quiet
def _probe_JDK(home):
res = _probed_JDKs.get(home)
if not res:
try:
res = JDKConfig(home)
except JDKConfigException as e:
res = e
_probed_JDKs[home] = res
return res
def _filtered_jdk_configs(candidates, versionCheck, missingIsError=False, source=None):
filtered = []
for candidate in candidates:
jdk = _probe_JDK(candidate)
if isinstance(jdk, JDKConfigException):
if source:
message = 'Path in ' + source + ' is not pointing to a JDK (' + str(jdk) + '): ' + candidate
if is_darwin():
candidate = join(candidate, 'Contents', 'Home')
if not isinstance(_probe_JDK(candidate), JDKConfigException):
message += '. Set ' + source + ' to ' + candidate + ' instead.'
if missingIsError:
abort(message)
else:
warn(message)
else:
if not versionCheck or versionCheck(jdk.version):
filtered.append(jdk)
return filtered
def find_classpath_arg(vmArgs):
"""
Searches for the last class path argument in `vmArgs` and returns its
index and value as a tuple. If no class path argument is found, then
the tuple (None, None) is returned.
"""
# If the last argument is '-cp' or '-classpath' then it is not
# valid since the value is missing. As such, we ignore the
# last argument.
for index in reversed(range(len(vmArgs) - 1)):
if vmArgs[index] in ['-cp', '-classpath']:
return index + 1, vmArgs[index + 1]
return None, None
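# Example (sketch, hypothetical values):
#   find_classpath_arg(['-ea', '-cp', 'foo.jar', 'Main'])  # -> (2, 'foo.jar')
#   find_classpath_arg(['-ea', 'Main'])                    # -> (None, None)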
_java_command_default_jdk_tag = None
def set_java_command_default_jdk_tag(tag):
global _java_command_default_jdk_tag
assert _java_command_default_jdk_tag is None, 'TODO: need policy for multiple attempts to set the default JDK for the "java" command'
_java_command_default_jdk_tag = tag
### Java command
def java_command(args):
"""run the java executable in the selected JDK
The JDK is selected by consulting the --jdk option, the --java-home option,
the JAVA_HOME environment variable, the --extra-java-homes option and the
EXTRA_JAVA_HOMES environment variable in that order.
"""
run_java(args)
def run_java(args, nonZeroIsFatal=True, out=None, err=None, cwd=None, timeout=None, env=None, addDefaultArgs=True, jdk=None):
"""
Runs a Java program by executing the java executable in a JDK.
"""
if jdk is None:
jdk = get_jdk()
return jdk.run_java(args, nonZeroIsFatal=nonZeroIsFatal, out=out, err=err, cwd=cwd, timeout=timeout, env=env, addDefaultArgs=addDefaultArgs)
def run_java_min_heap(args, benchName='# MinHeap:', overheadFactor=1.5, minHeap=0, maxHeap=2048, repetitions=1, out=None, err=None, cwd=None, timeout=None, env=None, addDefaultArgs=True, jdk=None, run_with_heap=None):
"""computes the minimum heap size required to run a Java program within a certain overhead factor"""
assert minHeap <= maxHeap
def _run_with_heap(heap, args, timeout, suppressStderr=True, nonZeroIsFatal=False):
log('Trying with %sMB of heap...' % heap)
with open(os.devnull, 'w') as fnull:
vmArgs, pArgs = extract_VM_args(args=args, useDoubleDash=False, allowClasspath=True, defaultAllVMArgs=True)
exitCode = run_java(vmArgs + ['-Xmx%dM' % heap] + pArgs, nonZeroIsFatal=nonZeroIsFatal, out=out, err=fnull if suppressStderr else err, cwd=cwd, timeout=timeout, env=env, addDefaultArgs=addDefaultArgs, jdk=jdk)
if exitCode:
log('failed')
else:
log('succeeded')
return exitCode
run_with_heap = run_with_heap or _run_with_heap
if overheadFactor > 0:
t = time.time()
if run_with_heap(maxHeap, args, timeout, suppressStderr=False):
log('The command line is wrong, there is a bug in the program, or the reference heap (%sMB) is too low.' % maxHeap)
return 1
referenceTime = round(time.time() - t, 2)
maxTime = round(referenceTime * overheadFactor, 2)
log('Reference time = ' + str(referenceTime))
log('Maximum time = ' + str(maxTime))
else:
maxTime = None
currMin = minHeap
currMax = maxHeap
lastSuccess = None
while currMax >= currMin:
logv('Min = %s; Max = %s' % (currMin, currMax))
avg = int((currMax + currMin) / 2)
successful = 0
while successful < repetitions:
if run_with_heap(avg, args, maxTime):
break
successful += 1
if successful == repetitions:
lastSuccess = avg
currMax = avg - 1
else:
currMin = avg + 1
# We cannot bisect further. The last successful attempt is the result.
_log = out if out is not None else log
_log('%s %s' % (benchName, lastSuccess))
return 0 if lastSuccess is not None else 2
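# Usage sketch (hypothetical benchmark class): find the smallest -Xmx in MB with which
# the program still finishes within overheadFactor times its runtime at maxHeap:
#
#   run_java_min_heap(['com.example.Benchmark'], overheadFactor=1.5, minHeap=16, maxHeap=2048)
#
# The result is logged on a line starting with benchName ('# MinHeap:' by default).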
def _kill_process(pid, sig):
"""
Sends the signal `sig` to the process identified by `pid`. If `pid` is a process group
    leader, then the signal is sent to the process group id.
"""
try:
logvv('[{} sending {} to {}]'.format(os.getpid(), sig, pid))
pgid = os.getpgid(pid)
if pgid == pid:
os.killpg(pgid, sig)
else:
os.kill(pid, sig)
return True
except Exception as e: # pylint: disable=broad-except
log('Error killing subprocess ' + str(pid) + ': ' + str(e))
return False
def _waitWithTimeout(process, cmd_line, timeout, nonZeroIsFatal=True):
def _waitpid(pid):
while True:
try:
return os.waitpid(pid, os.WNOHANG)
except OSError as e:
if e.errno == errno.EINTR:
continue
raise
def _returncode(status):
if os.WIFSIGNALED(status):
return -os.WTERMSIG(status)
elif os.WIFEXITED(status):
return os.WEXITSTATUS(status)
else:
# Should never happen
raise RuntimeError("Unknown child exit status!")
end = time.time() + timeout
delay = 0.0005
while True:
(pid, status) = _waitpid(process.pid)
if pid == process.pid:
return _returncode(status)
remaining = end - time.time()
if remaining <= 0:
msg = 'Process timed out after {0} seconds: {1}'.format(timeout, cmd_line)
if nonZeroIsFatal:
abort(msg)
else:
log(msg)
_kill_process(process.pid, signal.SIGKILL)
return ERROR_TIMEOUT
delay = min(delay * 2, remaining, .05)
time.sleep(delay)
# Makes the current subprocess accessible to the abort() function
# This is a list of tuples of the subprocess.Popen or
# multiprocessing.Process object and args.
_currentSubprocesses = []
def _addSubprocess(p, args):
entry = (p, args)
logvv('[{}: started subprocess {}: {}]'.format(os.getpid(), p.pid, args))
_currentSubprocesses.append(entry)
return entry
def _removeSubprocess(entry):
if entry and entry in _currentSubprocesses:
try:
_currentSubprocesses.remove(entry)
except:
pass
def waitOn(p):
if is_windows():
# on windows use a poll loop, otherwise signal does not get handled
retcode = None
while retcode is None:
retcode = p.poll()
time.sleep(0.05)
else:
retcode = p.wait()
return retcode
def _parse_http_proxy(envVarNames):
"""
Parses the value of the first existing environment variable named
in `envVarNames` into a host and port tuple where port is None if
it's not present in the environment variable.
"""
p = re.compile(r'(?:https?://)?([^:]+):?(\d+)?/?$')
for name in envVarNames:
value = get_env(name)
if value:
m = p.match(value)
if m:
return m.group(1), m.group(2)
else:
abort("Value of " + name + " is not valid: " + value)
return (None, None)
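# Example (sketch, hypothetical host): with HTTP_PROXY=http://proxy.example.com:3128 set,
# _parse_http_proxy(["HTTP_PROXY", "http_proxy"]) returns ('proxy.example.com', '3128');
# if the port is omitted, the second element is None.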
def _java_no_proxy(env_vars=None):
if env_vars is None:
env_vars = ['no_proxy', 'NO_PROXY']
java_items = []
for name in env_vars:
value = get_env(name)
if value:
items = value.split(',')
for item in items:
item = item.strip()
if item == '*':
java_items += [item]
elif item.startswith("."):
java_items += ["*" + item]
else:
java_items += [item]
return '|'.join(java_items)
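# Example (sketch, hypothetical domain): NO_PROXY='localhost,.example.com' is translated
# to the Java-style pattern 'localhost|*.example.com', suitable for the
# http.nonProxyHosts property passed to Maven below.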
def run_maven(args, nonZeroIsFatal=True, out=None, err=None, cwd=None, timeout=None, env=None):
proxyArgs = []
def add_proxy_property(name, value):
if value:
return proxyArgs.append('-D' + name + '=' + value)
host, port = _parse_http_proxy(["HTTP_PROXY", "http_proxy"])
add_proxy_property('proxyHost', host)
add_proxy_property('proxyPort', port)
host, port = _parse_http_proxy(["HTTPS_PROXY", "https_proxy"])
add_proxy_property('https.proxyHost', host)
add_proxy_property('https.proxyPort', port)
java_no_proxy = _java_no_proxy()
if is_windows():
# `no_proxy` is already set in the Maven settings file.
# To pass it here we need a reliable way to escape, e.g., the `|` separator
pass
else:
add_proxy_property('http.nonProxyHosts', java_no_proxy)
extra_args = []
if proxyArgs:
proxyArgs.append('-DproxySet=true')
extra_args.extend(proxyArgs)
if _opts.very_verbose:
extra_args += ['--debug']
custom_local_repo = os.environ.get('MAVEN_REPO_LOCAL')
if custom_local_repo:
custom_local_repo = realpath(custom_local_repo)
ensure_dir_exists(custom_local_repo)
extra_args += ['-Dmaven.repo.local=' + custom_local_repo]
mavenCommand = 'mvn'
if is_windows():
mavenCommand += '.cmd'
        extra_args += ['--batch-mode'] # prevent Maven from coloring its output
mavenHome = get_env('MAVEN_HOME')
if mavenHome:
mavenCommand = join(mavenHome, 'bin', mavenCommand)
return run([mavenCommand] + extra_args + args, nonZeroIsFatal=nonZeroIsFatal, out=out, err=err, timeout=timeout, env=env, cwd=cwd)
def run_mx(args, suite=None, mxpy=None, nonZeroIsFatal=True, out=None, err=None, timeout=None, env=None, quiet=False):
"""
Recursively runs mx.
:param list args: the command line arguments to pass to the recursive mx execution
:param suite: the primary suite or primary suite directory to use
:param str mxpy: path the mx module to run (None to use the current mx module)
"""
if mxpy is None:
mxpy = join(_mx_home, 'mx.py')
commands = [sys.executable, '-u', mxpy, '--java-home=' + get_jdk().home]
cwd = None
if suite:
if isinstance(suite, str):
commands += ['-p', suite]
cwd = suite
else:
commands += ['-p', suite.dir]
cwd = suite.dir
if quiet:
commands.append('--no-warning')
elif get_opts().verbose:
if get_opts().very_verbose:
commands.append('-V')
else:
commands.append('-v')
if _opts.version_conflict_resolution != 'suite':
commands += ['--version-conflict-resolution', _opts.version_conflict_resolution]
return run(commands + args, nonZeroIsFatal=nonZeroIsFatal, out=out, err=err, timeout=timeout, env=env, cwd=cwd)
def _get_new_progress_group_args():
"""
Gets a tuple containing the `preexec_fn` and `creationflags` parameters to subprocess.Popen
required to create a subprocess that can be killed via os.killpg without killing the
process group of the parent process.
"""
preexec_fn = None
creationflags = 0
if is_windows():
creationflags = subprocess.CREATE_NEW_PROCESS_GROUP
else:
preexec_fn = os.setsid
return preexec_fn, creationflags
def list_to_cmd_line(args):
return _list2cmdline(args) if is_windows() else ' '.join(pipes.quote(arg) for arg in args)
def _list2cmdline(seq):
"""
From subprocess.list2cmdline(seq), adding '=' to `needquote`.
Quoting arguments that contain '=' simplifies argument parsing in cmd files, where '=' is parsed as ' '.
"""
result = []
needquote = False
for arg in seq:
bs_buf = []
# Add a space to separate this argument from the others
if result:
result.append(' ')
needquote = (" " in arg) or ("\t" in arg) or ("=" in arg) or not arg
if needquote:
result.append('"')
for c in arg:
if c == '\\':
# Don't know if we need to double yet.
bs_buf.append(c)
elif c == '"':
# Double backslashes.
result.append('\\' * len(bs_buf)*2)
bs_buf = []
result.append('\\"')
else:
# Normal char
if bs_buf:
result.extend(bs_buf)
bs_buf = []
result.append(c)
# Add remaining backslashes, if any.
if bs_buf:
result.extend(bs_buf)
if needquote:
result.extend(bs_buf)
result.append('"')
return ''.join(result)
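# Example (sketch, hypothetical property): on Windows, list_to_cmd_line(['-Dkey=value', 'plain'])
# yields '"-Dkey=value" plain' because arguments containing '=' are quoted here; on other
# platforms pipes.quote is used instead.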
_subprocess_start_time = None
def run(args, nonZeroIsFatal=True, out=None, err=None, cwd=None, timeout=None, env=None, stdin=None, cmdlinefile=None, **kwargs):
"""
Run a command in a subprocess, wait for it to complete and return the exit status of the process.
If the command times out, it kills the subprocess and returns `ERROR_TIMEOUT` if `nonZeroIsFatal`
is false, otherwise it kills all subprocesses and raises a SystemExit exception.
If the exit status of the command is non-zero, mx is exited with the same exit status if
`nonZeroIsFatal` is true, otherwise the exit status is returned.
    Each line of the standard output and error streams of the subprocess is redirected to
out and err if they are callable objects.
"""
assert stdin is None or isinstance(stdin, str), "'stdin' must be a string: " + str(stdin)
assert isinstance(args, list), "'args' must be a list: " + str(args)
idx = 0
for arg in args:
if not isinstance(arg, str):
abort('Type of argument {} is not str but {}: {}\nArguments: {}'.format(idx, type(arg).__name__, arg, args))
idx = idx + 1
if env is None:
env = os.environ.copy()
vm_prefix = []
if hasattr(_opts, 'vm_prefix') and _opts.vm_prefix:
vm_prefix = _opts.vm_prefix.split()
# Ideally the command line could be communicated directly in an environment
# variable. However, since environment variables share the same resource
# space as the command line itself (on Unix at least), this would cause the
# limit to be exceeded too easily.
with tempfile.NamedTemporaryFile(suffix='', prefix='mx_subprocess_command.', mode='w', delete=False) as fp:
subprocessCommandFile = fp.name
# Don't include the vm_prefix in arguments as this can have unpredictable effects
args_to_save = args
if vm_prefix == args[:len(vm_prefix)]:
args_to_save = args_to_save[len(vm_prefix):]
for arg in args_to_save:
# TODO: handle newlines in args once there's a use case
if '\n' in arg:
abort('cannot handle new line in argument to run: "' + arg + '"')
print(arg, file=fp)
env['MX_SUBPROCESS_COMMAND_FILE'] = subprocessCommandFile
cmd_line = list_to_cmd_line(args)
if _opts.verbose or cmdlinefile or _opts.exec_log:
s = ''
if _opts.very_verbose or cwd is not None and cwd != _original_directory:
working_directory = cwd
if working_directory is None:
working_directory = _original_directory
s += '# Directory: ' + os.path.abspath(working_directory) + os.linesep
if _opts.very_verbose:
s += 'env -i ' + ' '.join([n + '=' + pipes.quote(v) for n, v in env.items()]) + ' \\' + os.linesep
else:
env_diff = [(k, env[k]) for k in env if k not in _original_environ]
if env_diff:
s += 'env ' + ' '.join([n + '=' + pipes.quote(v) for n, v in env_diff]) + ' \\' + os.linesep
s += cmd_line
if _opts.verbose:
log(s)
if cmdlinefile:
with open(cmdlinefile, 'w') as fp:
fp.write(s + os.linesep)
if _opts.exec_log:
with open(_opts.exec_log, 'a') as fp:
fp.write(s + os.linesep)
if timeout is None and _opts.ptimeout != 0:
timeout = _opts.ptimeout
sub = None
try:
if timeout or is_windows():
preexec_fn, creationflags = _get_new_progress_group_args()
else:
preexec_fn, creationflags = (None, 0)
def redirect(stream, f):
for line in iter(stream.readline, b''):
f(_decode(line))
stream.close()
stdout = out if not callable(out) else subprocess.PIPE
stderr = err if not callable(err) else subprocess.PIPE
stdin_pipe = None if stdin is None else subprocess.PIPE
global _subprocess_start_time
_subprocess_start_time = datetime.now()
p = subprocess.Popen(cmd_line if is_windows() else args, cwd=cwd, stdout=stdout, stderr=stderr, preexec_fn=preexec_fn, creationflags=creationflags, env=env, stdin=stdin_pipe, **kwargs) #pylint: disable=subprocess-popen-preexec-fn
sub = _addSubprocess(p, args)
joiners = []
if callable(out):
t = Thread(target=redirect, args=(p.stdout, out))
# Don't make the reader thread a daemon otherwise output can be dropped
t.start()
joiners.append(t)
if callable(err):
t = Thread(target=redirect, args=(p.stderr, err))
# Don't make the reader thread a daemon otherwise output can be dropped
t.start()
joiners.append(t)
if isinstance(stdin, str):
p.stdin.write(_encode(stdin))
p.stdin.close()
if timeout is None or timeout == 0:
while True:
try:
retcode = waitOn(p)
break
except KeyboardInterrupt:
if is_windows():
p.terminate()
else:
# Propagate SIGINT to subprocess. If the subprocess does not
# handle the signal, it will terminate and this loop exits.
_kill_process(p.pid, signal.SIGINT)
else:
if is_windows():
abort('Use of timeout not (yet) supported on Windows')
retcode = _waitWithTimeout(p, cmd_line, timeout, nonZeroIsFatal)
while any([t.is_alive() for t in joiners]):
# Need to use timeout otherwise all signals (including CTRL-C) are blocked
# see: http://bugs.python.org/issue1167930
for t in joiners:
t.join(10)
except OSError as e:
if not nonZeroIsFatal:
raise e
abort('Error executing: {}{}{}'.format(cmd_line, os.linesep, e))
except KeyboardInterrupt:
abort(1, killsig=signal.SIGINT)
finally:
_removeSubprocess(sub)
os.remove(subprocessCommandFile)
if retcode and nonZeroIsFatal:
if _opts.verbose:
if _opts.very_verbose:
raise subprocess.CalledProcessError(retcode, cmd_line)
log('[exit code: ' + str(retcode) + ']')
abort(retcode)
return retcode
def get_last_subprocess_start_time():
return _subprocess_start_time
@suite_context_free
def quiet_run(args):
"""run a command in a subprocess, redirect stdout and stderr to a file, and print it in case of failure"""
parser = ArgumentParser(prog='mx quiet-run')
parser.add_argument('output_file', metavar='FILE', action='store', help='file to redirect the output to')
parser.add_argument('cmd', metavar='CMD', nargs=PARSER, help='command to be executed')
parser.add_argument('-i', '--ignore-exit-code', action='store_true',
help='ignores exit code of the command and always succeeds')
parsed_args = parser.parse_args(args)
with open(parsed_args.output_file, 'w') as out:
out.write('$ {}\n'.format(' '.join(pipes.quote(c) for c in parsed_args.cmd)))
out.flush()
retcode = run(parsed_args.cmd, nonZeroIsFatal=False, out=out, err=out)
if retcode:
with open(parsed_args.output_file, 'r') as out:
print("From '{}':".format(out.name))
shutil.copyfileobj(out, sys.stdout)
if parsed_args.ignore_exit_code:
return 0
return retcode
def cmd_suffix(name):
"""
Gets the platform specific suffix for a cmd file
"""
if is_windows():
return name + '.cmd'
return name
def exe_suffix(name):
"""
Gets the platform specific suffix for an executable
"""
if is_windows():
return name + '.exe'
return name
def add_lib_prefix(name):
"""
Adds the platform specific library prefix to a name
"""
if is_darwin() or is_linux() or is_openbsd() or is_sunos():
return 'lib' + name
return name
def add_static_lib_prefix(name):
return add_lib_prefix(name)
def add_lib_suffix(name):
"""
Adds the platform specific library suffix to a name
"""
if is_windows():
return name + '.dll'
if is_linux() or is_openbsd() or is_sunos():
return name + '.so'
if is_darwin():
return name + '.dylib'
return name
def add_static_lib_suffix(name):
"""
    Adds the platform specific static library suffix to a name
"""
if is_windows():
return name + '.lib'
if is_linux() or is_openbsd() or is_sunos() or is_darwin():
return name + '.a'
return name
def add_debug_lib_suffix(name):
"""
    Adds the platform specific debug info suffix to a name
"""
if is_windows():
return name + '.pdb'
if is_linux() or is_openbsd() or is_sunos():
return name + '.debuginfo'
if is_darwin():
return name + '.dylib.dSYM'
return name
mx_subst.results_substitutions.register_with_arg('lib', lambda lib: add_lib_suffix(add_lib_prefix(lib)))
mx_subst.results_substitutions.register_with_arg('staticlib', lambda lib: add_static_lib_suffix(add_static_lib_prefix(lib)))
mx_subst.results_substitutions.register_with_arg('libdebug', lambda lib: add_debug_lib_suffix(add_lib_prefix(lib)))
mx_subst.results_substitutions.register_with_arg('libsuffix', add_lib_suffix)
mx_subst.results_substitutions.register_with_arg('staticlibsuffix', add_static_lib_suffix)
mx_subst.results_substitutions.register_with_arg('cmd', cmd_suffix)
mx_subst.results_substitutions.register_with_arg('exe', exe_suffix)
def get_mxbuild_dir(dependency, **kwargs):
return dependency.get_output_base()
mx_subst.results_substitutions.register_no_arg('mxbuild', get_mxbuild_dir, keywordArgs=True)
_this_year = str(datetime.now().year)
mx_subst.string_substitutions.register_no_arg('year', lambda: _this_year)
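# Illustrative note (not part of the original mx source): the registrations above
# wire the platform-aware name mangling into mx's substitution engine so suite
# definitions can stay platform neutral. Assuming the usual <keyword:arg>
# placeholder syntax used in suite files, a hypothetical expansion on Linux:
#
#   # '<lib:foo>'       -> 'libfoo.so'
#   # '<staticlib:foo>' -> 'libfoo.a'
#   # '<exe:mx>'        -> 'mx'        (would be 'mx.exe' on Windows)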
"""
Utility for filtering duplicate lines.
"""
class DuplicateSuppressingStream:
"""
Creates an object that will suppress duplicate lines sent to `out`.
The lines considered for suppression are those that contain one of the
strings in `restrictTo` if it is not None.
"""
def __init__(self, restrictTo=None, out=sys.stdout):
self.restrictTo = restrictTo
self.seen = set()
self.out = out
self.currentFilteredLineCount = 0
self.currentFilteredTime = None
def isSuppressionCandidate(self, line):
if self.restrictTo:
for p in self.restrictTo:
if p in line:
return True
return False
else:
return True
def write(self, line):
if self.isSuppressionCandidate(line):
if line in self.seen:
self.currentFilteredLineCount += 1
if self.currentFilteredTime:
if time.time() - self.currentFilteredTime > 1 * 60:
self.out.write(" Filtered " + str(self.currentFilteredLineCount) + " repeated lines...\n")
self.currentFilteredTime = time.time()
else:
self.currentFilteredTime = time.time()
return
self.seen.add(line)
self.currentFilteredLineCount = 0
self.out.write(line)
self.currentFilteredTime = None
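# Illustrative sketch (not part of the original mx source): wrapping a stream so
# that repeated lines are only printed once. The filter string is a made-up example.
#
#   dedup = DuplicateSuppressingStream(restrictTo=['warning:'], out=sys.stdout)
#   dedup.write('warning: deprecated API used\n')   # printed
#   dedup.write('warning: deprecated API used\n')   # suppressed (counted instead)
#   dedup.write('building project X\n')             # not a candidate, printed as-is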
"""
A version specification as defined in JSR-56
"""
class VersionSpec(Comparable):
def __init__(self, versionString):
validChar = r'[\x21-\x25\x27-\x29\x2c\x2f-\x5e\x60-\x7f]'
separator = r'[.\-_]'
m = re.match("^" + validChar + '+(' + separator + validChar + '+)*$', versionString)
assert m is not None, 'not a recognized version string: ' + versionString
self.versionString = versionString
self.parts = tuple((int(f) if f.isdigit() else f for f in re.split(separator, versionString)))
i = len(self.parts)
while i > 0 and self.parts[i - 1] == 0:
i -= 1
self.strippedParts = tuple(list(self.parts)[:i])
self._loom = False
def __str__(self):
return self.versionString
def __cmp__(self, other):
return compare(self.strippedParts, other.strippedParts)
def __hash__(self):
return self.parts.__hash__()
def __eq__(self, other):
return isinstance(other, VersionSpec) and self.strippedParts == other.strippedParts
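# Illustrative examples (not part of the original mx source) of VersionSpec
# comparison semantics: trailing zero components are stripped before comparing,
# so a ".0" suffix does not distinguish versions, and rich comparisons work as
# used elsewhere in this file.
#
#   assert VersionSpec('1.8.0') == VersionSpec('1.8')
#   assert VersionSpec('3.26') < VersionSpec('3.27')
#   assert VersionSpec('11.0.2').parts == (11, 0, 2)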
def _filter_non_existant_paths(paths):
if paths:
return os.pathsep.join([path for path in _separatedCygpathW2U(paths).split(os.pathsep) if exists(path)])
return None
class JDKConfigException(Exception):
def __init__(self, value):
Exception.__init__(self, value)
# For example: -agentlib:jdwp=transport=dt_socket,server=y,address=8000,suspend=y
def java_debug_args():
debug_args = []
attach = None
if _opts.attach is not None:
attach = 'server=n,address=' + _opts.attach
else:
if _opts.java_dbg_port is not None:
attach = 'server=y,address=' + str(_opts.java_dbg_port)
if attach is not None:
debug_args += ['-agentlib:jdwp=transport=dt_socket,' + attach + ',suspend=y']
return debug_args
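# Illustrative note (not part of the original mx source): assuming the conventional
# default debug port of 8000 for the -d option, the option produced above would
# typically look like
#
#   -agentlib:jdwp=transport=dt_socket,server=y,address=8000,suspend=y
#
# while --attach <port> flips it to server=n so the JVM connects to an already
# listening debugger instead of waiting for one.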
_use_command_mapper_hooks = True
def apply_command_mapper_hooks(command, hooks):
"""Takes `command` and passes it through each hook function to modify it
:param command: the command to modify
:param list[tuple] hooks: the list of hooks to apply
:return: the modified command
:rtype: list[str]
"""
new_cmd = command
if _use_command_mapper_hooks:
if hooks:
for hook in reversed(hooks):
hook_name, hook_func, suite = hook[:3]
logv("Applying command mapper hook '{}'".format(hook_name))
new_cmd = hook_func(new_cmd, suite)
logv("New command: {}".format(new_cmd))
else:
log("Skipping command mapper hooks as they were disabled explicitly.")
return new_cmd
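# Illustrative sketch (not part of the original mx source): a minimal command mapper
# hook. Based on how apply_command_mapper_hooks calls hook_func(cmd, suite) above,
# a hook is assumed to be a (name, function, suite) tuple whose function returns the
# (possibly rewritten) command list. The wrapper name below is made up.
#
#   def _wrap_with_time(cmd, suite):
#       # prefix the command so its wall-clock time is reported
#       return ['time'] + cmd
#
#   cmd = apply_command_mapper_hooks(['java', '-version'],
#                                    [('time-wrapper', _wrap_with_time, None)])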
def disable_command_mapper_hooks():
global _use_command_mapper_hooks
_use_command_mapper_hooks = False
def enable_command_mapper_hooks():
global _use_command_mapper_hooks
_use_command_mapper_hooks = True
class JDKConfig(Comparable):
"""
A JDKConfig object encapsulates info about an installed or deployed JDK.
"""
def __init__(self, home, tag=None):
home = realpath(home)
self.home = home
self.tag = tag
self.jar = self.exe_path('jar')
self.java = self.exe_path('java')
self.javac = self.exe_path('javac')
self.javah = self.exe_path('javah')
self.javap = self.exe_path('javap')
self.javadoc = self.exe_path('javadoc')
self.pack200 = self.exe_path('pack200')
self.include_dirs = [join(self.home, 'include'),
join(self.home, 'include', 'win32' if is_windows() else get_os())]
self.toolsjar = join(self.home, 'lib', 'tools.jar')
if not exists(self.toolsjar):
self.toolsjar = None
self._classpaths_initialized = False
self._bootclasspath = None
self._extdirs = None
self._endorseddirs = None
self._knownJavacLints = None
self._javacXModuleOptionExists = False
if not exists(self.java):
raise JDKConfigException('Java launcher does not exist: ' + self.java)
if not exists(self.javac):
raise JDKConfigException('Javac launcher does not exist: ' + self.javac)
if not exists(self.javah):
# javah is removed as of JDK 10
self.javah = None
self.java_args = shlex.split(_opts.java_args) if _opts.java_args else []
self.java_args_pfx = sum(map(shlex.split, _opts.java_args_pfx), [])
self.java_args_sfx = sum(map(shlex.split, _opts.java_args_sfx), [])
# Prepend the -d64 VM option only if the java command supports it
try:
output = _check_output_str([self.java, '-d64', '-version'], stderr=subprocess.STDOUT)
self.java_args = ['-d64'] + self.java_args
except OSError as e:
raise JDKConfigException('{}: {}'.format(e.errno, e.strerror))
except subprocess.CalledProcessError as e:
try:
output = _check_output_str([self.java, '-version'], stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
raise JDKConfigException('{}: {}'.format(e.returncode, e.output))
def _checkOutput(out):
return 'java version' in out
self._is_openjdk = 'openjdk' in output.lower()
# Once Loom is merged into the JDK, the JDK version
# number will be used to determine if the JDK includes Loom.
self._is_loom = 'loom' in output.lower()
# hotspot can print a warning, e.g. if there's a .hotspot_compiler file in the cwd
output = output.split('\n')
version = None
for o in output:
if _checkOutput(o):
assert version is None
version = o
def _checkOutput0(out):
return 'version' in out
# fall back: check for 'version' if there is no 'java version' string
if not version:
for o in output:
if _checkOutput0(o):
assert version is None
version = o
self.version = VersionSpec(version.split()[2].strip('"'))
ver = self.version.parts[1] if self.version.parts[0] == 1 else self.version.parts[0]
self.javaCompliance = JavaCompliance(ver)
if self._is_loom:
self.javaCompliance._loom = True
self.version._loom = True
self.debug_args = java_debug_args()
def is_openjdk_based(self):
return self._is_openjdk
def exe_path(self, name, sub_dir='bin'):
"""
Gets the full path to the executable in this JDK whose base name is `name`
and is located in `sub_dir` (relative to self.home).
:param str sub_dir:
"""
return exe_suffix(join(self.home, sub_dir, name))
def _init_classpaths(self):
if not self._classpaths_initialized:
if self.javaCompliance <= JavaCompliance(8):
_, binDir = _compile_mx_class('ClasspathDump', jdk=self)
remaining_attempts = 2
while remaining_attempts != 0:
remaining_attempts -= 1
try:
self._bootclasspath, self._extdirs, self._endorseddirs = [x if x != 'null' else None for x in _check_output_str([self.java, '-cp', _cygpathU2W(binDir), 'ClasspathDump'], stderr=subprocess.PIPE).split('|')]
except subprocess.CalledProcessError as e:
if remaining_attempts == 0:
abort('{}{}Command output:{}{}'.format(str(e), os.linesep, e.output, os.linesep))
warn('{}{}Command output:{}{}'.format(str(e), os.linesep, e.output, os.linesep))
# All 3 system properties accessed by ClasspathDump are expected to exist
if not self._bootclasspath or not self._extdirs or not self._endorseddirs:
warn("Could not find all classpaths: boot='" + str(self._bootclasspath) + "' extdirs='" + str(self._extdirs) + "' endorseddirs='" + str(self._endorseddirs) + "'")
self._bootclasspath_unfiltered = self._bootclasspath
self._bootclasspath = _filter_non_existant_paths(self._bootclasspath)
self._extdirs = _filter_non_existant_paths(self._extdirs)
self._endorseddirs = _filter_non_existant_paths(self._endorseddirs)
else:
self._bootclasspath = ''
self._extdirs = None
self._endorseddirs = None
self._classpaths_initialized = True
def __repr__(self):
return "JDKConfig(" + str(self.home) + ")"
def __str__(self):
return "Java " + str(self.version) + " (" + str(self.javaCompliance) + ") from " + str(self.home)
def __hash__(self):
return hash(self.home)
def __cmp__(self, other):
if other is None:
return False
if isinstance(other, JDKConfig):
complianceCmp = compare(self.javaCompliance, other.javaCompliance)
if complianceCmp:
return complianceCmp
versionCmp = compare(self.version, other.version)
if versionCmp:
return versionCmp
return compare(self.home, other.home)
raise TypeError()
def processArgs(self, args, addDefaultArgs=True):
"""
Returns a list composed of the arguments specified by the -P, -J and -A options (in that order)
prepended to `args` if `addDefaultArgs` is true otherwise just return `args`.
"""
def add_debug_args():
if not self.debug_args or is_debug_disabled():
return []
return self.debug_args
def add_coverage_args(args):
agent_path = mx_gate.get_jacoco_agent_path(False)
if any(arg.startswith('-javaagent') and agent_path in arg for arg in args):
return []
# jacoco flags might change in-process -> do not cache
return mx_gate.get_jacoco_agent_args() or []
if addDefaultArgs:
return self.java_args_pfx + self.java_args + add_debug_args() + add_coverage_args(args) + self.java_args_sfx + args
return args
def run_java(self, args, nonZeroIsFatal=True, out=None, err=None, cwd=None, timeout=None, env=None, addDefaultArgs=True, command_mapper_hooks=None):
cmd = self.generate_java_command(args, addDefaultArgs=addDefaultArgs)
cmd = apply_command_mapper_hooks(cmd, command_mapper_hooks)
return run(cmd, nonZeroIsFatal=nonZeroIsFatal, out=out, err=err, cwd=cwd, timeout=timeout, env=env)
def generate_java_command(self, args, addDefaultArgs=True):
"""
Similar to `OutputCapturingJavaVm.generate_java_command` such that generated commands can be
retrieved without being executed.
"""
return [self.java] + self.processArgs(args, addDefaultArgs=addDefaultArgs)
def bootclasspath(self, filtered=True):
"""
Gets the value of the ``sun.boot.class.path`` system property. This will be
the empty string if this JDK is version 9 or later.
:param bool filtered: specifies whether to exclude non-existent paths from the returned value
"""
self._init_classpaths()
return _separatedCygpathU2W(self._bootclasspath if filtered else self._bootclasspath_unfiltered)
def javadocLibOptions(self, args):
"""
Adds javadoc style options for the library paths of this JDK.
"""
self._init_classpaths()
if args is None:
args = []
if self._bootclasspath:
args.append('-bootclasspath')
args.append(_separatedCygpathU2W(self._bootclasspath))
if self._extdirs:
args.append('-extdirs')
args.append(_separatedCygpathU2W(self._extdirs))
return args
def javacLibOptions(self, args):
"""
Adds javac style options for the library paths of this JDK.
"""
args = self.javadocLibOptions(args)
if self._endorseddirs:
args.append('-endorseddirs')
args.append(_separatedCygpathU2W(self._endorseddirs))
return args
def hasJarOnClasspath(self, jar):
"""
Determines if `jar` is available on the boot class path or in the
extension/endorsed directories of this JDK.
:param str jar: jar file name (without directory component)
:return: the absolute path to the jar file in this JDK matching `jar` or None
"""
self._init_classpaths()
if self._bootclasspath:
for e in self._bootclasspath.split(os.pathsep):
if basename(e) == jar:
return e
if self._extdirs:
for d in self._extdirs.split(os.pathsep):
if len(d) and jar in os.listdir(d):
return join(d, jar)
if self._endorseddirs:
for d in self._endorseddirs.split(os.pathsep):
if len(d) and jar in os.listdir(d):
return join(d, jar)
return None
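# Illustrative usage (not part of the original mx source): on a JDK 8 style layout
# this is how code can probe for rt.jar, getting back its absolute path or None.
# The JDK home below is hypothetical.
#
#   jdk = JDKConfig('/usr/lib/jvm/java-8-openjdk')
#   rt = jdk.hasJarOnClasspath('rt.jar')
#   if rt is None:
#       warn('rt.jar not found on this JDK')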
def getKnownJavacLints(self):
"""
Gets the lint warnings supported by this JDK.
"""
if self._knownJavacLints is None:
try:
out = _check_output_str([self.javac, '-X'], stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
if e.output:
log(e.output)
raise e
if self.javaCompliance < JavaCompliance(9):
lintre = re.compile(r"-Xlint:\{([a-z-]+(?:,[a-z-]+)*)\}")
m = lintre.search(out)
if not m:
self._knownJavacLints = []
else:
self._knownJavacLints = m.group(1).split(',')
else:
self._knownJavacLints = []
lines = out.split(os.linesep)
inLintSection = False
for line in lines:
if not inLintSection:
if '-Xmodule' in line:
self._javacXModuleOptionExists = True
elif line.strip() in ['-Xlint:key,...', '-Xlint:<key>(,<key>)*']:
inLintSection = True
else:
if line.startswith(' '):
warning = line.split()[0]
self._knownJavacLints.append(warning)
self._knownJavacLints.append('-' + warning)
elif line.strip().startswith('-X'):
return self._knownJavacLints
warn('Did not find lint warnings in output of "javac -X"')
return self._knownJavacLints
def get_modules(self):
"""
Gets the modules in this JDK.
:return: a tuple of `JavaModuleDescriptor` objects for modules in this JDK
:rtype: tuple
"""
if self.javaCompliance < '9':
return ()
if not hasattr(self, '.modules'):
jdkModules = join(self.home, 'lib', 'modules')
cache = join(ensure_dir_exists(join(primary_suite().get_output_root(), '.jdk' + str(self.version))), 'listmodules')
cache_source = cache + '.source'
isJDKImage = exists(jdkModules)
def _use_cache():
if not isJDKImage:
return False
if not exists(cache):
return False
if not exists(cache_source):
return False
with open(cache_source) as fp:
source = fp.read()
if source != self.home:
return False
if TimeStampFile(jdkModules).isNewerThan(cache) or TimeStampFile(__file__).isNewerThan(cache):
return False
return True
if not _use_cache():
addExportsArg = '--add-exports=java.base/jdk.internal.module=ALL-UNNAMED'
_, binDir = _compile_mx_class('ListModules', jdk=self, extraJavacArgs=[addExportsArg])
out = LinesOutputCapture()
run([self.java, '-cp', _cygpathU2W(binDir), addExportsArg, 'ListModules'], out=out)
lines = out.lines
if isJDKImage:
for dst, content in [(cache_source, self.home), (cache, '\n'.join(lines))]:
try:
with open(dst, 'w') as fp:
fp.write(content)
except IOError as e:
warn('Error writing to ' + dst + ': ' + str(e))
os.remove(dst)
else:
with open(cache) as fp:
lines = fp.read().split('\n')
modules = {}
name = None
requires = {}
exports = {}
provides = {}
uses = set()
opens = {}
packages = set()
boot = None
for line in lines:
parts = line.strip().split()
assert len(parts) > 0, '>>>'+line+'<<<'
if len(parts) == 1:
if name is not None:
assert name not in modules, 'duplicate module: ' + name
modules[name] = JavaModuleDescriptor(name, exports, requires, uses, provides, packages, boot=boot, jdk=self, opens=opens)
name = parts[0]
requires = {}
exports = {}
provides = {}
opens = {}
uses = set()
packages = set()
boot = None
else:
assert name, 'cannot parse module descriptor line without module name: ' + line
a = parts[0]
if a == 'requires':
module = parts[-1]
modifiers = parts[1:-2] if len(parts) > 2 else []
requires[module] = modifiers
elif a == 'boot':
boot = parts[1] == 'true'
elif a == 'exports':
source = parts[1]
if len(parts) > 2:
assert parts[2] == 'to'
targets = parts[3:]
else:
targets = []
exports[source] = targets
elif a == 'uses':
uses.update(parts[1:])
elif a == 'opens':
opens.update(parts[1:])
elif a == 'package':
packages.update(parts[1:])
elif a == 'provides':
assert len(parts) == 4 and parts[2] == 'with'
service = parts[1]
provider = parts[3]
provides.setdefault(service, []).append(provider)
else:
abort('Cannot parse module descriptor line: ' + str(parts))
if name is not None:
assert name not in modules, 'duplicate module: ' + name
modules[name] = JavaModuleDescriptor(name, exports, requires, uses, provides, packages, boot=boot, jdk=self, opens=opens)
setattr(self, '.modules', tuple(modules.values()))
return getattr(self, '.modules')
def get_root_modules(self):
"""
Gets the default set of root modules for the unnamed module.
From http://openjdk.java.net/jeps/261:
When the compiler compiles code in the unnamed module, the default set of
root modules for the unnamed module is computed as follows:
The java.se module is a root, if it exists. If it does not exist then every
java.* module on the upgrade module path or among the system modules that
exports at least one package, without qualification, is a root.
Every non-java.* module on the upgrade module path or among the system
modules that exports at least one package, without qualification, is also a root.
:return: list of JavaModuleDescriptor
"""
modules = self.get_modules()
result = [m for m in modules if m.name == 'java.se']
has_java_dot_se = len(result) != 0
for mod in modules:
# no java.se => add all java.*
if not mod.name.startswith('java.') or not has_java_dot_se:
if any((len(to) == 0 for _, to in mod.exports.items())):
result.append(mod)
return result
def get_transitive_requires_keyword(self):
"""
Gets the keyword used to denote transitive dependencies. This can also effectively
be used to determine if this JDK contains the module changes made by
https://bugs.openjdk.java.net/browse/JDK-8169069.
"""
if self.javaCompliance < '9':
abort('Cannot call get_transitive_requires_keyword() for pre-9 JDK ' + str(self))
return 'transitive'
def get_automatic_module_name(self, modulejar):
"""
Derives the name of an automatic module from an automatic module jar according to
specification of ``java.lang.module.ModuleFinder.of(Path... entries)``.
:param str modulejar: the path to a jar file treated as an automatic module
:return: the name of the automatic module derived from `modulejar`
"""
if self.javaCompliance < '9':
abort('Cannot call get_automatic_module_name() for pre-9 JDK ' + str(self))
# Drop directory prefix and .jar (or .zip) suffix
name = os.path.basename(modulejar)[0:-4]
# Find first occurrence of -${NUMBER}. or -${NUMBER}$
m = re.search(r'-(\d+(\.|$))', name)
if m:
name = name[0:m.start()]
# Finally clean up the module name (see jdk.internal.module.ModulePath.cleanModuleName())
name = re.sub(r'[^A-Za-z0-9]', '.', name) # replace non-alphanumeric
name = re.sub(r'(\.)(\1)+', '.', name) # collapse repeating dots
name = re.sub(r'^\.', '', name) # drop leading dots
return re.sub(r'\.$', '', name) # drop trailing dots
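# Illustrative examples (not part of the original mx source) of the derivation above:
# the version tail is dropped and the remaining characters are normalised into
# dot-separated segments.
#
#   # 'foo-bar-1.2.3.jar' -> 'foo.bar'      (version tail '-1.2.3' is dropped)
#   # 'sqlite_jdbc.jar'   -> 'sqlite.jdbc'  (non-alphanumerics become dots)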
def get_boot_layer_modules(self):
"""
Gets the modules in the boot layer of this JDK.
:return: a list of `JavaModuleDescriptor` objects for boot layer modules in this JDK
:rtype: list
"""
return [jmd for jmd in self.get_modules() if jmd.boot]
def check_get_env(key):
"""
Gets an environment variable, aborting with a useful message if it is not set.
"""
value = get_env(key)
if value is None:
abort('Required environment variable ' + key + ' must be set')
return value
def get_env(key, default=None):
"""
Gets an environment variable.
:param default: the default value if the environment variable is not set.
:type default: str | None
"""
value = os.getenv(key, default)
return value
### ~~~~~~~~~~~~~ Logging
def logv(msg=None, end='\n'):
if vars(_opts).get('verbose') is None:
def _deferrable():
logv(msg, end=end)
_opts_parsed_deferrables.append(_deferrable)
return
if _opts.verbose:
log(msg, end=end)
def logvv(msg=None, end='\n'):
if vars(_opts).get('very_verbose') is None:
def _deferrable():
logvv(msg, end=end)
_opts_parsed_deferrables.append(_deferrable)
return
if _opts.very_verbose:
log(msg, end=end)
def log(msg=None, end='\n'):
"""
Write a message to the console.
All script output goes through this method thus allowing a subclass
to redirect it.
"""
if vars(_opts).get('quiet'):
return
if msg is None:
print()
else:
# https://docs.python.org/2/reference/simple_stmts.html#the-print-statement
# > A '\n' character is written at the end, unless the print statement
# > ends with a comma.
#
# In CPython, the normal print statement (without comma) is compiled to
# two bytecode instructions: PRINT_ITEM, followed by PRINT_NEWLINE.
# Each of these bytecode instructions is executed atomically, but the
# interpreter can suspend the thread between the two instructions.
#
# If the print statement is followed by a comma, the PRINT_NEWLINE
# instruction is omitted. By manually adding the newline to the string,
# there is only a single PRINT_ITEM instruction which is executed
# atomically, but still prints the newline.
print(str(msg), end=end)
# https://en.wikipedia.org/wiki/ANSI_escape_code#Colors
_ansi_color_table = {
'black' : '30',
'red' : '31',
'green' : '32',
'yellow' : '33',
'blue' : '34',
'magenta' : '35',
'cyan' : '36'
}
def colorize(msg, color='red', bright=True, stream=sys.stderr):
"""
Wraps `msg` in ANSI escape sequences to make it print to `stream` with foreground font color
`color` and brightness `bright`. This method returns `msg` unchanged if it is None,
if it already starts with the designated escape sequence or the execution environment does
not support color printing on `stream`.
"""
if msg is None:
return None
code = _ansi_color_table.get(color, None)
if code is None:
abort('Unsupported color: ' + color + '.\nSupported colors are: ' + ', '.join(_ansi_color_table.keys()))
if bright:
code += ';1'
color_on = '\033[' + code + 'm'
if not msg.startswith(color_on):
isUnix = sys.platform.startswith('linux') or sys.platform in ['darwin', 'freebsd']
if isUnix and hasattr(stream, 'isatty') and stream.isatty():
return color_on + msg + '\033[0m'
return msg
def log_error(msg=None):
"""
Write an error message to the console.
All script output goes through this method thus allowing a subclass
to redirect it.
"""
if msg is None:
print(file=sys.stderr)
else:
print(colorize(str(msg), stream=sys.stderr), file=sys.stderr)
def log_deprecation(msg=None):
"""
Write a deprecation warning to the console.
"""
if msg is None:
print(file=sys.stderr)
else:
print(colorize(str("[MX DEPRECATED] {}".format(msg)), color='yellow', stream=sys.stderr), file=sys.stderr)
### ~~~~~~~~~~~~~ Project
def expand_project_in_class_path_arg(cpArg, jdk=None):
"""
Replaces each "@" prefixed element in the class path `cpArg` with
the class path for the dependency named by the element without the "@" prefix.
"""
if '@' not in cpArg:
return cpArg
cp = []
if jdk is None:
jdk = get_jdk(tag='default')
for part in cpArg.split(os.pathsep):
if part.startswith('@'):
cp += classpath(part[1:], jdk=jdk).split(os.pathsep)
else:
cp.append(part)
return os.pathsep.join(cp)
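# Illustrative sketch (not part of the original mx source): expanding an '@' entry.
# 'com.example.project' is a made-up dependency name.
#
#   cp = expand_project_in_class_path_arg('@com.example.project' + os.pathsep + '/extra/lib.jar')
#   # -> the project's full class path entries, followed by '/extra/lib.jar'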
def expand_project_in_args(args, insitu=True, jdk=None):
"""
Looks for the first -cp or -classpath argument in `args` and
calls expand_project_in_class_path_arg on it. If `insitu` is true,
then `args` is updated in place otherwise a copy of `args` is modified.
The updated object is returned.
"""
for i in range(len(args)):
if args[i] == '-cp' or args[i] == '-classpath':
if i + 1 < len(args):
if not insitu:
args = list(args) # clone args
args[i + 1] = expand_project_in_class_path_arg(args[i + 1], jdk=jdk)
break
return args
_flock_cmd = '<uninitialized>'
def flock_cmd():
global _flock_cmd
if _flock_cmd == '<uninitialized>':
out = OutputCapture()
try:
flock_ret_code = run(['flock', '--version'], nonZeroIsFatal=False, err=out, out=out)
except OSError as e:
flock_ret_code = e
if flock_ret_code == 0:
_flock_cmd = 'flock'
else:
logvv('Could not find flock command')
_flock_cmd = None
return _flock_cmd
_gmake_cmd = '<uninitialized>'
def gmake_cmd(context=None):
global _gmake_cmd
if _gmake_cmd == '<uninitialized>':
for a in ['make', 'gmake', 'gnumake']:
try:
output = _check_output_str([a, '--version'], stderr=subprocess.STDOUT)
if 'GNU' in output:
_gmake_cmd = a
break
except:
pass
if _gmake_cmd == '<uninitialized>':
abort('Could not find a GNU make executable on the current path.', context=context)
return _gmake_cmd
def expandvars_in_property(value):
result = expandvars(value)
if '$' in result:
abort('Property contains an undefined environment variable: ' + value)
return result
### ~~~~~~~~~~~~~ commands
# Builtin commands
def _defaultEcjPath():
jdt = get_env('JDT')
# Treat empty string the same as undefined
if jdt:
return jdt
return None
def _before_fork():
try:
# Try to initialize _scproxy on the main thread to work around issue on macOS:
# https://bugs.python.org/issue30837
from _scproxy import _get_proxy_settings, _get_proxies
_get_proxy_settings()
_get_proxies()
except ImportError:
pass
def _resolve_ecj_jar(jdk, java_project_compliance, spec):
"""
Resolves `spec` to the path of a local jar file containing the Eclipse batch compiler.
"""
ecj = spec
max_jdt_version = None
min_jdt_version = None
if jdk:
if jdk.javaCompliance <= '10':
# versions greater than 3.26 require at least JDK 11
max_jdt_version = VersionSpec('3.26')
elif jdk.javaCompliance <= '17':
min_jdt_version = VersionSpec('3.27')
if java_project_compliance and java_project_compliance > '16':
return None
if spec.startswith('builtin'):
available = {VersionSpec(lib.maven['version']): lib for lib in _libs.values() if lib.suite is _mx_suite and lib.name.startswith('ECJ_')
and (max_jdt_version is None or VersionSpec(lib.maven['version']) <= max_jdt_version)
and (min_jdt_version is None or VersionSpec(lib.maven['version']) >= min_jdt_version)}
assert available, 'no compatible ECJ libraries in the mx suite'
if spec == 'builtin':
ecj_lib = sorted(available.items(), reverse=True)[0][1]
else:
if not spec.startswith('builtin:'):
abort('Invalid value for JDT: "{}"'.format(spec))
available_desc = 'Available ECJ version(s): ' + ', '.join((str(v) for v in sorted(available.keys())))
if spec == 'builtin:list':
log(available_desc)
abort(0)
version = VersionSpec(spec.split(':', 1)[1])
ecj_lib = available.get(version)
if ecj_lib is None:
abort('Specified ECJ version is not available: {}\n{}'.format(version, available_desc))
ecj = ecj_lib.get_path(resolve=True)
if not ecj.endswith('.jar'):
abort('Path for Eclipse batch compiler does not look like a jar file: ' + ecj)
if not exists(ecj):
abort('Eclipse batch compiler jar does not exist: ' + ecj)
else:
with zipfile.ZipFile(ecj, 'r') as zf:
if 'org/eclipse/jdt/internal/compiler/apt/' not in zf.namelist():
abort('Specified Eclipse compiler does not include annotation processing support. ' +
'Ensure you are using a stand alone ecj.jar, not org.eclipse.jdt.core_*.jar ' +
'from within the plugins/ directory of an Eclipse IDE installation.')
return ecj
def build(cmd_args, parser=None):
"""builds the artifacts of one or more dependencies"""
suppliedParser = parser is not None
if not suppliedParser:
parser = ArgumentParser(prog='mx build')
parser.add_argument('-f', action='store_true', dest='force', help='force build (disables timestamp checking)')
parser.add_argument('-c', action='store_true', dest='clean', help='removes existing build output')
parallelize = parser.add_mutually_exclusive_group()
parallelize.add_argument('-n', '--serial', action='store_const', const=False, dest='parallelize', help='serialize Java compilation')
parallelize.add_argument('-p', action='store_const', const=True, dest='parallelize', help='parallelize Java compilation (default)')
parser.add_argument('-s', '--shallow-dependency-checks', action='store_const', const=True, help="ignore modification times "
"of output files for each of P's dependencies when determining if P should be built. That "
"is, only P's sources, suite.py of its suite and whether any of P's dependencies have "
"been built are considered. This is useful when an external tool (such as Eclipse) performs incremental "
"compilation that produces finer grained modification times than mx's build system. Shallow "
"dependency checking only applies to non-native projects. This option can be also set by defining"
"the environment variable MX_BUILD_SHALLOW_DEPENDENCY_CHECKS to true.")
parser.add_argument('--source', dest='compliance', help='Java compliance level for projects without an explicit one')
parser.add_argument('--Wapi', action='store_true', dest='warnAPI', help='show warnings about using internal APIs')
dependencies_group = parser.add_mutually_exclusive_group()
dependencies_group.add_argument('--dependencies', '--projects', action='store', help='comma separated dependencies to build (omit to build all dependencies)', metavar='<names>')
dependencies_group.add_argument('--only', action='store', help='comma separated dependencies to build, without checking their dependencies (omit to build all dependencies)')
parser.add_argument('--no-java', action='store_false', dest='java', help='do not build Java projects')
parser.add_argument('--no-native', action='store_false', dest='native', help='do not build native projects')
parser.add_argument('--no-javac-crosscompile', action='store_false', dest='javac_crosscompile', help="does nothing as cross compilation is no longer supported (preserved for compatibility)")
parser.add_argument('--warning-as-error', '--jdt-warning-as-error', action='store_true', help='convert all Java compiler warnings to errors')
parser.add_argument('--force-deprecation-as-warning', action='store_true', help='never treat deprecation warnings as errors irrespective of --warning-as-error')
parser.add_argument('--force-deprecation-as-warning-for-dependencies', action='store_true', help='never treat deprecation warnings as errors irrespective of --warning-as-error for projects outside of the primary suite')
parser.add_argument('--jdt-show-task-tags', action='store_true', help='show task tags as Eclipse batch compiler warnings')
parser.add_argument('--alt-javac', dest='alt_javac', help='path to alternative javac executable', metavar='<path>')
parser.add_argument('-A', dest='extra_javac_args', action='append', help='pass <flag> directly to Java source compiler', metavar='<flag>', default=[])
parser.add_argument('--no-daemon', action='store_true', dest='no_daemon', help='disable use of daemon Java compiler (if available)')
parser.add_argument('--all', action='store_true', help='build all dependencies (not just default targets)')
parser.add_argument('--print-timing', action='store_true', help='print start/end times and duration for each build task', default=is_continuous_integration())
compilerSelect = parser.add_mutually_exclusive_group()
compilerSelect.add_argument('--error-prone', dest='error_prone', help='path to error-prone.jar', metavar='<path>')
compilerSelect.add_argument('--jdt', help='path to a stand alone Eclipse batch compiler jar (e.g. ecj.jar). '
'Use the value "builtin:<version>" (e.g. "builtin:3.25") to use the ECJ_<version> library defined in the mx suite. '
'Specifying "builtin" will use the latest version, and "builtin:list" will list the available built-in versions. '
'This can also be specified with the JDT environment variable.', default=_defaultEcjPath(), metavar='<path>')
compilerSelect.add_argument('--force-javac', action='store_true', dest='force_javac', help='use javac even if an Eclipse batch compiler jar is specified')
if suppliedParser:
parser.add_argument('remainder', nargs=REMAINDER, metavar='...')
args = parser.parse_args(cmd_args[:])
env_gc_after_build_varname = 'MX_GC_AFTER_BUILD'
env_gc_after_build = get_env(env_gc_after_build_varname) if 'com.oracle.mxtool.compilerserver' not in cmd_args else None
if env_gc_after_build:
warn('Will run `mx gc-dists {}` after building ({} is set)'.format(env_gc_after_build, env_gc_after_build_varname))
deps_w_deprecation_errors = []
deprecation_as_error_args = args
if args.force_deprecation_as_warning_for_dependencies:
args.force_deprecation_as_warning = True
deprecation_as_error_args = parser.parse_args(cmd_args[:])
deprecation_as_error_args.force_deprecation_as_warning = False
primary_java_projects = [p for p in primary_suite().projects if p.isJavaProject()]
primary_java_project_dists = [d for d in primary_suite().dists if any([p in d.deps for p in primary_java_projects])]
deps_w_deprecation_errors = [e.name for e in primary_java_projects + primary_java_project_dists]
logv("Deprecations are only errors for " + ", ".join(deps_w_deprecation_errors))
if is_windows() and sys.version_info[0] < 3:
if args.parallelize:
warn('parallel builds are not supported on windows: can not use -p')
args.parallelize = False
else:
if args.parallelize is None:
# Enable parallel compilation by default
args.parallelize = True
if not args.force_javac and args.jdt is not None:
# fail early but in the end we need to resolve with JDK version
_resolve_ecj_jar(None, None, args.jdt)
onlyDeps = None
removed = []
if args.only is not None:
# N.B. This build will not respect any dependencies (including annotation processor dependencies)
onlyDeps = set(args.only.split(','))
roots = [dependency(name) for name in onlyDeps]
elif args.dependencies is not None:
if len(args.dependencies) == 0:
abort('The value of the --dependencies argument cannot be the empty string')
names = args.dependencies.split(',')
roots = [dependency(name) for name in names]
else:
# This is the normal case for build (e.g. `mx build`) so be
# clear about JDKs being used ...
log('JAVA_HOME: ' + get_env('JAVA_HOME', ''))
if _opts.java_home and _opts.java_home != get_env('JAVA_HOME', ''):
log('--java-home: ' + _opts.java_home)
if get_env('EXTRA_JAVA_HOMES') or _opts.extra_java_homes:
log('EXTRA_JAVA_HOMES: ' + '\n '.join(get_env('EXTRA_JAVA_HOMES', '').split(os.pathsep)))
if _opts.extra_java_homes and _opts.extra_java_homes != get_env('EXTRA_JAVA_HOMES', ''):
log('--extra-java-homes: ' + '\n '.join(_opts.extra_java_homes.split(os.pathsep)))
# ... and the dependencies that *will not* be built
if _removedDeps:
if _opts.verbose:
log('Dependencies removed from build:')
for _, reason in _removedDeps.items():
if isinstance(reason, tuple):
reason, _ = reason
log(' {}'.format(reason))
else:
log('{} unsatisfied dependencies were removed from build (use -v to list them)'.format(len(_removedDeps)))
removed, deps = ([], dependencies()) if args.all else defaultDependencies()
if removed:
if _opts.verbose:
log('Non-default dependencies removed from build (use mx build --all to build them):')
for d in removed:
log(' {}'.format(d))
else:
log('{} non-default dependencies were removed from build (use -v to list them, mx build --all to build them)'.format(len(removed)))
# Omit all libraries so that only the ones required to build other dependencies are downloaded
roots = [d for d in deps if not d.isBaseLibrary()]
if roots:
roots = _dependencies_opt_limit_to_suites(roots)
# N.B. Limiting to a suite only affects the starting set of dependencies. Dependencies in other suites will still be built
sortedTasks = []
taskMap = {}
depsMap = {}
def _createTask(dep, edge):
if dep.name in deps_w_deprecation_errors:
task = dep.getBuildTask(deprecation_as_error_args)
else:
task = dep.getBuildTask(args)
if task.subject in taskMap:
return
taskMap[dep] = task
if onlyDeps is None or task.subject.name in onlyDeps:
if dep in removed:
warn("Adding non-default dependency {} as it is needed by {} {}".format(dep, edge.kind, edge.src))
sortedTasks.append(task)
lst = depsMap.setdefault(task.subject, [])
for d in lst:
task.deps.append(taskMap[d])
def _registerDep(src, dst, edge):
lst = depsMap.setdefault(src, [])
lst.append(dst)
walk_deps(visit=_createTask, visitEdge=_registerDep, roots=roots, ignoredEdges=[DEP_EXCLUDED])
if _opts.very_verbose:
log("++ Serialized build plan ++")
for task in sortedTasks:
if task.deps:
log(str(task) + " [depends on " + ', '.join([str(t.subject) for t in task.deps]) + ']')
else:
log(str(task))
log("-- Serialized build plan --")
if len(sortedTasks) == 1:
# Spinning up a daemon for a single task doesn't make sense
if not args.no_daemon:
logv('[Disabling use of compile daemon for single build task]')
args.no_daemon = True
daemons = {}
if args.parallelize and onlyDeps is None:
_before_fork()
def joinTasks(tasks):
failed = []
for t in tasks:
t.proc.join()
_removeSubprocess(t.sub)
if t.proc.exitcode != 0:
failed.append(t)
# Release the pipe file descriptors ASAP (only available on Python 3.7+)
if hasattr(t.proc, 'close'):
t.proc.close()
return failed
def checkTasks(tasks):
active = []
failed = []
for t in tasks:
if t.proc.is_alive():
active.append(t)
else:
t.pullSharedMemoryState()
t.cleanSharedMemoryState()
t._finished = True
t._end_time = time.time()
if t.proc.exitcode != 0:
failed.append(t)
_removeSubprocess(t.sub)
# Release the pipe file descriptors ASAP (only available on Python 3.7+)
if hasattr(t.proc, 'close'):
t.proc.close()
return active, failed
def remainingDepsDepth(task):
if task._d is None:
incompleteDeps = [d for d in task.deps if d.proc is None or not d._finished]
if len(incompleteDeps) == 0:
task._d = 0
else:
task._d = max([remainingDepsDepth(t) for t in incompleteDeps]) + 1
return task._d
cpus = cpu_count()
def sortWorklist(tasks):
for t in tasks:
if t.parallelism > cpus:
abort('{} requires more parallelism ({}) than available CPUs ({})'.format(t, t.parallelism, cpus))
t._d = None
return sorted(tasks, key=remainingDepsDepth)
worklist = sortWorklist(sortedTasks)
active = []
failed = []
def _activeCpus(_active):
cpus = 0
for t in _active:
cpus += t.parallelism
return cpus
while len(worklist) != 0:
while True:
active, failed = checkTasks(active)
if len(failed) != 0:
break
if _activeCpus(active) >= cpus:
# Sleep for 0.2 second
time.sleep(0.2)
else:
break
if len(failed) != 0:
break
def executeTask(task):
if not isinstance(task.proc, Thread):
# Clear sub-process list cloned from parent process
del _currentSubprocesses[:]
task.execute()
task.pushSharedMemoryState()
def depsDone(task):
for d in task.deps:
if d.proc is None or not d._finished:
return False
return True
added_new_tasks = False
worklist.sort(key=lambda task: task.build_time, reverse=True)
for task in worklist:
if depsDone(task) and _activeCpus(active) + task.parallelism <= cpus:
worklist.remove(task)
task.initSharedMemoryState()
task.prepare(daemons)
task.proc = multiprocessing.Process(target=executeTask, args=(task,))
task._start_time = time.time()
task._finished = False
task.proc.start()
active.append(task)
task.sub = None if isinstance(task.proc, Thread) else _addSubprocess(task.proc, [str(task)])
added_new_tasks = True
if _activeCpus(active) >= cpus:
break
if not added_new_tasks:
time.sleep(0.2)
worklist = sortWorklist(worklist)
failed += joinTasks(active)
def dump_task_stats(f):
"""
Dump task statistics CSV. Use R with following commands for visualization:
d <- read.csv("input.csv", header=F)
names(d) <- c("content", "start", "end")
d$id <- 1:nrow(d)
d <- d[(d$end-d$start > 5),]
d$start <- as.POSIXct(d$start, origin="1970-01-01")
d$end <- as.POSIXct(d$end, origin="1970-01-01")
timevis(d)
"""
for task in sortedTasks:
try:
f.write("{},{},{}\n".format(str(task).replace(',', '_'), task._start_time, task._end_time))
except:
pass
if _opts.dump_task_stats == '-':
log("Printing task stats:")
dump_task_stats(sys.stdout)
elif _opts.dump_task_stats is not None:
log("Writing task stats to {}".format(_opts.dump_task_stats))
with open(_opts.dump_task_stats, 'w') as f:
dump_task_stats(f)
if len(failed):
for t in failed:
log_error('{0} failed'.format(t))
for daemon in daemons.values():
daemon.shutdown()
abort('{0} build tasks failed'.format(len(failed)))
else: # not parallelize
for t in sortedTasks:
t.prepare(daemons)
t.execute()
for daemon in daemons.values():
daemon.shutdown()
if env_gc_after_build:
warn('Running `mx gc-dists {}` after building ({} is set)'.format(env_gc_after_build, env_gc_after_build_varname))
mx_gc.gc_dists(env_gc_after_build.split())
# TODO check for distributions overlap (while loading suites?)
if suppliedParser:
return args
return None
def build_suite(s):
"""build all projects in suite (for dynamic import)"""
# Note we must use the "build" method in "s" and not the one
# in the dict. If there isn't one we use mx.build
project_names = [p.name for p in s.projects]
if hasattr(s.extensions, 'build'):
build_command = s.extensions.build
else:
build_command = build
build_command(['--dependencies', ','.join(project_names)])
def _chunk_files_for_command_line(files, limit=None, separator=' ', pathFunction=lambda f: f):
"""
Gets a generator for splitting up a list of files into chunks such that the
size of the `separator` separated file paths in a chunk is less than `limit`.
This is used to work around system command line length limits.
:param list files: list of files to chunk
:param int limit: the maximum number of characters in a chunk. If None, then a limit is derived from host OS limits.
:param str separator: the separator between each file path on the command line
:param pathFunction: a function for converting each entry in `files` to a path name
:return: a generator yielding the list of files in each chunk
"""
chunkSize = 0
chunkStart = 0
if limit is None:
if is_windows():
# The CreateProcess function on Windows limits the length of a command line to
# 32,768 characters (http://msdn.microsoft.com/en-us/library/ms682425%28VS.85%29.aspx)
limit = 32768
else:
limit = os.sysconf('SC_ARG_MAX')
if limit == -1:
limit = 262144 # we could use sys.maxint but we prefer a more robust smaller value
# Reduce the limit by 20% to account for the space required by environment
# variables and other things that use up the command line limit.
# This is not an exact calculation as calculating the exact requirements
# is complex (https://www.in-ulm.de/~mascheck/various/argmax/)
limit = limit * 0.8
for i in range(len(files)):
path = pathFunction(files[i])
size = len(path) + len(separator)
assert size < limit
if chunkSize + size < limit:
chunkSize += size
else:
assert i > chunkStart
yield files[chunkStart:i]
chunkStart = i
chunkSize = 0
if chunkStart == 0:
assert chunkSize < limit
yield files
elif chunkStart < len(files):
yield files[chunkStart:]
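# Illustrative usage (not part of the original mx source): invoking a tool once per
# chunk so the command line stays under the platform limit. 'jar_tool' and 'sources'
# are placeholders.
#
#   for chunk in _chunk_files_for_command_line(sources):
#       run([jar_tool, 'uf', 'out.jar'] + chunk)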
def processorjars():
for s in suites(True):
_processorjars_suite(s)
def _processorjars_suite(s):
"""
Builds all distributions in this suite that define one or more annotation processors.
Returns the jar files for the built distributions.
"""
apDists = [d for d in s.dists if d.isJARDistribution() and d.definedAnnotationProcessors]
if not apDists:
return []
names = [ap.name for ap in apDists]
build(['--dependencies', ",".join(names)])
return [ap.path for ap in apDists]
@no_suite_loading
def autopep8(args):
"""run the autopep8 formatter (if available) over Python source files"""
parser = ArgumentParser(prog='mx autopep8')
_add_command_primary_option(parser)
parser.add_argument('--check', action='store_true', help='don\'t write the files back but just return the status.')
parser.add_argument('--walk', action='store_true', help='use tree walk to find .py files')
parser.add_argument('--all', action='store_true', help='check all files, not just files in the mx.* directory.')
args = parser.parse_args(args)
try:
output = _check_output_str(['autopep8', '--version'], stderr=subprocess.STDOUT)
except OSError as e:
log_error('autopep8 is not available: ' + str(e))
return -1
m = re.search(r'^autopep8 (\d+)\.(\d+)\.(\d+).*', output, re.MULTILINE)
if not m:
log_error('could not detect autopep8 version from ' + output)
return -1
major, minor, micro = (int(m.group(1)), int(m.group(2)), int(m.group(3)))
log("Detected autopep8 version: {0}.{1}.{2}".format(major, minor, micro))
if (major, minor) != (1, 5):
log_error('autopep8 version must be 1.5.x')
return -1
pyfiles = _find_pyfiles(args.all, args.primary, args.walk)
env = _get_env_with_pythonpath()
if args.check:
log('Running pycodestyle on ' + ' '.join(pyfiles) + '...')
run(['pycodestyle'] + pyfiles, env=env)
else:
for pyfile in pyfiles:
log('Running autopep8 --in-place on ' + pyfile + '...')
run(['autopep8', '--in-place', pyfile], env=env)
return 0
pylint_ver_map = {
(1, 1): {
'rcfile': '.pylintrc11',
'additional_options': []
},
(1, 9): {
'rcfile': '.pylintrc19',
'additional_options': ['--score=n']
},
(2, 2): {
'rcfile': '.pylintrc22',
'additional_options': ['--score=n']
},
(2, 4): {
'rcfile': '.pylintrc24',
'additional_options': ['--score=n']
}
}
@no_suite_loading
def pylint(args):
"""run pylint (if available) over Python source files (found by '<vc> locate' or by tree walk with --walk)"""
parser = ArgumentParser(prog='mx pylint')
_add_command_primary_option(parser)
parser.add_argument('--walk', action='store_true', help='use tree walk to find .py files')
parser.add_argument('--all', action='store_true', help='check all files, not just files in the mx.* directory.')
parser.add_argument('-f', '--force', action='store_true', help='force processing of files that have not changed since last successful pylint')
args = parser.parse_args(args)
ver = (-1, -1)
pylint_exe = None
output = None
exc = None
for candidate in ['pylint2', 'pylint-2', 'pylint']:
try:
output = _check_output_str([candidate, '--version'], stderr=subprocess.STDOUT)
pylint_exe = candidate
break
except OSError as e:
exc = e
else:
log_error('pylint is not available: ' + str(exc))
return -1
m = re.search(r'^pylint-?2? (\d+)\.(\d+)\.(\d+),?', output, re.MULTILINE)
if not m:
log_error('could not determine pylint version from ' + output)
return -1
major, minor, micro = (int(m.group(1)), int(m.group(2)), int(m.group(3)))
log("Detected pylint version: {0}.{1}.{2}".format(major, minor, micro))
ver = (major, minor)
if ver not in pylint_ver_map:
log_error('pylint version must be one of {3} (got {0}.{1}.{2})'.format(major, minor, micro, list(pylint_ver_map.keys())))
return -1
rcfile = join(dirname(__file__), pylint_ver_map[ver]['rcfile'])
if not exists(rcfile):
log_error('pylint configuration file does not exist: ' + rcfile)
return -1
additional_options = pylint_ver_map[ver]['additional_options']
pyfiles = _find_pyfiles(args.all, args.primary, args.walk)
env = _get_env_with_pythonpath()
suite = primary_suite()
timestamps_dir = None
if suite:
timestamps_dir = join(suite.get_mx_output_dir(), 'pylint-timestamps')
if args.force:
rmtree(timestamps_dir)
ensure_dir_exists(timestamps_dir)
for pyfile in pyfiles:
if timestamps_dir:
ts = TimeStampFile(join(timestamps_dir, pyfile.replace(os.sep, '_') + '.timestamp'))
if ts.exists() and ts.isNewerThan(pyfile):
log('Skip pylinting ' + pyfile + ' as it has not changed')
continue
log('Running pylint on ' + pyfile + '...')
run([pylint_exe, '--reports=n', '--rcfile=' + rcfile, pyfile] + additional_options, env=env)
if timestamps_dir:
ts.touch()
return 0
def _find_pyfiles(find_all, primary, walk):
"""
Find files ending in `.py`.
:param find_all: If `True`, finds all files, not just those in the `mx.*` directory
:param primary: If `True`, limit the search to the primary suite
:param walk: If `True`, use a tree walk instead of `<vc> locate`
:return: List of `.py` files
"""
def walk_suite(suite):
for root, dirs, files in os.walk(suite.dir if find_all else suite.mxDir):
for f in files:
if f.endswith('.py'):
pyfile = join(root, f)
pyfiles.append(pyfile)
if 'bin' in dirs:
dirs.remove('bin')
if 'lib' in dirs:
# avoids downloaded .py files
dirs.remove('lib')
def findfiles_by_walk(pyfiles):
for suite in suites(True, includeBinary=False):
if primary and not suite.primary:
continue
walk_suite(suite)
def findfiles_by_vc(pyfiles):
for suite in suites(True, includeBinary=False):
if primary and not suite.primary:
continue
if not suite.vc:
walk_suite(suite)
continue
suite_location = os.path.relpath(suite.dir if find_all else suite.mxDir, suite.vc_dir)
files = suite.vc.locate(suite.vc_dir, [join(suite_location, '**.py')])
compat = suite.getMxCompatibility()
if compat.makePylintVCInputsAbsolute():
files = [join(suite.vc_dir, f) for f in files]
for pyfile in files:
if exists(pyfile):
pyfiles.append(pyfile)
pyfiles = []
# Process mxtool's own py files only if mx is the primary suite
if primary_suite() is _mx_suite:
for root, _, files in os.walk(dirname(__file__)):
for f in files:
if f.endswith('.py'):
pyfile = join(root, f)
pyfiles.append(pyfile)
else:
if walk:
findfiles_by_walk(pyfiles)
else:
findfiles_by_vc(pyfiles)
return pyfiles
def _get_env_with_pythonpath():
env = os.environ.copy()
pythonpath = dirname(__file__)
for suite in suites(True):
pythonpath = os.pathsep.join([pythonpath, suite.mxDir])
env['PYTHONPATH'] = pythonpath
return env
class NoOpContext(object):
def __init__(self, value=None):
self.value = value
def __enter__(self):
return self.value
def __exit__(self, exc_type, exc_value, traceback):
pass
class TempDir(object):
def __init__(self, parent_dir=None):
self.parent_dir = parent_dir
def __enter__(self):
self.tmp_dir = mkdtemp(dir=self.parent_dir)
return self.tmp_dir
def __exit__(self, exc_type, exc_value, traceback):
shutil.rmtree(self.tmp_dir)
class TempDirCwd(TempDir):
def __init__(self, parent_dir=None): #pylint: disable=useless-super-delegation
super(TempDirCwd, self).__init__(parent_dir)
def __enter__(self):
super(TempDirCwd, self).__enter__()
self.prev_dir = os.getcwd()
os.chdir(self.tmp_dir)
return self.tmp_dir
def __exit__(self, exc_type, exc_value, traceback):
os.chdir(self.prev_dir)
super(TempDirCwd, self).__exit__(exc_type, exc_value, traceback)
class SafeFileCreation(object):
"""
Context manager for creating a file that tries hard to handle races between processes/threads
creating the same file. It tries to ensure that the file is created with the content provided
by exactly one process/thread but makes no guarantee about which process/thread wins.
Note that truly atomic file copying is hard (http://stackoverflow.com/a/28090883/6691595)
:Example:
with SafeFileCreation(dst) as sfc:
shutil.copy(src, sfc.tmpPath)
"""
def __init__(self, path, companion_patterns=None):
self.path = path
self.companion_patterns = companion_patterns or []
def __enter__(self):
if self.path is not None:
path_dir = dirname(self.path)
ensure_dir_exists(path_dir)
# Temporary file must be on the same file system as self.path for os.rename to be atomic.
fd, tmp = tempfile.mkstemp(suffix=basename(self.path), dir=path_dir)
self.tmpFd = fd
self.tmpPath = tmp
else:
self.tmpFd = None
self.tmpPath = None
return self
def __exit__(self, exc_type, exc_value, traceback):
if self.path is None:
return
# Windows will complain about tmp being in use by another process
# when calling os.rename if we don't close the file descriptor.
os.close(self.tmpFd)
def _handle_file(tmpPath, path):
if exists(tmpPath):
if exc_value:
# If an error occurred, delete the temp file
# instead of renaming it
os.remove(tmpPath)
else:
# Correct the permissions on the temporary file which is created with restrictive permissions
os.chmod(tmpPath, 0o666 & ~currentUmask)
# Atomic if self.path does not already exist.
if is_windows() and exists(path):
# Needed on Windows
os.remove(path)
os.rename(tmpPath, path)
_handle_file(self.tmpPath, self.path)
for companion_pattern in self.companion_patterns:
_handle_file(companion_pattern.format(path=self.tmpPath), companion_pattern.format(path=self.path))
class SafeDirectoryUpdater(object):
"""
Multi-thread safe context manager for creating/updating a directory.
:Example:
# Compiles `sources` into `dst` with javac. If multiple threads/processes are
# performing this compilation concurrently, the contents of `dst`
# will reflect the complete results of one of the compilations
# from the perspective of other threads/processes.
with SafeDirectoryUpdater(dst) as sdu:
mx.run([jdk.javac, '-d', sdu.directory, sources])
"""
def __init__(self, directory, create=False):
"""
:param directory: the target directory that will be created/updated within the context.
The working copy of the directory is accessed via `self.directory`
within the context.
"""
self.target = directory
self._workspace = None
self.directory = None
self.create = create
def __enter__(self):
parent = dirname(self.target)
self._workspace = tempfile.mkdtemp(dir=parent)
self.directory = join(self._workspace, basename(self.target))
if self.create:
ensure_dir_exists(self.directory)
self.target_timestamp = TimeStampFile(self.target)
return self
def __exit__(self, exc_type, exc_value, traceback):
if exc_type is not None:
rmtree(self._workspace)
raise
# Try to delete the target directory if it existed prior to creating
# self.workspace and has not been modified in between.
if self.target_timestamp.timestamp is not None and self.target_timestamp.timestamp == TimeStampFile(self.target).timestamp:
old_target = join(self._workspace, 'to_delete_' + basename(self.target))
try:
os.rename(self.target, old_target)
except:
# Silently assume another process won the race to rename dst_jdk_dir
pass
# Try to atomically move self.directory to self.target
try:
os.rename(self.directory, self.target)
except:
if not exists(self.target):
raise
# Silently assume another process won the race to create self.target
rmtree(self._workspace)
def _derived_path(base_path, suffix, prefix='.', prepend_dirname=True):
"""
Gets a path derived from `base_path` by prepending `prefix` and appending `suffix`
to the base name of `base_path`.
:param bool prepend_dirname: if True, `dirname(base_path)` is prepended to the derived base name
"""
derived = prefix + basename(base_path) + suffix
if prepend_dirname:
derived = join(dirname(base_path), derived)
return derived
class Archiver(SafeFileCreation):
"""
Utility for creating and updating a zip or tar file atomically.
"""
def __init__(self, path, kind='zip', reset_user_group=False, duplicates_action=None, context=None, compress=False):
SafeFileCreation.__init__(self, path)
self.kind = kind
self.zf = None
self._add_f = None
self._add_str = None
self._add_link = None
self.reset_user_group = reset_user_group
self.compress = compress
assert duplicates_action in [None, 'warn', 'abort']
self.duplicates_action = duplicates_action
self._provenance_map = {} if duplicates_action else None
self.context = context
def _add_zip(self, filename, archive_name, provenance):
self._add_provenance(archive_name, provenance)
self.zf.write(filename, archive_name)
def _add_str_zip(self, data, archive_name, provenance):
self._add_provenance(archive_name, provenance)
self.zf.writestr(archive_name, data)
def _add_link_zip(self, target, archive_name, provenance):
abort("Can not add symlinks in ZIP archives!", context=self.context)
def _add_tar(self, filename, archive_name, provenance):
self._add_provenance(archive_name, provenance)
self.zf.add(filename, archive_name, filter=self._tarinfo_filter, recursive=False)
def _add_str_tar(self, data, archive_name, provenance):
self._add_provenance(archive_name, provenance)
binary_data = _encode(data)
tarinfo = self.zf.tarinfo()
tarinfo.name = archive_name
tarinfo.size = len(binary_data)
tarinfo.mtime = calendar.timegm(datetime.now().utctimetuple())
self.zf.addfile(self._tarinfo_filter(tarinfo), BytesIO(binary_data))
def _add_link_tar(self, target, archive_name, provenance):
self._add_provenance(archive_name, provenance)
tarinfo = self.zf.tarinfo()
tarinfo.name = archive_name
tarinfo.type = tarfile.SYMTYPE
tarinfo.linkname = target
tarinfo.mtime = calendar.timegm(datetime.now().utctimetuple())
self.zf.addfile(self._tarinfo_filter(tarinfo))
def _tarinfo_filter(self, tarinfo):
if self.reset_user_group:
tarinfo.uid = tarinfo.gid = 0
tarinfo.uname = tarinfo.gname = "root"
return tarinfo
def _add_provenance(self, archive_name, provenance):
if self._provenance_map is None:
return
if archive_name in self._provenance_map: # pylint: disable=unsupported-membership-test
old_provenance = self._provenance_map[archive_name]
nl = os.linesep
msg = "Duplicate archive entry: '{}'".format(archive_name) + nl
msg += ' old provenance: ' + ('<unknown>' if not old_provenance else old_provenance) + nl
msg += ' new provenance: ' + ('<unknown>' if not provenance else provenance)
abort_or_warn(msg, self.duplicates_action == 'abort', context=self.context)
self._provenance_map[archive_name] = provenance # pylint: disable=unsupported-assignment-operation
def __enter__(self):
if self.path:
SafeFileCreation.__enter__(self)
if self.kind == 'zip' or self.kind == 'jar':
self.zf = zipfile.ZipFile(self.tmpPath, 'w', compression=zipfile.ZIP_DEFLATED if self.compress else zipfile.ZIP_STORED)
self._add_f = self._add_zip
self._add_str = self._add_str_zip
self._add_link = self._add_link_zip
elif self.kind == 'tar':
if self.compress:
warn("Archiver created with compress=True and kind=tar, ignoring compression setting")
self.zf = tarfile.open(self.tmpPath, 'w')
self._add_f = self._add_tar
self._add_str = self._add_str_tar
self._add_link = self._add_link_tar
elif self.kind == 'tgz':
if not self.compress:
warn("Archiver created with compress=False and kind=tgz, ignoring compression setting")
self.zf = tarfile.open(self.tmpPath, 'w:gz')
self._add_f = self._add_tar
self._add_str = self._add_str_tar
self._add_link = self._add_link_tar
else:
abort('unsupported archive kind: ' + self.kind, context=self.context)
return self
def __exit__(self, exc_type, exc_value, traceback):
if self.path:
if self.zf:
self.zf.close()
SafeFileCreation.__exit__(self, exc_type, exc_value, traceback)
def add(self, filename, archive_name, provenance):
self._add_f(filename, archive_name, provenance)
def add_str(self, data, archive_name, provenance):
self._add_str(data, archive_name, provenance)
def add_link(self, target, archive_name, provenance):
self._add_link(target, archive_name, provenance)
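# A minimal usage sketch for Archiver (illustrative only; 'example.tar' and the entry
# names are hypothetical). The archive only becomes visible at the target path once
# the `with` block exits successfully, courtesy of SafeFileCreation.
def _archiver_usage_sketch():  # pragma: no cover - documentation example, never called
    with Archiver('example.tar', kind='tar', reset_user_group=True, duplicates_action='warn') as arc:
        arc.add('/tmp/hello.txt', 'docs/hello.txt', provenance='example')
        arc.add_str('generated contents\n', 'docs/generated.txt', provenance='example')
        arc.add_link('hello.txt', 'docs/hello-link.txt', provenance='example')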
class NullArchiver(Archiver):
def add(self, filename, archive_name, provenance):
pass
def add_str(self, data, archive_name, provenance):
pass
def add_link(self, target, archive_name, provenance):
pass
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
pass
def make_unstrip_map(dists):
"""
Gets the contents of a map file that can be used with the `unstrip` command to deobfuscate stack
traces containing code from the stripped versions of `dists`.
:return: None if none of the entries in `dists` are stripped or none of them have
existing unstripping map files (likely because they have not been built
with --strip-jars enabled)
"""
content = ''
for d in dists:
if d.is_stripped():
map_file = d.path + '.map'
if exists(map_file):
with open(map_file) as fp:
content += fp.read()
return None if len(content) == 0 else content
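# Illustrative sketch (hypothetical file names): combine the unstripping maps of some
# distributions into a single file that can then be passed to the `unstrip` command
# together with an obfuscated stack trace.
def _make_unstrip_map_sketch(dists):  # pragma: no cover - documentation example, never called
    content = make_unstrip_map(dists)
    if content is None:
        return None  # nothing was stripped or no map files were produced
    map_path = 'combined.map'
    with open(map_path, 'w') as fp:
        fp.write(content)
    return map_path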
def _unstrip(args):
"""use stripping mappings of a file to unstrip the contents of another file
Arguments are mapping file and content file.
Directly passes the arguments to proguard-retrace.jar. For more details see: http://proguard.sourceforge.net/manual/retrace/usage.html"""
unstrip(args)
return 0
def unstrip(args, **run_java_kwargs):
proguard_cp = _get_proguard_cp()
# A slightly more general pattern for matching stack traces than the default.
# This version does not require the "at " prefix.
regex = r'(?:.*?\s+%c\.%m\s*\(%s(?::%l)?\)\s*(?:~\[.*\])?)|(?:(?:.*?[:"]\s+)?%c(?::.*)?)'
unstrip_command = ['-cp', proguard_cp, 'proguard.retrace.ReTrace', '-regex', regex]
mapfiles = []
inputfiles = []
temp_files = []
try:
for arg in args:
if os.path.isdir(arg):
mapfiles += glob.glob(join(arg, '*' + JARDistribution._strip_map_file_suffix))
elif arg.endswith(JARDistribution._strip_map_file_suffix):
mapfiles.append(arg)
else:
# ReTrace does not (yet) understand JDK9+ stack traces where a module name
# is prefixed to a class name. As a workaround, we separate the module name
# prefix from the class name with a space. For example, this converts:
#
# com.oracle.graal.graal_enterprise/com.oracle.graal.enterprise.a.b(stripped:22)
#
# to:
#
# com.oracle.graal.graal_enterprise/ com.oracle.graal.enterprise.a.b(stripped:22)
#
with open(arg) as fp:
contents = fp.read()
new_contents = re.sub(r'(\s+(?:[a-z][a-zA-Z_$]*\.)*[a-z][a-zA-Z\d_$]*/)', r'\1 ', contents)
if contents != new_contents:
temp_file = arg + '.' + str(os.getpid())
with open(temp_file, 'w') as fp:
fp.write(new_contents)
inputfiles.append(temp_file)
temp_files.append(temp_file)
else:
inputfiles.append(arg)
with tempfile.NamedTemporaryFile(mode='w', delete=False) as catmapfile:
_merge_file_contents(mapfiles, catmapfile)
catmapfile.close()
temp_files.append(catmapfile.name)
run_java(unstrip_command + [catmapfile.name] + inputfiles, **run_java_kwargs)
finally:
for temp_file in temp_files:
os.unlink(temp_file)
def _archive(args):
"""create jar files for projects and distributions"""
archive(args)
return 0
def archive(args):
parser = ArgumentParser(prog='mx archive')
parser.add_argument('--parsable', action='store_true', dest='parsable', help='Outputs results in a stable parsable way (one archive per line, <ARCHIVE>=<path>)')
parser.add_argument('names', nargs=REMAINDER, metavar='[<project>|@<distribution>]...')
args = parser.parse_args(args)
archives = []
for name in args.names:
if name.startswith('@'):
dname = name[1:]
d = distribution(dname)
if isinstance(d.suite, BinarySuite):
abort('Cannot re-build archive for distribution {} from binary suite {}'.format(dname, d.suite.name))
d.make_archive()
archives.append(d.path)
if args.parsable:
print('{0}={1}'.format(dname, d.path))
else:
p = project(name)
path = p.make_archive()
archives.append(path)
if args.parsable:
print('{0}={1}'.format(name, path))
if not args.parsable:
logv("generated archives: " + str(archives))
return archives
def checkoverlap(args):
"""check all distributions for overlap
The exit code of this command reflects how many projects are included in more than one distribution."""
projToDist = {}
for d in sorted_dists():
if d.internal:
continue
for p in d.archived_deps():
if p.isProject():
if p in projToDist:
projToDist[p].append(d)
else:
projToDist[p] = [d]
count = 0
for p in projToDist:
ds = projToDist[p]
if len(ds) > 1:
remove = []
for d in ds:
overlaps = d.overlapped_distributions()
if len([o for o in ds if o in overlaps]) != 0:
remove.append(d)
ds = [d for d in ds if d not in remove]
if len(ds) > 1:
print('{} is in more than one distribution: {}'.format(p, [d.name for d in ds]))
count += 1
return count
def canonicalizeprojects(args):
"""check all project specifications for canonical dependencies
The exit code of this command reflects how many projects have non-canonical dependencies."""
nonCanonical = []
for s in suites(True, includeBinary=False):
for p in (p for p in s.projects if p.isJavaProject()):
if p.suite.getMxCompatibility().check_package_locations():
errors = []
for source, package in p.mismatched_imports().items():
if package:
errors.append('{} declares a package that does not match its location: {}'.format(source, package))
else:
errors.append('{} does not declare a package that matches its location'.format(source))
if errors:
p.abort('\n'.join(errors))
if p.is_test_project():
continue
if p.checkPackagePrefix:
for pkg in p.defined_java_packages():
if not pkg.startswith(p.name):
p.abort('package in {0} does not have prefix matching project name: {1}'.format(p, pkg))
ignoredDeps = {d for d in p.deps if d.isJavaProject()}
for pkg in p.imported_java_packages():
for dep in p.deps:
if not dep.isJavaProject():
ignoredDeps.discard(dep)
else:
if pkg in dep.defined_java_packages():
ignoredDeps.discard(dep)
if pkg in dep.extended_java_packages():
ignoredDeps.discard(dep)
genDeps = frozenset([dependency(name, context=p) for name in getattr(p, "generatedDependencies", [])])
incorrectGenDeps = genDeps - ignoredDeps
ignoredDeps -= genDeps
if incorrectGenDeps:
p.abort('{0} should declare following as normal dependencies, not generatedDependencies: {1}'.format(p, ', '.join([d.name for d in incorrectGenDeps])))
if len(ignoredDeps) != 0:
candidates = set()
# Compute candidate dependencies based on projects required by p
for d in dependencies():
if d.isJavaProject() and not d.defined_java_packages().isdisjoint(p.imported_java_packages()):
candidates.add(d)
# Remove non-canonical candidates
for c in list(candidates):
c.walk_deps(visit=lambda dep, edge: candidates.discard(dep) if dep.isJavaProject() else None)
candidates = [d.name for d in candidates]
msg = 'Non-generated source code in {0} does not use any packages defined in these projects: {1}\nIf the above projects are only ' \
'used in generated sources, declare them in a "generatedDependencies" attribute of {0}.\nComputed project dependencies: {2}'
p.abort(msg.format(
p, ', '.join([d.name for d in ignoredDeps]), ','.join(candidates)))
excess = frozenset([d for d in p.deps if d.isJavaProject()]) - set(p.canonical_deps())
if len(excess) != 0:
nonCanonical.append(p)
for d in s.dists:
different_test_status = [pp for pp in d.archived_deps() if pp.isProject() and pp.is_test_project() != d.is_test_distribution()]
if different_test_status:
project_list_str = '\n'.join((' - ' + pp.name for pp in different_test_status))
should_abort = d.suite.getMxCompatibility().enforceTestDistributions()
if d.is_test_distribution():
abort_or_warn("{} is a test distribution but it contains non-test projects:\n{}".format(d.name, project_list_str), should_abort)
else:
abort_or_warn("{} is not a test distribution but it contains test projects:\n{}".format(d.name, project_list_str), should_abort)
if len(nonCanonical) != 0:
for p in nonCanonical:
canonicalDeps = p.canonical_deps()
if len(canonicalDeps) != 0:
log(p.__abort_context__() + ':\nCanonical dependencies for project ' + p.name + ' are: [')
for d in canonicalDeps:
name = d.suite.name + ':' + d.name if d.suite is not p.suite else d.name
log(' "' + name + '",')
log(' ],')
else:
log(p.__abort_context__() + ':\nCanonical dependencies for project ' + p.name + ' are: []')
return len(nonCanonical)
"""
Represents a file and its modification time stamp at the time the TimeStampFile is created.
"""
class TimeStampFile:
def __init__(self, path, followSymlinks=True):
"""
:type path: str
:type followSymlinks: bool | str
"""
assert isinstance(path, str), path + ' # type=' + str(type(path))
self.path = path
if exists(path):
if followSymlinks == 'newest':
self.timestamp = max(getmtime(path), lstat(path).st_mtime)
elif followSymlinks:
self.timestamp = getmtime(path)
else:
self.timestamp = lstat(path).st_mtime
else:
self.timestamp = None
@staticmethod
def newest(paths):
"""
Creates a TimeStampFile for the file in `paths` with the most recent modification time.
Entries in `paths` that do not correspond to an existing file are ignored.
"""
ts = None
for path in paths:
if exists(path):
if not ts:
ts = TimeStampFile(path)
elif ts.isOlderThan(path):
ts = TimeStampFile(path)
return ts
def isOlderThan(self, arg):
if not self.timestamp:
return True
if isinstance(arg, (int, float)):
return self.timestamp < arg
if isinstance(arg, TimeStampFile):
if arg.timestamp is None:
return False
else:
return arg.timestamp > self.timestamp
if isinstance(arg, list):
files = arg
else:
files = [arg]
for f in files:
if not os.path.exists(f):
return True
if getmtime(f) > self.timestamp:
return True
return False
def isNewerThan(self, arg):
"""
Returns True if self represents an existing file whose modification time
is more recent than the modification time(s) represented by `arg`. If `arg`
is a list, then it's treated as a list of path names.
"""
if not self.timestamp:
return False
if isinstance(arg, (int, float)):
return self.timestamp > arg
if isinstance(arg, TimeStampFile):
if arg.timestamp is None:
return False
else:
return arg.timestamp < self.timestamp
if isinstance(arg, list):
files = arg
else:
files = [arg]
for f in files:
if self.timestamp < getmtime(f):
return False
return True
def exists(self):
return exists(self.path)
def __str__(self):
if self.timestamp:
ts = time.strftime('[%Y-%m-%d %H:%M:%S]', time.localtime(self.timestamp))
else:
ts = '[does not exist]'
return self.path + ts
def touch(self):
if exists(self.path):
os.utime(self.path, None)
else:
ensure_dir_exists(dirname(self.path))
open(self.path, 'a')
self.timestamp = getmtime(self.path)
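# Illustrative sketch of the timestamp-based rebuild check used throughout mx
# (the stamp and input paths are hypothetical): redo the work only if any input
# is newer than the recorded stamp, then refresh the stamp.
def _timestamp_usage_sketch(stamp_path, input_files):  # pragma: no cover - documentation example, never called
    stamp = TimeStampFile(stamp_path)
    if stamp.isOlderThan(input_files):
        # ... perform the work that depends on input_files ...
        stamp.touch()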
### ~~~~~~~~~~~~~ commands
def checkstyle(args):
"""run Checkstyle on the Java sources
Run Checkstyle over the Java sources. Any errors or warnings
produced by Checkstyle result in a non-zero exit code."""
parser = ArgumentParser(prog='mx checkstyle')
parser.add_argument('-f', action='store_true', dest='force', help='force checking (disables timestamp checking)')
parser.add_argument('--primary', action='store_true', help='limit checks to primary suite')
parser.add_argument('--filelist', type=FileType("r"), help='only check the files listed in the given file')
args = parser.parse_args(args)
filelist = None
if args.filelist:
filelist = [os.path.abspath(line.strip()) for line in args.filelist.readlines()]
args.filelist.close()
totalErrors = 0
class Batch:
def __init__(self, config, suite):
self.suite = suite
config_relative_to_root = os.path.relpath(os.path.abspath(config), os.sep)
self.timestamp = TimeStampFile(join(suite.get_mx_output_dir(), 'checkstyle-timestamps',
config_relative_to_root + '.timestamp'))
self.sources = []
self.projects = []
batches = {}
for p in projects(opt_limit_to_suite=True):
if not p.isJavaProject():
continue
if args.primary and not p.suite.primary:
continue
sourceDirs = p.source_dirs()
config, checkstyleVersion, _ = p.get_checkstyle_config()
if not config:
logv('[No Checkstyle configuration found for {0} - skipping]'.format(p))
continue
# skip checking this Java project if its Java compliance level is "higher" than the configured JDK
jdk = get_jdk(p.javaCompliance)
assert jdk
key = (config, checkstyleVersion)
batch = batches.setdefault(key, Batch(config, p.suite))
batch.projects.append(p)
for sourceDir in sourceDirs:
javafilelist = []
for root, _, files in os.walk(sourceDir):
for f in [join(root, name) for name in files if name.endswith('.java') if name != 'package-info.java']:
if filelist is None or f in filelist:
javafilelist.append(f)
if len(javafilelist) == 0:
logv('[no Java sources in {0} - skipping]'.format(sourceDir))
continue
mustCheck = False
if not args.force and batch.timestamp.exists():
mustCheck = (config and batch.timestamp.isOlderThan(config)) or batch.timestamp.isOlderThan(javafilelist) # pylint: disable=consider-using-ternary
else:
mustCheck = True
if not mustCheck:
if _opts.verbose:
log('[all Java sources in {0} already checked - skipping]'.format(sourceDir))
continue
exclude = join(p.dir, '.checkstyle.exclude')
if exists(exclude):
with open(exclude) as f:
# Convert patterns to OS separators
patterns = [name.rstrip().replace('/', os.sep) for name in f.readlines()]
def match(name):
for p in patterns:
if p in name:
if _opts.verbose:
log('excluding: ' + name)
return True
return False
javafilelist = [name for name in javafilelist if not match(name)]
batch.sources.extend(javafilelist)
for key, batch in batches.items():
if len(batch.sources) == 0:
continue
config, checkstyleVersion = key
checkstyleLibrary = library('CHECKSTYLE_' + checkstyleVersion).get_path(True)
auditfileName = join(batch.suite.dir, 'checkstyleOutput.txt')
log('Running Checkstyle [{0}] on {1} using {2}...'.format(checkstyleVersion, ', '.join([p.name for p in batch.projects]), config))
try:
for chunk in _chunk_files_for_command_line(batch.sources):
try:
run_java(['-Xmx1g', '-jar', checkstyleLibrary, '-f', 'xml', '-c', config, '-o', auditfileName] + chunk, nonZeroIsFatal=False)
finally:
if exists(auditfileName):
errors = []
source = [None]
def start_element(name, attrs):
if name == 'file':
source[0] = attrs['name']
elif name == 'error':
errors.append(u'{0}:{1}: {2}'.format(source[0], attrs['line'], attrs['message']))
xp = xml.parsers.expat.ParserCreate()
xp.StartElementHandler = start_element
with open(auditfileName, 'rb') as fp:
xp.ParseFile(fp)
if len(errors) != 0:
for e in errors:
log_error(e)
totalErrors = totalErrors + len(errors)
else:
batch.timestamp.touch()
finally:
if exists(auditfileName):
os.unlink(auditfileName)
return totalErrors
def help_(args):
"""show detailed help for mx or a given command
With no arguments, print a list of commands and short help for each command.
Given a command name, print help for that command."""
if len(args) == 0:
_argParser.print_help()
return
name = args[0]
if name not in _mx_commands.commands():
hits = [c for c in _mx_commands.commands().keys() if c.startswith(name)]
if len(hits) == 1:
name = hits[0]
elif len(hits) == 0:
abort('mx: unknown command \'{0}\'\n{1}use "mx help" for more options'.format(name, _format_commands()))
else:
abort('mx: command \'{0}\' is ambiguous\n {1}'.format(name, ' '.join(hits)))
command = _mx_commands.commands()[name]
print(command.get_doc())
def _parse_multireleasejar_version(value):
try:
mrjVersion = int(value)
if mrjVersion < 9:
raise ArgumentTypeError('multi-release jar version ({}) must be greater than 8'.format(value))
return mrjVersion
except ValueError:
raise ArgumentTypeError('multi-release jar version ({}) must be an int value greater than 8'.format(value))
def verifyMultiReleaseProjects(args):
"""verifies properties of multi-release projects"""
for p in projects():
if hasattr(p, 'multiReleaseJarVersion') or hasattr(p, 'overlayTarget'):
compat = p.suite.getMxCompatibility()
if compat.verify_multirelease_projects():
# This will abort if there's an error in getting the map
p.get_overlay_flatten_map()
def flattenMultiReleaseSources(args):
"""print map for flattening multi-release sources
Prints space separated (versioned_dir, base_dir) pairs where versioned_dir contains versioned sources
for a multi-release jar and base_dir contains the corresponding non-versioned (or base versioned)
sources.
"""
parser = ArgumentParser(prog='mx flattenmultireleasesources')
parser.add_argument('-c', '--commands', action='store_true', help='format the output as a series of commands to copy '\
'the versioned sources to the location of the non-versioned sources')
parser.add_argument('version', type=int, help='major version of the Java release for which flattened sources will be produced')
args = parser.parse_args(args)
versions = {}
for p in projects():
if p.isJavaProject() and hasattr(p, 'multiReleaseJarVersion') or hasattr(p, 'overlayTarget'):
if hasattr(p, 'multiReleaseJarVersion'):
version = _parse_multireleasejar_version(getattr(p, 'multiReleaseJarVersion'))
else:
version = p.javaCompliance.value
if version <= args.version:
versions.setdefault(version, []).append(p.get_overlay_flatten_map())
else:
# Ignore overlays for versions higher than the one requested
pass
# Process versioned overlays in ascending order such that higher versions
# override lower versions. This corresponds with how versioned classes in
# multi-release jars are resolved.
for version, maps in sorted(versions.items()):
for flatten_map in maps:
for src_dir, dst_dir in flatten_map.items():
if not args.commands:
print(src_dir, dst_dir)
else:
if not exists(dst_dir):
print('mkdir -p {}'.format(dst_dir))
print('cp {}{}* {}'.format(src_dir, os.sep, dst_dir))
def projectgraph(args, suite=None):
"""create graph for project structure ("mx projectgraph | dot -Tpdf -oprojects.pdf" or "mx projectgraph --igv")"""
parser = ArgumentParser(prog='mx projectgraph')
parser.add_argument('--dist', action='store_true', help='group projects by distribution')
parser.add_argument('--ignore', action='append', help='dependencies to ignore', default=[])
args = parser.parse_args(args)
def should_ignore(name):
return any((ignored in name for ignored in args.ignore))
def print_edge(from_dep, to_dep, attributes=None):
edge_str = ''
attributes = attributes or {}
def node_str(_dep):
_node_str = '"' + _dep.name
if args.dist and _dep.isDistribution():
_node_str += ':DUMMY'
_node_str += '"'
return _node_str
edge_str += node_str(from_dep)
edge_str += '->'
edge_str += node_str(to_dep)
if args.dist and from_dep.isDistribution() or to_dep.isDistribution():
attributes['color'] = 'blue'
if to_dep.isDistribution():
attributes['lhead'] = 'cluster_' + to_dep.name
if from_dep.isDistribution():
attributes['ltail'] = 'cluster_' + from_dep.name
if attributes:
edge_str += ' [' + ', '.join((k + '="' + v + '"' for k, v in attributes.items())) + ']'
edge_str += ';'
print(edge_str)
print('digraph projects {')
print('rankdir=BT;')
print('node [shape=rect];')
print('splines=true;')
print('ranksep=1;')
if args.dist:
print('compound=true;')
started_dists = set()
used_libraries = set()
for p in projects(opt_limit_to_suite=True):
if should_ignore(p.name):
continue
for dep in p.deps:
if dep.isLibrary():
used_libraries.add(dep)
for d in distributions(opt_limit_to_suite=True):
if should_ignore(d.name):
continue
for dep in d.excludedLibs:
used_libraries.add(dep)
for l in used_libraries:
if not should_ignore(l.name):
print('"' + l.name + '";')
def print_distribution(_d):
if should_ignore(_d.name):
return
if _d in started_dists:
warn("projectgraph does not support non-strictly nested distributions, result may be inaccurate around " + _d.name)
return
started_dists.add(_d)
print('subgraph "cluster_' + _d.name + '" {')
print('label="' + _d.name + '";')
print('color=blue;')
print('"' + _d.name + ':DUMMY" [shape=point, style=invis];')
if _d.isDistribution():
overlapped_deps = set()
for overlapped in _d.overlapped_distributions():
print_distribution(overlapped)
overlapped_deps.update(overlapped.archived_deps())
for p in _d.archived_deps():
if p.isProject() and p not in overlapped_deps:
if should_ignore(p.name):
continue
print('"' + p.name + '";')
print('"' + _d.name + ':DUMMY"->"' + p.name + '" [style="invis"];')
print('}')
for dep in _d.deps:
if dep.isDistribution():
print_edge(_d, dep)
for dep in _d.excludedLibs:
print_edge(_d, dep)
in_overlap = set()
for d in distributions(opt_limit_to_suite=True):
in_overlap.update(d.overlapped_distributions())
for d in distributions(opt_limit_to_suite=True):
if d not in started_dists and d not in in_overlap:
print_distribution(d)
for p in projects(opt_limit_to_suite=True):
if should_ignore(p.name):
continue
for dep in p.deps:
if should_ignore(dep.name):
continue
print_edge(p, dep)
if p.isJavaProject():
for apd in p.declaredAnnotationProcessors:
if should_ignore(apd.name):
continue
print_edge(p, apd, {"style": "dashed"})
if not args.dist:
for d in distributions(opt_limit_to_suite=True):
if should_ignore(d.name):
continue
for dep in d.deps:
if should_ignore(dep.name):
continue
print_edge(d, dep)
print('}')
def add_ide_envvar(name, value=None):
"""
Adds a given name to the set of environment variables that will
be captured in generated IDE configurations. If `value` is not
None, then it will be the captured value. Otherwise, if the result of
get_env(name) is not None at capture time, it will be used.
Otherwise no value is captured.
"""
mx_ideconfig.add_ide_envvar(name, value=value)
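# Example (the variable names are hypothetical): capture the current value of JAVA_HOME
# and pin EXTRA_IDE_FLAGS to a fixed value in generated IDE configurations:
#
#   add_ide_envvar('JAVA_HOME')
#   add_ide_envvar('EXTRA_IDE_FLAGS', value='-ea')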
def verifysourceinproject(args):
"""find any Java source files that are outside any known Java projects
Returns the number of suites with requireSourceInProjects == True that have Java sources not in projects.
"""
unmanagedSources = {}
suiteDirs = set()
suiteVcDirs = {}
suiteWhitelists = {}
def ignorePath(path, whitelist):
if whitelist is None:
return True
for entry in whitelist:
if fnmatch.fnmatch(path, entry):
return True
return False
for suite in suites(True, includeBinary=False):
projectDirs = [p.dir for p in suite.projects]
distIdeDirs = [d.get_ide_project_dir() for d in suite.dists if d.isJARDistribution() and d.get_ide_project_dir() is not None]
suiteDirs.add(suite.dir)
# all suites in the same repository must have the same setting for requiresSourceInProjects
if suiteVcDirs.get(suite.vc_dir) is None:
suiteVcDirs[suite.vc_dir] = suite.vc
whitelistFile = join(suite.vc_dir, '.nonprojectsources')
if exists(whitelistFile):
with open(whitelistFile) as fp:
suiteWhitelists[suite.vc_dir] = [l.strip() for l in fp.readlines()]
whitelist = suiteWhitelists.get(suite.vc_dir)
for dirpath, dirnames, files in os.walk(suite.dir):
if dirpath == suite.dir:
# no point in traversing vc metadata dir, lib, .workspace
# if there are nested source suites must not scan those now, as they are not in projectDirs (but contain .project files)
omitted = [suite.mxDir, 'lib', '.workspace', 'mx.imports']
if suite.vc:
omitted.append(suite.vc.metadir())
dirnames[:] = [d for d in dirnames if d not in omitted]
elif dirpath == suite.get_output_root():
# don't want to traverse output dir
dirnames[:] = []
continue
elif dirpath == suite.mxDir:
# don't want to traverse mx.name as it contains a .project
dirnames[:] = []
continue
elif dirpath in projectDirs:
# don't traverse subdirs of an existing project in this suite
dirnames[:] = []
continue
elif dirpath in distIdeDirs:
# don't traverse subdirs of an existing distribution in this suite
dirnames[:] = []
continue
elif 'pom.xml' in files:
# skip maven suites
dirnames[:] = []
continue
elif not suite.vc:
# skip suites not in a vcs repository
dirnames[:] = []
continue
elif ignorePath(os.path.relpath(dirpath, suite.vc_dir), whitelist):
# skip whitelisted directories
dirnames[:] = []
continue
javaSources = [x for x in files if x.endswith('.java')]
if len(javaSources) != 0:
javaSources = [os.path.relpath(join(dirpath, i), suite.vc_dir) for i in javaSources]
javaSourcesInVC = [x for x in suite.vc.locate(suite.vc_dir, javaSources) if not ignorePath(x, whitelist)]
if len(javaSourcesInVC) > 0:
unmanagedSources.setdefault(suite.vc_dir, []).extend(javaSourcesInVC)
# also check for files that are outside of suites
for vcDir, vc in suiteVcDirs.items():
for dirpath, dirnames, files in os.walk(vcDir):
if dirpath in suiteDirs:
# skip known suites
dirnames[:] = []
elif exists(join(dirpath, 'mx.' + basename(dirpath), 'suite.py')):
# skip unknown suites
dirnames[:] = []
elif 'pom.xml' in files:
# skip maven suites
dirnames[:] = []
elif not vc:
# skip suites not in a vcs repository
dirnames[:] = []
else:
javaSources = [x for x in files if x.endswith('.java')]
if len(javaSources) != 0:
javaSources = [os.path.relpath(join(dirpath, i), vcDir) for i in javaSources]
javaSourcesInVC = [x for x in vc.locate(vcDir, javaSources) if not ignorePath(x, whitelist)]
if len(javaSourcesInVC) > 0:
unmanagedSources.setdefault(vcDir, []).extend(javaSourcesInVC)
retcode = 0
if len(unmanagedSources) > 0:
log('The following files are managed but not in any project:')
for vc_dir, sources in unmanagedSources.items():
for source in sources:
log(source)
if suiteWhitelists.get(vc_dir) is not None:
retcode += 1
log('Since {} has a .nonprojectsources file, all Java source files must be \n'\
'part of a project in a suite or the files must be listed in the .nonprojectsources.'.format(vc_dir))
return retcode
def _find_packages(project, onlyPublic=True, included=None, excluded=None, packageInfos=None):
"""
Finds the set of packages defined by a project.
:param JavaProject project: the Java project to process
:param bool onlyPublic: specifies if only packages containing a ``package-info.java`` file are to be considered
:param set included: if not None or empty, only consider packages in this set
:param set excluded: if not None or empty, do not consider packages in this set
"""
sourceDirs = project.source_dirs()
def is_visible(folder, names):
for name in names:
if onlyPublic:
if name == 'package-info.java':
return True
elif name.endswith('.java'):
pubClassPattern = re.compile(r"^public\s+((abstract|final)\s+)?(class|(@)?interface|enum)\s*" + splitext(name)[0] + r"\W.*", re.MULTILINE)
with open(join(folder, name)) as f:
for l in f.readlines():
if pubClassPattern.match(l):
return True
return False
packages = set()
for sourceDir in sourceDirs:
for root, _, files in os.walk(sourceDir):
package = root[len(sourceDir) + 1:].replace(os.sep, '.')
if is_visible(root, files):
if not included or package in included:
if not excluded or package not in excluded:
packages.add(package)
if packageInfos is not None:
for name in files:
if name == 'package-info.java':
packageInfos.add(package)
return packages
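# Example (illustrative; the project and package names are hypothetical): collect only
# the documented packages of a project while excluding its internal ones:
#
#   pkgs = _find_packages(project('com.example.tool'), onlyPublic=True,
#                         excluded={'com.example.tool.internal'})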
def _get_javadoc_module_args(projects, jdk):
additional_javadoc_args = []
if jdk.javaCompliance >= JavaCompliance(11):
jdk_excluded_modules = {'jdk.internal.vm.compiler', 'jdk.internal.vm.compiler.management'}
additional_javadoc_args = [
'--limit-modules',
','.join([module.name for module in jdk.get_modules() if not module.name in jdk_excluded_modules])
]
class NonLocal:
requiresJVMCI = False
def visit(dep, edge):
if dep in projects:
return
if hasattr(dep, 'module') and dep.module == 'jdk.internal.vm.ci':
NonLocal.requiresJVMCI = True
for p in projects:
p.walk_deps(visit=visit)
if NonLocal.requiresJVMCI:
for module in jdk.get_modules():
if module.name == 'jdk.internal.vm.ci':
for package in module.packages:
additional_javadoc_args.extend([
'--add-exports', module.name + '/' + package + '=ALL-UNNAMED'
])
additional_javadoc_args.extend(['--add-modules', 'jdk.internal.vm.ci'])
break
return additional_javadoc_args
_javadocRefNotFound = re.compile("Tag @link(plain)?: reference not found: ")
def javadoc(args, parser=None, docDir='javadoc', includeDeps=True, stdDoclet=True, mayBuild=True, quietForNoPackages=False):
"""generate javadoc for some/all Java projects"""
parser = ArgumentParser(prog='mx javadoc') if parser is None else parser
parser.add_argument('-d', '--base', action='store', help='base directory for output')
parser.add_argument('--unified', action='store_true', help='put javadoc in a single directory instead of one per project')
parser.add_argument('--implementation', action='store_true', help='include also implementation packages')
parser.add_argument('--force', action='store_true', help='(re)generate javadoc even if package-list file exists')
parser.add_argument('--projects', action='store', help='comma separated projects to process (omit to process all projects)')
parser.add_argument('--Wapi', action='store_true', dest='warnAPI', help='show warnings about using internal APIs')
parser.add_argument('--argfile', action='store', help='name of file containing extra javadoc options')
parser.add_argument('--arg', action='append', dest='extra_args', help='extra Javadoc arguments (e.g. --arg @-use)', metavar='@<arg>', default=[])
parser.add_argument('-m', '--memory', action='store', help='-Xmx value to pass to underlying JVM')
parser.add_argument('--packages', action='store', help='comma separated packages to process (omit to process all packages)')
parser.add_argument('--exclude-packages', action='store', help='comma separated packages to exclude')
parser.add_argument('--allow-warnings', action='store_true', help='Exit normally even if warnings were found')
args = parser.parse_args(args)
# build list of projects to be processed
if args.projects is not None:
partialJavadoc = True
candidates = [project(name) for name in args.projects.split(',')]
else:
partialJavadoc = False
candidates = projects_opt_limit_to_suites()
# optionally restrict packages within a project
include_packages = None
if args.packages is not None:
include_packages = frozenset(args.packages.split(','))
exclude_packages = None
if args.exclude_packages is not None:
exclude_packages = frozenset(args.exclude_packages.split(','))
def outDir(p):
if args.base is None:
return join(p.dir, docDir)
return join(args.base, p.name, docDir)
def check_package_list(p):
return not exists(join(outDir(p), 'package-list'))
def is_multirelease_jar_overlay(p):
return hasattr(p, 'overlayTarget')
def assess_candidate(p, projects):
if p in projects:
return False, 'Already visited'
if not args.implementation and p.is_test_project():
return False, 'Test project'
if is_multirelease_jar_overlay(p):
return False, 'Multi release JAR overlay project'
if args.force or args.unified or check_package_list(p):
projects.append(p)
return True, None
return False, 'package-list file exists'
projects = []
""" :type: list[JavaProject]"""
snippetsPatterns = set()
verifySincePresent = []
for p in candidates:
if p.isJavaProject():
if hasattr(p.suite, 'snippetsPattern'):
snippetsPatterns.add(p.suite.snippetsPattern)
if p.suite.primary:
verifySincePresent = p.suite.getMxCompatibility().verifySincePresent()
if includeDeps:
p.walk_deps(visit=lambda dep, edge: assess_candidate(dep, projects)[0] if dep.isJavaProject() else None)
added, reason = assess_candidate(p, projects)
if not added:
logv('[{0} - skipping {1}]'.format(reason, p.name))
snippets = []
for s in set((p.suite for p in projects)):
assert isinstance(s, SourceSuite)
for p in s.projects:
if p.isJavaProject() and not is_multirelease_jar_overlay(p):
snippets += p.source_dirs()
snippets = os.pathsep.join(snippets)
snippetslib = library('CODESNIPPET-DOCLET_0.81').get_path(resolve=True)
ap = []
for sp in snippetsPatterns:
ap += ['-snippetclasses', sp]
snippetsPatterns = ap
if not projects:
log('All projects were skipped.')
if not _opts.verbose:
log('Re-run with global -v option to see why.')
return
extraArgs = [a.lstrip('@') for a in args.extra_args]
if args.argfile is not None:
extraArgs += ['@' + args.argfile]
memory = '2g'
if args.memory is not None:
memory = args.memory
memory = '-J-Xmx' + memory
if mayBuild:
# The project must be built to ensure javadoc can find class files for all referenced classes
build(['--no-native', '--dependencies', ','.join((p.name for p in projects))])
if not args.unified:
for p in projects:
assert p.isJavaProject()
pkgs = _find_packages(p, False, include_packages, exclude_packages)
jdk = get_jdk(p.javaCompliance)
links = ['-linkoffline', 'http://docs.oracle.com/javase/' + str(jdk.javaCompliance.value) + '/docs/api/', _mx_home + '/javadoc/jdk']
out = outDir(p)
def visit(dep, edge):
if dep == p:
return
if dep.isProject() and not is_multirelease_jar_overlay(dep):
depOut = outDir(dep)
links.append('-link')
links.append(os.path.relpath(depOut, out))
p.walk_deps(visit=visit)
cp = classpath(p.name, includeSelf=True, jdk=jdk)
sp = os.pathsep.join(p.source_dirs())
overviewFile = join(p.dir, 'overview.html')
delOverviewFile = False
if not exists(overviewFile):
with open(overviewFile, 'w') as fp:
print('<html><body>Documentation for the <code>' + p.name + '</code> project.</body></html>', file=fp)
delOverviewFile = True
nowarnAPI = []
if not args.warnAPI:
nowarnAPI.append('-XDignore.symbol.file')
if not pkgs:
if quietForNoPackages:
continue
abort('No packages to generate javadoc for!')
# windowTitle only applies to the standard doclet processor
windowTitle = []
if stdDoclet:
windowTitle = ['-windowtitle', p.name + ' javadoc']
try:
log('Generating {2} for {0} in {1}'.format(p.name, out, docDir))
# Once https://bugs.openjdk.java.net/browse/JDK-8041628 is fixed,
# this should be reverted to:
# javadocExe = get_jdk().javadoc
# we can then also respect _opts.relatex_compliance
javadocExe = jdk.javadoc
run([javadocExe, memory,
'-XDignore.symbol.file',
'-classpath', cp,
'-quiet',
'-notimestamp',
'-d', out,
'-overview', overviewFile,
'-sourcepath', sp,
'-doclet', 'org.apidesign.javadoc.codesnippet.Doclet',
'-docletpath', snippetslib,
'-snippetpath', snippets,
'-hiddingannotation', 'java.lang.Deprecated',
'-source', str(jdk.javaCompliance)] +
_get_javadoc_module_args([p], jdk) +
snippetsPatterns +
jdk.javadocLibOptions([]) +
([] if jdk.javaCompliance < JavaCompliance(8) else ['-Xdoclint:none']) +
links +
extraArgs +
nowarnAPI +
windowTitle +
list(pkgs))
logv('Generated {2} for {0} in {1}'.format(p.name, out, docDir))
finally:
if delOverviewFile:
os.remove(overviewFile)
else:
jdk = get_jdk()
pkgs = set()
sproots = []
names = []
classpath_deps = set()
for p in projects:
pkgs.update(_find_packages(p, not args.implementation, include_packages, exclude_packages))
sproots += p.source_dirs()
names.append(p.name)
for dep in p.deps:
if dep.isJavaProject():
if dep not in projects:
classpath_deps.add(dep)
elif dep.isLibrary() or dep.isJARDistribution() or dep.isMavenProject() or dep.isJdkLibrary():
classpath_deps.add(dep)
elif dep.isJreLibrary():
pass
elif dep.isTARDistribution() or dep.isNativeProject() or dep.isArchivableProject():
logv("Ignoring dependency from {} to {}".format(p.name, dep.name))
else:
abort("Dependency not supported: {0} ({1})".format(dep, dep.__class__.__name__))
links = ['-linkoffline', 'http://docs.oracle.com/javase/' + str(jdk.javaCompliance.value) + '/docs/api/', _mx_home + '/javadoc/jdk']
overviewFile = os.sep.join([primary_suite().dir, primary_suite().name, 'overview.html'])
out = join(primary_suite().dir, docDir)
if args.base is not None:
out = join(args.base, docDir)
if jdk.javaCompliance <= JavaCompliance(8):
cp = classpath(classpath_deps, jdk=jdk)
else:
cp = classpath(projects, includeSelf=True, jdk=jdk)
sp = os.pathsep.join(sproots)
nowarnAPI = []
if not args.warnAPI:
nowarnAPI.append('-XDignore.symbol.file')
def find_group(pkg):
for p in sproots:
info = p + os.path.sep + pkg.replace('.', os.path.sep) + os.path.sep + 'package-info.java'
if exists(info):
f = open(info, "r")
for line in f:
m = re.search('group="(.*)"', line)
if m:
return m.group(1)
return None
groups = OrderedDict()
for p in pkgs:
g = find_group(p)
if g is None:
continue
if g not in groups:
groups[g] = set()
groups[g].add(p)
groupargs = list()
for k, v in groups.items():
if len(v) == 0:
continue
groupargs.append('-group')
groupargs.append(k)
groupargs.append(':'.join(v))
if not pkgs:
if quietForNoPackages:
return
else:
abort('No packages to generate javadoc for!')
log('Generating {2} for {0} in {1}'.format(', '.join(names), out, docDir))
class WarningCapture:
def __init__(self, prefix, forward, ignoreBrokenRefs):
self.prefix = prefix
self.forward = forward
self.ignoreBrokenRefs = ignoreBrokenRefs
self.warnings = 0
def __call__(self, msg):
shouldPrint = self.forward
if ': warning - ' in msg:
if not self.ignoreBrokenRefs or not _javadocRefNotFound.search(msg):
self.warnings += 1
shouldPrint = not args.allow_warnings
else:
shouldPrint = False
if shouldPrint:
warn(self.prefix + msg.rstrip('\r\n'))
else:
logv(self.prefix + msg.rstrip('\r\n'))
captureOut = WarningCapture('stdout: ', False, partialJavadoc)
captureErr = WarningCapture('stderr: ', True, partialJavadoc)
run([get_jdk().javadoc, memory,
'-classpath', cp,
'-quiet',
'-notimestamp',
'-d', out,
'-doclet', 'org.apidesign.javadoc.codesnippet.Doclet',
'-docletpath', snippetslib,
'-snippetpath', snippets,
'-hiddingannotation', 'java.lang.Deprecated',
'-sourcepath', sp] +
_get_javadoc_module_args(projects, jdk) +
verifySincePresent +
snippetsPatterns +
([] if jdk.javaCompliance < JavaCompliance(8) else ['-Xdoclint:none']) +
(['-overview', overviewFile] if exists(overviewFile) else []) +
groupargs +
links +
extraArgs +
nowarnAPI +
list(pkgs), True, captureOut, captureErr)
if not args.allow_warnings and captureErr.warnings:
abort('Error: Warnings in the javadoc are not allowed!')
if args.allow_warnings and not captureErr.warnings:
logv("Warnings were allowed but there was none")
logv('Generated {2} for {0} in {1}'.format(', '.join(names), out, docDir))
def site(args):
"""creates a website containing javadoc and the project dependency graph"""
parser = ArgumentParser(prog='site')
parser.add_argument('-d', '--base', action='store', help='directory for generated site', required=True, metavar='<dir>')
parser.add_argument('--tmp', action='store', help='directory to use for intermediate results', metavar='<dir>')
parser.add_argument('--name', action='store', help='name of overall documentation', required=True, metavar='<name>')
parser.add_argument('--overview', action='store', help='path to the overview content for overall documentation', required=True, metavar='<path>')
parser.add_argument('--projects', action='store', help='comma separated projects to process (omit to process all projects)')
parser.add_argument('--jd', action='append', help='extra Javadoc arguments (e.g. --jd @-use)', metavar='@<arg>', default=[])
parser.add_argument('--exclude-packages', action='store', help='comma separated packages to exclude', metavar='<pkgs>')
parser.add_argument('--dot-output-base', action='store', help='base file name (relative to <dir>/all) for project dependency graph .svg and .jpg files generated by dot (omit to disable dot generation)', metavar='<path>')
parser.add_argument('--title', action='store', help='value used for -windowtitle and -doctitle javadoc args for overall documentation (default: "<name>")', metavar='<title>')
args = parser.parse_args(args)
args.base = os.path.abspath(args.base)
tmpbase = args.tmp if args.tmp else mkdtemp(prefix=basename(args.base) + '.', dir=dirname(args.base))
unified = join(tmpbase, 'all')
exclude_packages_arg = []
if args.exclude_packages is not None:
exclude_packages_arg = ['--exclude-packages', args.exclude_packages]
projects_arg = []
if args.projects is not None:
projects_arg = ['--projects', args.projects]
projects = [project(name) for name in args.projects.split(',')]
else:
projects = []
walk_deps(visit=lambda dep, edge: projects.append(dep) if dep.isProject() else None, ignoredEdges=[DEP_EXCLUDED])
extra_javadoc_args = []
for a in args.jd:
extra_javadoc_args.append('--arg')
extra_javadoc_args.append('@' + a)
try:
# Create javadoc for each project
javadoc(['--base', tmpbase] + exclude_packages_arg + projects_arg + extra_javadoc_args)
# Create unified javadoc for all projects
with open(args.overview) as fp:
content = fp.read()
idx = content.rfind('</body>')
if idx != -1:
args.overview = join(tmpbase, 'overview_with_projects.html')
with open(args.overview, 'w') as fp2:
print(content[0:idx], file=fp2)
print("""<div class="contentContainer">, file=fp2
<table class="overviewSummary" border="0" cellpadding="3" cellspacing="0" summary="Projects table">
<caption><span>Projects</span><span class="tabEnd"> </span></caption>
<tr><th class="colFirst" scope="col">Project</th><th class="colLast" scope="col"> </th></tr>
<tbody>""")
color = 'row'
for p in projects:
print('<tr class="{1}Color"><td class="colFirst"><a href="../{0}/javadoc/index.html" target="_top">{0}</a></td><td class="colLast"> </td></tr>'.format(p.name, color), file=fp2)
color = 'row' if color == 'alt' else 'alt'
print('</tbody></table></div>', file=fp2)
print(content[idx:], file=fp2)
title = args.title if args.title is not None else args.name
javadoc(['--base', tmpbase,
'--unified',
'--arg', '@-windowtitle', '--arg', '@' + title,
'--arg', '@-doctitle', '--arg', '@' + title,
'--arg', '@-overview', '--arg', '@' + args.overview] + exclude_packages_arg + projects_arg + extra_javadoc_args)
if exists(unified):
shutil.rmtree(unified)
os.rename(join(tmpbase, 'javadoc'), unified)
# Generate dependency graph with Graphviz
if args.dot_output_base is not None:
dotErr = None
try:
if 'version' not in _check_output_str(['dot', '-V'], stderr=subprocess.STDOUT):
dotErr = 'dot -V does not print a string containing "version"'
except subprocess.CalledProcessError as e:
dotErr = 'error calling "dot -V": {0}'.format(e)
except OSError as e:
dotErr = 'error calling "dot -V": {0}'.format(e)
if dotErr is not None:
abort('cannot generate dependency graph: ' + dotErr)
dot = join(tmpbase, 'all', str(args.dot_output_base) + '.dot')
svg = join(tmpbase, 'all', str(args.dot_output_base) + '.svg')
jpg = join(tmpbase, 'all', str(args.dot_output_base) + '.jpg')
html = join(tmpbase, 'all', str(args.dot_output_base) + '.html')
with open(dot, 'w') as fp:
dim = len(projects)
print('digraph projects {', file=fp)
print('rankdir=BT;', file=fp)
print('size = "' + str(dim) + ',' + str(dim) + '";', file=fp)
print('node [shape=rect, fontcolor="blue"];', file=fp)
# print('edge [color="green"];', file=fp)
for p in projects:
print('"' + p.name + '" [URL = "../' + p.name + '/javadoc/index.html", target = "_top"]', file=fp)
for dep in p.canonical_deps():
if dep in [proj.name for proj in projects]:
print('"' + p.name + '" -> "' + dep + '"', file=fp)
depths = dict()
for p in projects:
d = p.max_depth()
depths.setdefault(d, list()).append(p.name)
print('}', file=fp)
run(['dot', '-Tsvg', '-o' + svg, '-Tjpg', '-o' + jpg, dot])
# Post-process generated SVG to remove title elements which most browsers
# render as redundant (and annoying) tooltips.
with open(svg, 'r') as fp:
content = fp.read()
content = re.sub('<title>.*</title>', '', content)
content = re.sub('xlink:title="[^"]*"', '', content)
with open(svg, 'w') as fp:
fp.write(content)
# Create HTML that embeds the svg file in an <object> frame
with open(html, 'w') as fp:
print('<html><body><object data="{0}.svg" type="image/svg+xml"></object></body></html>'.format(args.dot_output_base), file=fp)
if args.tmp:
copytree(tmpbase, args.base)
else:
shutil.move(tmpbase, args.base)
print('Created website - root is ' + join(args.base, 'all', 'index.html'))
finally:
if not args.tmp and exists(tmpbase):
rmtree(tmpbase)
def _kwArg(kwargs):
if len(kwargs) > 0:
return kwargs.pop(0)
return None
@suite_context_free
def sclone(args):
"""clone a suite repository, and its imported suites"""
parser = ArgumentParser(prog='mx sclone')
parser.add_argument('--source', help='url/path of repo containing suite', metavar='<url>')
parser.add_argument('--subdir', help='sub-directory containing the suite in the repository (suite name)')
parser.add_argument('--dest', help='destination directory (default basename of source)', metavar='<path>')
parser.add_argument('--revision', help='revision to checkout')
parser.add_argument("--no-imports", action='store_true', help='do not clone imported suites')
parser.add_argument("--kind", help='vc kind for URL suites', default='hg')
parser.add_argument('--ignore-version', action='store_true', help='ignore version mismatch for existing suites')
parser.add_argument('nonKWArgs', nargs=REMAINDER, metavar='source [dest]...')
args = parser.parse_args(args)
warn("The sclone command is deprecated and is scheduled for removal.")
# check for non keyword args
if args.source is None:
args.source = _kwArg(args.nonKWArgs)
if args.dest is None:
args.dest = _kwArg(args.nonKWArgs)
if len(args.nonKWArgs) > 0:
abort('unrecognized args: ' + ' '.join(args.nonKWArgs))
revision = args.revision if args.revision else "master"
if args.source is None:
# must be primary suite and dest is required
if primary_suite() is None:
abort('--source missing and no primary suite found')
if args.dest is None:
abort('--dest required when --source is not given')
source = primary_suite().vc_dir
if source != primary_suite().dir:
subdir = os.path.relpath(source, primary_suite().dir)
if args.subdir and args.subdir != subdir:
abort("--subdir should be '{}'".format(subdir))
args.subdir = subdir
else:
source = args.source
if args.dest is not None:
dest = args.dest
else:
dest = basename(source.rstrip('/'))
if dest.endswith('.git'):
dest = dest[:-len('.git')]
dest = os.path.abspath(dest)
dest_dir = join(dest, args.subdir) if args.subdir else dest
source = mx_urlrewrites.rewriteurl(source)
vc = vc_system(args.kind)
vc.clone(source, rev=revision, dest=dest)
mxDir = _is_suite_dir(dest_dir)
if not mxDir:
warn("'{}' is not an mx suite".format(dest_dir))
return
if not args.no_imports:
_discover_suites(mxDir, load=False, register=False)
@suite_context_free
def scloneimports(args):
"""clone the imports of an existing suite"""
parser = ArgumentParser(prog='mx scloneimports')
parser.add_argument('--source', help='path to primary suite')
parser.add_argument('--manual', action='store_true', help='this option has no effect, it is deprecated')
parser.add_argument('--ignore-version', action='store_true', help='ignore version mismatch for existing suites')
parser.add_argument('nonKWArgs', nargs=REMAINDER, metavar='source')
args = parser.parse_args(args)
warn("The scloneimports command is deprecated and is scheduled for removal.")
# check for non keyword args
if args.source is None:
args.source = _kwArg(args.nonKWArgs)
if not args.source:
abort('scloneimports: path to primary suite missing')
if not os.path.isdir(args.source):
abort(args.source + ' is not a directory')
if args.nonKWArgs:
warn("Some extra arguments were ignored: " + ' '.join((pipes.quote(a) for a in args.nonKWArgs)))
if args.manual:
warn("--manual argument is deprecated and has been ignored")
if args.ignore_version:
_opts.version_conflict_resolution = 'ignore'
source = realpath(args.source)
mxDir = _is_suite_dir(source)
if not mxDir:
abort("'{}' is not an mx suite".format(source))
_discover_suites(mxDir, load=False, register=False, update_existing=True)
def _supdate_import_visitor(s, suite_import, **extra_args):
_supdate(suite(suite_import.name), suite_import)
def _supdate(s, suite_import):
s.visit_imports(_supdate_import_visitor)
if s.vc:
s.vc.update(s.vc_dir)
@no_suite_loading
def supdate(args):
"""update primary suite and all its imports"""
parser = ArgumentParser(prog='mx supdate')
args = parser.parse_args(args)
_supdate(primary_suite(), None)
def _sbookmark_visitor(s, suite_import):
imported_suite = suite(suite_import.name)
if imported_suite.vc and isinstance(imported_suite, SourceSuite):
imported_suite.vc.bookmark(imported_suite.vc_dir, s.name + '-import', suite_import.version)
@no_suite_loading
def sbookmarkimports(args):
"""place bookmarks on the imported versions of suites in version control"""
parser = ArgumentParser(prog='mx sbookmarkimports')
parser.add_argument('--all', action='store_true', help='operate on all suites (default: primary suite only)')
args = parser.parse_args(args)
if args.all:
for s in suites():
s.visit_imports(_sbookmark_visitor)
else:
primary_suite().visit_imports(_sbookmark_visitor)
def _scheck_imports_visitor(s, suite_import, bookmark_imports, ignore_uncommitted, warn_only):
"""scheckimports visitor for Suite.visit_imports"""
_scheck_imports(s, suite(suite_import.name), suite_import, bookmark_imports, ignore_uncommitted, warn_only)
def _scheck_imports(importing_suite, imported_suite, suite_import, bookmark_imports, ignore_uncommitted, warn_only):
importedVersion = imported_suite.version()
if imported_suite.vc and imported_suite.isDirty() and not ignore_uncommitted:
msg = 'uncommitted changes in {}, please commit them and re-run scheckimports'.format(imported_suite.name)
if isinstance(imported_suite, SourceSuite) and imported_suite.vc and imported_suite.vc.kind == 'hg':
msg = '{}\nIf the only uncommitted change is an updated imported suite version, then you can run:\n\nhg -R {} commit -m "updated imported suite version"'.format(msg, imported_suite.vc_dir)
abort(msg)
if importedVersion != suite_import.version and suite_import.version is not None:
mismatch = 'imported version of {} in {} ({}) does not match parent ({})'.format(imported_suite.name, importing_suite.name, suite_import.version, importedVersion)
if warn_only:
warn(mismatch)
else:
print(mismatch)
if exists(importing_suite.suite_py()) and ask_yes_no('Update ' + importing_suite.suite_py()):
with open(importing_suite.suite_py()) as fp:
contents = fp.read()
if contents.count(str(suite_import.version)) >= 1:
oldVersion = suite_import.version
newContents = contents.replace(oldVersion, str(importedVersion))
if not update_file(importing_suite.suite_py(), newContents, showDiff=True):
abort("Updating {} failed: update didn't change anything".format(importing_suite.suite_py()))
# Update the SuiteImport instances of this suite
def _update_suite_import(s, si):
if si.version == oldVersion:
si.version = importedVersion
importing_suite.visit_imports(_update_suite_import)
if bookmark_imports:
_sbookmark_visitor(importing_suite, suite_import)
else:
print('Could not find the substring {} in {}'.format(suite_import.version, importing_suite.suite_py()))
@no_suite_loading
def scheckimports(args):
"""check that suite import versions are up to date"""
parser = ArgumentParser(prog='mx scheckimports')
parser.add_argument('-b', '--bookmark-imports', action='store_true', help="keep the import bookmarks up-to-date when updating the suites.py file")
parser.add_argument('-i', '--ignore-uncommitted', action='store_true', help="Ignore uncommitted changes in the suite")
parser.add_argument('-w', '--warn-only', action='store_true', help="Only warn imports not matching the checked out revision (no modification)")
parsed_args = parser.parse_args(args)
# check imports of all suites
for s in suites():
s.visit_imports(_scheck_imports_visitor, bookmark_imports=parsed_args.bookmark_imports, ignore_uncommitted=parsed_args.ignore_uncommitted, warn_only=parsed_args.warn_only)
_suitemodel.verify_imports(suites(), args)
@no_suite_discovery
def sforceimports(args):
"""force working directory revision of imported suites to match primary suite imports"""
parser = ArgumentParser(prog='mx sforceimports')
parser.add_argument('--strict-versions', action='store_true', help='DEPRECATED/IGNORED strict version checking')
args = parser.parse_args(args)
if args.strict_versions:
warn("'--strict-versions' argument is deprecated and ignored. For version conflict resolution, see mx's '--version-conflict-resolution' flag.")
_discover_suites(primary_suite().mxDir, load=False, register=False, update_existing=True)
def _spull_import_visitor(s, suite_import, update_versions, only_imports, update_all, no_update):
"""pull visitor for Suite.visit_imports"""
_spull(s, suite(suite_import.name), suite_import, update_versions, only_imports, update_all, no_update)
def _spull(importing_suite, imported_suite, suite_import, update_versions, only_imports, update_all, no_update):
# suite_import is None if importing_suite is primary suite
primary = suite_import is None
# proceed top down to get any updated version ids first
if not primary or not only_imports:
# skip pull of primary if only_imports = True
vcs = imported_suite.vc
if not vcs:
abort('spull requires suites to be in a vcs repository')
# by default we pull to the revision id in the import, but pull head if update_versions = True
rev = suite_import.version if not update_versions and suite_import and suite_import.version else None
if rev and vcs.kind != suite_import.kind:
abort('Wrong VC type for {} ({}), expecting {}, got {}'.format(imported_suite.name, imported_suite.dir, suite_import.kind, imported_suite.vc.kind))
vcs.pull(imported_suite.vc_dir, rev, update=not no_update)
if not primary and update_versions:
importedVersion = vcs.parent(imported_suite.vc_dir)
if importedVersion != suite_import.version:
if exists(importing_suite.suite_py()):
with open(importing_suite.suite_py()) as fp:
contents = fp.read()
if contents.count(str(suite_import.version)) == 1:
newContents = contents.replace(suite_import.version, str(importedVersion))
log('Updating "version" attribute in import of suite ' + suite_import.name + ' in ' + importing_suite.suite_py() + ' to ' + importedVersion)
update_file(importing_suite.suite_py(), newContents, showDiff=True)
else:
log('Could not update as the substring {} does not appear exactly once in {}'.format(suite_import.version, importing_suite.suite_py()))
log('Please update "version" attribute in import of suite ' + suite_import.name + ' in ' + importing_suite.suite_py() + ' to ' + importedVersion)
suite_import.version = importedVersion
imported_suite.re_init_imports()
if not primary and not update_all:
update_versions = False
imported_suite.visit_imports(_spull_import_visitor, update_versions=update_versions, only_imports=only_imports, update_all=update_all, no_update=no_update)
@no_suite_loading
def spull(args):
"""pull primary suite and all its imports"""
parser = ArgumentParser(prog='mx spull')
parser.add_argument('--update-versions', action='store_true', help='pull tip of directly imported suites and update suite.py')
parser.add_argument('--update-all', action='store_true', help='pull tip of all imported suites (transitively)')
parser.add_argument('--only-imports', action='store_true', help='only pull imported suites, not the primary suite')
parser.add_argument('--no-update', action='store_true', help='only pull, without updating')
args = parser.parse_args(args)
warn("The spull command is deprecated and is scheduled for removal.")
if args.update_all and not args.update_versions:
abort('--update-all can only be used in conjunction with --update-versions')
_spull(primary_suite(), primary_suite(), None, args.update_versions, args.only_imports, args.update_all, args.no_update)
def _sincoming_import_visitor(s, suite_import, **extra_args):
_sincoming(suite(suite_import.name), suite_import)
def _sincoming(s, suite_import):
s.visit_imports(_sincoming_import_visitor)
if s.vc:
output = s.vc.incoming(s.vc_dir)
if output:
print(output)
else:
print('No version control info for suite ' + s.name)
@no_suite_loading
def sincoming(args):
"""check incoming for primary suite and all imports"""
parser = ArgumentParser(prog='mx sincoming')
args = parser.parse_args(args)
warn("The sincoming command is deprecated and is scheduled for removal.")
_sincoming(primary_suite(), None)
### ~~~~~~~~~~~~~ Mercurial
def _hg_command_import_visitor(s, suite_import, **extra_args):
_hg_command(suite(suite_import.name), suite_import, **extra_args)
def _hg_command(s, suite_import, **extra_args):
s.visit_imports(_hg_command_import_visitor, **extra_args)
if isinstance(s.vc, HgConfig):
out = s.vc.hg_command(s.vc_dir, extra_args['args'])
print(out)
@no_suite_loading
def hg_command(args):
"""Run a Mercurial command in every suite"""
warn("The hg command is deprecated and is scheduled for removal.")
_hg_command(primary_suite(), None, args=args)
def _stip_import_visitor(s, suite_import, **extra_args):
_stip(suite(suite_import.name), suite_import)
def _stip(s, suite_import):
s.visit_imports(_stip_import_visitor)
if not s.vc:
print('No version control info for suite ' + s.name)
else:
print('tip of ' + s.name + ': ' + s.vc.tip(s.vc_dir))
@no_suite_loading
def stip(args):
"""check tip for primary suite and all imports"""
parser = ArgumentParser(prog='mx stip')
args = parser.parse_args(args)
warn("The tip command is deprecated and is scheduled for removal.")
_stip(primary_suite(), None)
def _sversions_rev(rev, isdirty, with_color):
if with_color:
label = colorize(rev[0:12], color='yellow')
else:
label = rev[0:12]
return label + ' +'[int(isdirty)]
@no_suite_loading
def sversions(args):
"""print working directory revision for primary suite and all imports"""
parser = ArgumentParser(prog='mx sversions')
parser.add_argument('--color', action='store_true', help='color the short form part of the revision id')
args = parser.parse_args(args)
with_color = args.color
visited = set()
def _sversions_import_visitor(s, suite_import, **extra_args):
_sversions(suite(suite_import.name), suite_import)
def _sversions(s, suite_import):
if s.dir in visited:
return
visited.add(s.dir)
if s.vc is None:
print('No version control info for suite ' + s.name)
else:
print(_sversions_rev(s.vc.parent(s.vc_dir), s.vc.isDirty(s.vc_dir), with_color) + ' ' + s.name + ' ' + s.vc_dir)
s.visit_imports(_sversions_import_visitor)
if not isinstance(primary_suite(), MXSuite):
_sversions(primary_suite(), None)
### ~~~~~~~~~~~~~ Java Compiler
def findclass(args, logToConsole=True, resolve=True, matcher=lambda string, classname: string in classname):
"""find all classes matching a given substring"""
matches = []
for entry, filename in classpath_walk(includeBootClasspath=True, resolve=resolve, jdk=get_jdk()):
if filename.endswith('.class'):
if isinstance(entry, zipfile.ZipFile):
classname = filename.replace('/', '.')
else:
classname = filename.replace(os.sep, '.')
classname = classname[:-len('.class')]
for a in args:
if matcher(a, classname):
if classname not in matches:
matches.append(classname)
if logToConsole:
log(classname)
return matches
def select_items(items, descriptions=None, allowMultiple=True):
"""
Presents a command line interface for selecting one or more (if allowMultiple is true) items.
"""
if len(items) <= 1:
return items
else:
assert is_interactive()
numlen = str(len(str(len(items))))
if allowMultiple:
log(('[{0:>' + numlen + '}] <all>').format(0))
for i in range(0, len(items)):
if descriptions is None:
log(('[{0:>' + numlen + '}] {1}').format(i + 1, items[i]))
else:
assert len(items) == len(descriptions)
wrapper = textwrap.TextWrapper(subsequent_indent=' ')
log('\n'.join(wrapper.wrap(('[{0:>' + numlen + '}] {1} - {2}').format(i + 1, items[i], descriptions[i]))))
while True:
if allowMultiple:
s = input('Enter number(s) of selection (separate multiple choices with spaces): ').split()
else:
s = [input('Enter number of selection: ')]
try:
s = [int(x) for x in s]
except:
log('Selection contains non-numeric characters: "' + ' '.join(s) + '"')
continue
if allowMultiple and 0 in s:
return items
indexes = []
for n in s:
if n not in range(1, len(items) + 1):
log('Invalid selection: ' + str(n))
continue
indexes.append(n - 1)
if allowMultiple:
return [items[i] for i in indexes]
if len(indexes) == 1:
return items[indexes[0]]
return None
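# Illustrative select_items session (item names are made up, allowMultiple=True):
#
#     [0] <all>
#     [1] jdk8
#     [2] jdk11
#     Enter number(s) of selection (separate multiple choices with spaces): 2
#
# which returns ['jdk11']; entering 0 would return both items.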
def exportlibs(args):
"""export libraries to an archive file"""
parser = ArgumentParser(prog='exportlibs')
parser.add_argument('-b', '--base', action='store', help='base name of archive (default: libs)', default='libs', metavar='<path>')
    parser.add_argument('-a', '--include-all', action='store_true', help="include all defined libraries")
parser.add_argument('--arc', action='store', choices=['tgz', 'tbz2', 'tar', 'zip'], default='tgz', help='the type of the archive to create')
parser.add_argument('--no-sha1', action='store_false', dest='sha1', help='do not create SHA1 signature of archive')
parser.add_argument('--no-md5', action='store_false', dest='md5', help='do not create MD5 signature of archive')
parser.add_argument('--include-system-libs', action='store_true', help='include system libraries (i.e., those not downloaded from URLs)')
parser.add_argument('extras', nargs=REMAINDER, help='extra files and directories to add to archive', metavar='files...')
args = parser.parse_args(args)
def createArchive(addMethod):
entries = {}
def add(path, arcname):
apath = os.path.abspath(path)
if arcname not in entries:
entries[arcname] = apath
logv('[adding ' + path + ']')
addMethod(path, arcname=arcname)
elif entries[arcname] != apath:
logv('[warning: ' + apath + ' collides with ' + entries[arcname] + ' as ' + arcname + ']')
else:
logv('[already added ' + path + ']')
libsToExport = set()
if args.include_all:
for lib in _libs.values():
libsToExport.add(lib)
else:
def isValidLibrary(dep):
if dep in _libs.keys():
lib = _libs[dep]
if len(lib.urls) != 0 or args.include_system_libs:
return lib
return None
# iterate over all project dependencies and find used libraries
for p in _projects.values():
for dep in p.deps:
r = isValidLibrary(dep)
if r:
libsToExport.add(r)
# a library can have other libraries as dependency
size = 0
while size != len(libsToExport):
size = len(libsToExport)
for lib in libsToExport.copy():
for dep in lib.deps:
r = isValidLibrary(dep)
if r:
libsToExport.add(r)
for lib in libsToExport:
add(lib.get_path(resolve=True), lib.path)
if lib.sha1:
add(lib.get_path(resolve=True) + ".sha1", lib.path + ".sha1")
if lib.sourcePath:
add(lib.get_source_path(resolve=True), lib.sourcePath)
if lib.sourceSha1:
add(lib.get_source_path(resolve=True) + ".sha1", lib.sourcePath + ".sha1")
if args.extras:
for e in args.extras:
if os.path.isdir(e):
for root, _, filenames in os.walk(e):
for name in filenames:
f = join(root, name)
add(f, f)
else:
add(e, e)
if args.arc == 'zip':
path = args.base + '.zip'
with zipfile.ZipFile(path, 'w') as zf:
createArchive(zf.write)
else:
path = args.base + '.tar'
mode = 'w'
if args.arc != 'tar':
sfx = args.arc[1:]
mode = mode + ':' + sfx
path = path + '.' + sfx
with tarfile.open(path, mode) as tar:
createArchive(tar.add)
log('created ' + path)
def digest(enabled, path, factory, suffix):
if enabled:
d = factory()
with open(path, 'rb') as f:
while True:
buf = f.read(4096)
if not buf:
break
d.update(buf)
with open(path + '.' + suffix, 'w') as fp:
print(d.hexdigest(), file=fp)
log('created ' + path + '.' + suffix)
digest(args.sha1, path, hashlib.sha1, 'sha1')
digest(args.md5, path, hashlib.md5, 'md5')
def javap(args):
"""disassemble classes matching given pattern with javap"""
parser = ArgumentParser(prog='mx javap')
parser.add_argument('-r', '--resolve', action='store_true', help='perform eager resolution (e.g., download missing jars) of class search space')
parser.add_argument('classes', nargs=REMAINDER, metavar='<class name patterns...>')
args = parser.parse_args(args)
jdk = get_jdk()
javapExe = jdk.javap
if not exists(javapExe):
abort('The javap executable does not exist: ' + javapExe)
else:
candidates = findclass(args.classes, resolve=args.resolve, logToConsole=False)
if len(candidates) == 0:
log('no matches')
selection = select_items(candidates)
run([javapExe, '-private', '-verbose', '-classpath', classpath(resolve=args.resolve, jdk=jdk)] + selection)
### ~~~~~~~~~~~~~ commands
def suite_init_cmd(args):
"""create a suite
usage: mx init [-h] [--repository REPOSITORY] [--subdir]
[--repository-kind REPOSITORY_KIND]
name
positional arguments:
name the name of the suite
optional arguments:
-h, --help show this help message and exit
--repository REPOSITORY
directory for the version control repository
--subdir creates the suite in a sub-directory of the repository
(requires --repository)
--repository-kind REPOSITORY_KIND
The kind of repository to create ('hg', 'git' or
'none'). Defaults to 'git'
"""
parser = ArgumentParser(prog='mx init')
parser.add_argument('--repository', help='directory for the version control repository', default=None)
parser.add_argument('--subdir', action='store_true', help='creates the suite in a sub-directory of the repository (requires --repository)')
parser.add_argument('--repository-kind', help="The kind of repository to create ('hg', 'git' or 'none'). Defaults to 'git'", default='git')
parser.add_argument('name', help='the name of the suite')
args = parser.parse_args(args)
if args.subdir and not args.repository:
abort('When using --subdir, --repository needs to be specified')
if args.repository:
vc_dir = args.repository
else:
vc_dir = args.name
if args.repository_kind != 'none':
vc = vc_system(args.repository_kind)
vc.init(vc_dir)
suite_dir = vc_dir
if args.subdir:
suite_dir = join(suite_dir, args.name)
suite_mx_dir = join(suite_dir, _mxDirName(args.name))
ensure_dir_exists(suite_mx_dir)
if os.listdir(suite_mx_dir):
abort('{} is not empty'.format(suite_mx_dir))
suite_py = join(suite_mx_dir, 'suite.py')
suite_skeleton_str = """suite = {
"name" : "NAME",
"mxversion" : "VERSION",
"imports" : {
"suites": [
]
},
"libraries" : {
},
"projects" : {
},
}
""".replace('NAME', args.name).replace('VERSION', str(version))
with open(suite_py, 'w') as f:
f.write(suite_skeleton_str)
def show_projects(args):
"""show all projects"""
for s in suites():
if len(s.projects) != 0:
print(s.suite_py())
for p in s.projects:
print('\t' + p.name)
def show_jar_distributions(args):
parser = ArgumentParser(prog='mx jar-distributions', description='List jar distributions')
parser.add_argument('--sources', action='store_true', help='Show the path to the source bundle of jar distributions when available.')
parser.add_argument('--sources-only', action='store_true', help='Only show the path to the sources for jar distributions.')
parser.add_argument('--dependencies', action='store_true', help='Also list dependencies (path to jar only).')
parser.add_argument('--no-tests', action='store_false', dest='tests', help='Filter out test distributions.')
args = parser.parse_args(args)
if args.sources_only:
args.sources = True
all_jars = set()
for s in suites(opt_limit_to_suite=True):
jars = [d for d in s.dists if d.isJARDistribution() and (args.tests or not d.is_test_distribution())]
for jar in jars:
sources = None
if args.sources:
sources = jar.sourcesPath
if args.sources_only:
if not sources:
raise abort("Could not find sources for {}".format(jar))
print(sources)
else:
path = jar.path
if sources:
print("{}:{}\t{}\t{}".format(s.name, jar.name, path, sources))
else:
print("{}:{}\t{}".format(s.name, jar.name, path))
all_jars.update(jars)
if args.dependencies and all_jars:
for e in classpath(all_jars, includeSelf=False, includeBootClasspath=True, unique=True).split(os.pathsep):
print(e)
def show_suites(args):
"""show all suites
usage: mx suites [-h] [--locations] [--licenses]
optional arguments:
-h, --help show this help message and exit
--locations show element locations on disk
--licenses show element licenses
"""
parser = ArgumentParser(prog='mx suites')
parser.add_argument('-p', '--locations', action='store_true', help='show element locations on disk')
parser.add_argument('-l', '--licenses', action='store_true', help='show element licenses')
parser.add_argument('-a', '--archived-deps', action='store_true', help='show archived deps for distributions')
args = parser.parse_args(args)
def _location(e):
if args.locations:
if isinstance(e, Suite):
return e.mxDir
if isinstance(e, Library):
return join(e.suite.dir, e.path)
if isinstance(e, Distribution):
return e.path
if isinstance(e, Project):
return e.dir
return None
def _show_section(name, section):
if section:
print(' ' + name + ':')
for e in section:
location = _location(e)
out = ' ' + e.name
data = []
if location:
data.append(location)
if args.licenses:
if e.theLicense:
l = e.theLicense.name
else:
l = '??'
data.append(l)
if data:
out += ' (' + ', '.join(data) + ')'
print(out)
if name == 'distributions' and args.archived_deps:
for a in e.archived_deps():
print(' ' + a.name)
for s in suites(True):
location = _location(s)
if location:
print('{} ({})'.format(s.name, location))
else:
print(s.name)
_show_section('libraries', s.libs)
_show_section('jrelibraries', s.jreLibs)
_show_section('jdklibraries', s.jdkLibs)
_show_section('projects', s.projects)
_show_section('distributions', s.dists)
_show_paths_examples = """
- `mx paths DEPENDENCY` selects the "main" product of `DEPENDENCY`
- `mx paths DEPENDENCY/*.zip` selects products of `DEPENDENCY` that match `*.zip`
- `mx paths suite:DEPENDENCY` selects `DEPENDENCY` in suite `suite`"""
def show_paths(args):
"""usage: mx paths [-h] dependency-spec
Shows on-disk path to dependencies such as libraries, distributions, etc.
positional arguments:
dependency-spec Dependency specification in the same format as `dependency:` sources in a layout distribution.
optional arguments:
-h, --help show this help message and exit
--download Downloads the dependency (only for libraries)."""
parser = ArgumentParser(prog='mx paths', description="Shows on-disk path to dependencies such as libraries, distributions, etc.", epilog=_show_paths_examples, formatter_class=RawTextHelpFormatter)
parser.add_argument('--download', action='store_true', help='Downloads the dependency (only for libraries).')
parser.add_argument('--output', action='store_true', help='Show output location rather than archivable result (only for distributions).')
parser.add_argument('spec', help='Dependency specification in the same format as `dependency:` sources in a layout distribution.', metavar='dependency-spec')
args = parser.parse_args(args)
spec = args.spec
spec_dict = LayoutDistribution._as_source_dict('dependency:' + spec, 'NO_DIST', 'NO_DEST')
d = dependency(spec_dict['dependency'])
if args.download:
if not d.isResourceLibrary() and not d.isLibrary():
abort("--download can only be used with libraries")
d.get_path(resolve=True)
if args.output:
if not isinstance(d, AbstractDistribution):
abort("--output can only be used with distributions")
print(d.get_output())
else:
include = spec_dict.get('path')
for source_file, arcname in d.getArchivableResults(single=include is None):
if include is None or glob_match(include, arcname):
print(source_file)
show_paths.__doc__ += '\n' + _show_paths_examples
def verify_library_urls(args):
"""verify that all suite libraries are reachable from at least one of the URLs
usage: mx verifylibraryurls [--include-mx]
"""
parser = ArgumentParser(prog='mx verifylibraryurls')
parser.add_argument('--include-mx', help='', action='store_true', default=primary_suite() == _mx_suite)
args = parser.parse_args(args)
ok = True
_suites = suites(True)
if args.include_mx:
_suites.append(_mx_suite)
for s in _suites:
for lib in s.libs:
log('Verifying connection to URLs for ' + lib.name)
# Due to URL rewriting, URL list may have duplicates so perform deduping now
urls = list(set(lib.get_urls()))
if (lib.isLibrary() or lib.isResourceLibrary()) and len(lib.get_urls()) != 0 and not download(os.devnull, urls, verifyOnly=True, abortOnError=False, verbose=_opts.verbose):
ok = False
log_error('Library {} not available from {}'.format(lib.qualifiedName(), lib.get_urls()))
if not ok:
abort('Some libraries are not reachable')
_java_package_regex = re.compile(r"^\s*package\s+(?P<package>[a-zA-Z_][\w\.]*)\s*;$", re.MULTILINE)
### ~~~~~~~~~~~~~ CI
def suite_ci_files(suite, ci_path=None, extension=(".hocon", ".jsonnet", '.libsonnet')):
"""
Get the list of ci files for the given suite
:param suite: SourceSuite
:param ci_path: str or None
:param extension: str | tuple[str] | list[str] | set[str]
:return:
"""
assert isinstance(suite, SourceSuite), "suite must be a SourceSuite"
assert extension is not None, "extension cannot be None, must be a string or iterable over strings like '.ext'."
if isinstance(extension, str):
extension = [extension]
extension = set(extension)
ci_files = os.listdir(join(suite.dir, ci_path)) if ci_path else os.listdir(suite.dir)
return [join(ci_path, name) if ci_path else name
for name in ci_files
if os.path.splitext(name)[-1] in extension]
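# Illustrative result (hypothetical suite layout, POSIX paths): for a SourceSuite whose
# directory contains ci/gate.jsonnet and ci/common.libsonnet,
# suite_ci_files(s, 'ci') would return ['ci/gate.jsonnet', 'ci/common.libsonnet'].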
def verify_ci(args, base_suite, dest_suite, common_file=None, common_dirs=None,
extension=(".hocon", ".jsonnet", '.libsonnet')):
"""
Verify CI configuration
:type args: list[str] or None
:type base_suite: SourceSuite
:type dest_suite: SourceSuite
:type common_file: str | list[str] | None
:type common_dirs: list[str] | None
:type extension: str | tuple[str] | list[str] | set[str]
"""
parser = ArgumentParser(prog='mx verify-ci')
parser.add_argument('-s', '--sync', action='store_true', help='synchronize with graal configuration')
parser.add_argument('-q', '--quiet', action='store_true', help='Only produce output if something is changed')
args = parser.parse_args(args)
if not isinstance(dest_suite, SourceSuite) or not isinstance(base_suite, SourceSuite):
raise abort("Can not use verify-ci on binary suites: {0} and {1} need to be source suites".format(
base_suite.name, dest_suite.name))
assert extension is not None, "extension cannot be None, must be a string or iterable over strings like '.ext'."
if isinstance(extension, str):
extension = [extension]
extension = set(extension)
if isinstance(common_file, str):
common_file = [common_file]
common_dirs = common_dirs or []
def _handle_error(msg, base_file, dest_file):
if args.sync:
log("Overriding {1} from {0}".format(os.path.normpath(base_file), os.path.normpath(dest_file)))
shutil.copy(base_file, dest_file)
else:
log(msg + ": " + os.path.normpath(dest_file))
log("Try synchronizing:")
log(" " + base_file)
log(" " + dest_file)
log("Or execute 'mx verify-ci' with the '--sync' option.")
abort(1)
def _common_string_end(s1, s2):
l = 0
while s1[l-1] == s2[l-1]:
l -= 1
        return s1[l:]
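        # e.g. _common_string_end('/base/ci/common.jsonnet', '/dest/ci/common.jsonnet')
        # yields '/ci/common.jsonnet', the shared trailing path (paths are illustrative).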
def _verify_file(base_file, dest_file):
if not os.path.isfile(base_file) or not os.path.isfile(dest_file):
_handle_error('Common CI file not found', base_file, dest_file)
if not filecmp.cmp(base_file, dest_file):
_handle_error('Common CI file mismatch', base_file, dest_file)
logv("CI File '{0}' matches.".format(_common_string_end(base_file, dest_file)))
for d in common_dirs:
base_dir = join(base_suite.dir, d)
dest_dir = join(dest_suite.dir, d)
for root, _, files in os.walk(base_dir):
rel_root = os.path.relpath(root, base_dir)
for f in files:
if os.path.splitext(f)[-1] in extension:
community_file = join(base_dir, rel_root, f)
enterprise_file = join(dest_dir, rel_root, f)
_verify_file(community_file, enterprise_file)
if common_file:
for f in common_file:
base_common = join(base_suite.vc_dir, f)
dest_common = join(dest_suite.vc_dir, f)
_verify_file(base_common, dest_common)
if not args.quiet:
log("CI setup is fine.")
_warn_test_results_pattern_collision = False
_test_results_patter_xxx = re.compile('XXXX*')
def maybe_generate_test_results_path(key=None):
pattern = get_env('MX_TEST_RESULTS_PATTERN')
if not pattern:
return None
if 'XXX' not in pattern:
global _warn_test_results_pattern_collision
if _warn_test_results_pattern_collision:
warn("MX_TEST_RESULTS_PATTERN doesn't contain `XXX` but it seems to be used multiple times.\n"
"Results will probably be overwritten")
_warn_test_results_pattern_collision = True
return pattern
if key:
identifier = key + "-"
else:
identifier = ""
identifier += datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
identifier += "-{:08x}".format(random.getrandbits(32))
return _test_results_patter_xxx.sub(identifier, pattern, count=1)
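# Illustrative substitution (timestamp and random suffix are made up):
# with MX_TEST_RESULTS_PATTERN='results-XXX.json' and key='gate', this returns
# something like 'results-gate-2024-05-01_12-30-45-0a1b2c3d.json'.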
_test_results_tags = {get_os(), get_arch()}
def user_env_test_results_tags():
return get_env("MX_TEST_RESULT_TAGS")
def test_results_tags():
tags = _test_results_tags
from_env = user_env_test_results_tags()
if from_env:
tags = tags.union(from_env.split(','))
return tags
### ~~~~~~~~~~~~~ Java Compiler
__compile_mx_class_lock = multiprocessing.Lock()
def _compile_mx_class(javaClassNames, classpath=None, jdk=None, myDir=None, extraJavacArgs=None, as_jar=False):
if not isinstance(javaClassNames, list):
javaClassNames = [javaClassNames]
myDir = join(_mx_home, 'java') if myDir is None else myDir
binDir = join(_mx_suite.get_output_root(), 'bin' if not jdk else '.jdk' + str(jdk.version))
javaSources = [join(myDir, n + '.java') for n in javaClassNames]
javaClasses = [join(binDir, n + '.class') for n in javaClassNames]
if as_jar:
output = join(_mx_suite.get_output_root(), ('' if not jdk else 'jdk' + str(jdk.version)) + '-' + '-'.join(javaClassNames) + '.jar')
else:
assert len(javaClassNames) == 1, 'can only compile multiple sources when producing a jar'
output = javaClasses[0]
if not exists(output) or TimeStampFile(output).isOlderThan(javaSources):
with __compile_mx_class_lock:
ensure_dir_exists(binDir)
javac = jdk.javac if jdk else get_jdk(tag=DEFAULT_JDK_TAG).javac
cmd = [javac, '-d', _cygpathU2W(binDir)]
if classpath:
cmd.extend(['-cp', _separatedCygpathU2W(binDir + os.pathsep + classpath)])
if extraJavacArgs:
cmd.extend(extraJavacArgs)
cmd += [_cygpathU2W(s) for s in javaSources]
try:
subprocess.check_call(cmd)
if as_jar:
classfiles = []
for root, _, filenames in os.walk(binDir):
for n in filenames:
if n.endswith('.class'):
# Get top level class name
if '$' in n:
className = n[0:n.find('$')]
else:
className = n[:-len('.class')]
if className in javaClassNames:
classfiles.append(os.path.relpath(join(root, n), binDir))
subprocess.check_call([jdk.jar, 'cfM', _cygpathU2W(output)] + classfiles, cwd=_cygpathU2W(binDir))
logv('[created/updated ' + output + ']')
except subprocess.CalledProcessError as e:
abort('failed to compile ' + str(javaSources) + ' or create ' + output + ': ' + str(e))
return myDir, output if as_jar else binDir
def _add_command_primary_option(parser):
parser.add_argument('--primary', action='store_true', help='limit checks to primary suite')
### ~~~~~~~~~~~~~ commands
def checkcopyrights(args):
"""run copyright check on the sources"""
class CP(ArgumentParser):
def format_help(self):
return ArgumentParser.format_help(self) + self._get_program_help()
def _get_program_help(self):
help_output = _check_output_str([get_jdk().java, '-cp', classpath('com.oracle.mxtool.checkcopy'), 'com.oracle.mxtool.checkcopy.CheckCopyright', '--help'])
return '\nother arguments preceded with --, e.g. mx checkcopyright --primary -- --all\n' + help_output
# ensure compiled form of code is up to date
build(['--no-daemon', '--dependencies', 'com.oracle.mxtool.checkcopy'])
parser = CP(prog='mx checkcopyrights')
_add_command_primary_option(parser)
parser.add_argument('remainder', nargs=REMAINDER, metavar='...')
args = parser.parse_args(args)
remove_doubledash(args.remainder)
result = 0
# copyright checking is suite specific as each suite may have different overrides
for s in suites(True):
if args.primary and not s.primary:
continue
custom_copyrights = _cygpathU2W(join(s.mxDir, 'copyrights'))
custom_args = []
if exists(custom_copyrights):
custom_args = ['--custom-copyright-dir', custom_copyrights]
rc = run([get_jdk().java, '-cp', classpath('com.oracle.mxtool.checkcopy'), 'com.oracle.mxtool.checkcopy.CheckCopyright', '--copyright-dir', _mx_home] + custom_args + args.remainder, cwd=s.dir, nonZeroIsFatal=False)
result = result if rc == 0 else rc
return result
### ~~~~~~~~~~~~~ Maven
def mvn_local_install(group_id, artifact_id, path, version, repo=None):
if not exists(path):
        abort('File ' + path + ' does not exist')
repoArgs = ['-Dmaven.repo.local=' + repo] if repo else []
run_maven(['install:install-file', '-DgroupId=' + group_id, '-DartifactId=' + artifact_id, '-Dversion=' +
version, '-Dpackaging=jar', '-Dfile=' + path, '-DcreateChecksum=true'] + repoArgs)
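# Illustrative call (the group id, artifact id and paths below are hypothetical):
#
#     mvn_local_install('org.example', 'mylib', 'dists/mylib.jar', '1.0', repo='/tmp/m2')
#
# which is roughly equivalent to running:
#
#     mvn install:install-file -DgroupId=org.example -DartifactId=mylib -Dversion=1.0 \
#         -Dpackaging=jar -Dfile=dists/mylib.jar -DcreateChecksum=true -Dmaven.repo.local=/tmp/m2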
def maven_install(args):
"""install the primary suite in a local maven repository for testing"""
parser = ArgumentParser(prog='mx maven-install')
parser.add_argument('--no-checks', action='store_true', help='checks on status are disabled')
parser.add_argument('--test', action='store_true', help='print info about JARs to be installed')
parser.add_argument('--repo', action='store', help='path to local Maven repository to install to')
parser.add_argument('--only', action='store', help='comma separated set of distributions to install')
    parser.add_argument('--version-string', action='store', help='Provide a custom version string for the installation')
parser.add_argument('--all-suites', action='store_true', help='Deploy suite and the distributions it depends on in other suites')
args = parser.parse_args(args)
_mvn.check()
if args.all_suites:
_suites = suites()
else:
_suites = [primary_suite()]
for s in _suites:
nolocalchanges = args.no_checks or not s.vc or s.vc.can_push(s.vc_dir, strict=False)
version = args.version_string if args.version_string else s.vc.parent(s.vc_dir)
releaseVersion = s.release_version(snapshotSuffix='SNAPSHOT')
arcdists = []
only = args.only.split(',') if args.only is not None else None
dists = [d for d in s.dists if _dist_matcher(d, None, False, only, None, False)]
for dist in dists:
# ignore non-exported dists
if not dist.internal and not dist.name.startswith('COM_ORACLE') and hasattr(dist, 'maven') and dist.maven:
arcdists.append(dist)
mxMetaName = _mx_binary_distribution_root(s.name)
s.create_mx_binary_distribution_jar()
mxMetaJar = s.mx_binary_distribution_jar_path()
if not args.test:
if nolocalchanges:
mvn_local_install(_mavenGroupId(s.name), _map_to_maven_dist_name(mxMetaName), mxMetaJar, version, args.repo)
else:
print('Local changes found, skipping install of ' + version + ' version')
mvn_local_install(_mavenGroupId(s.name), _map_to_maven_dist_name(mxMetaName), mxMetaJar, releaseVersion, args.repo)
for dist in arcdists:
if nolocalchanges:
mvn_local_install(dist.maven_group_id(), dist.maven_artifact_id(), dist.path, version, args.repo)
mvn_local_install(dist.maven_group_id(), dist.maven_artifact_id(), dist.path, releaseVersion, args.repo)
else:
print('jars to deploy manually for version: ' + version)
print('name: ' + _map_to_maven_dist_name(mxMetaName) + ', path: ' + os.path.relpath(mxMetaJar, s.dir))
for dist in arcdists:
print('name: ' + dist.maven_artifact_id() + ', path: ' + os.path.relpath(dist.path, s.dir))
### ~~~~~~~~~~~~~ commands
def show_version(args):
"""print mx version"""
parser = ArgumentParser(prog='mx version')
parser.add_argument('--oneline', action='store_true', help='show mx revision and version in one line')
args = parser.parse_args(args)
if args.oneline:
vc = VC.get_vc(_mx_home, abortOnError=False)
if vc is None:
print('No version control info for mx %s' % version)
else:
print(_sversions_rev(vc.parent(_mx_home), vc.isDirty(_mx_home), False) + ' mx %s' % version)
return
print(version)
@suite_context_free
def update(args):
"""update mx to the latest version"""
parser = ArgumentParser(prog='mx update')
parser.add_argument('-n', '--dry-run', action='store_true', help='show incoming changes without applying them')
args = parser.parse_args(args)
vc = VC.get_vc(_mx_home, abortOnError=False)
if isinstance(vc, GitConfig):
if args.dry_run:
print(vc.incoming(_mx_home))
else:
print(vc.pull(_mx_home, update=True))
else:
print('Cannot update mx as git is unavailable')
def print_simple_help():
print('Welcome to Mx version ' + str(version))
print(ArgumentParser.format_help(_argParser))
print('Modify mx.<suite>/suite.py in the top level directory of a suite to change the project structure')
print('Here are common Mx commands:')
print('\nBuilding and testing:')
print(list_commands(_build_commands))
print('Checking stylistic aspects:')
print(list_commands(_style_check_commands))
print('Useful utilities:')
print(list_commands(_utilities_commands))
print('\'mx help\' lists all commands. See \'mx help <command>\' to read about a specific command')
def list_commands(l):
return _mx_commands.list_commands(l)
_build_commands = ['ideinit', 'build', 'unittest', 'gate', 'clean']
_style_check_commands = ['canonicalizeprojects', 'checkheaders', 'checkstyle', 'spotbugs', 'eclipseformat']
_utilities_commands = ['suites', 'envs', 'findclass', 'javap']
update_commands("mx", {
'autopep8': [autopep8, '[options]'],
'archive': [_archive, '[options]'],
'benchmark' : [mx_benchmark.benchmark, '--vmargs [vmargs] --runargs [runargs] suite:benchname'],
'benchtable': [mx_benchplot.benchtable, '[options]'],
'benchplot': [mx_benchplot.benchplot, '[options]'],
'binary-url': [binary_url, '<repository id> <distribution name>'],
'build': [build, '[options]'],
'canonicalizeprojects': [canonicalizeprojects, ''],
'checkcopyrights': [checkcopyrights, '[options]'],
'checkheaders': [mx_gate.checkheaders, ''],
'checkoverlap': [checkoverlap, ''],
'checkstyle': [checkstyle, ''],
'clean': [clean, ''],
'deploy-binary' : [deploy_binary, ''],
'envs': [show_envs, '[options]'],
'exportlibs': [exportlibs, ''],
'verifymultireleaseprojects' : [verifyMultiReleaseProjects, ''],
'flattenmultireleasesources' : [flattenMultiReleaseSources, 'version'],
'findbugs': [mx_spotbugs.spotbugs, ''],
'spotbugs': [mx_spotbugs.spotbugs, ''],
'findclass': [findclass, ''],
'gate': [mx_gate.gate, '[options]'],
'help': [help_, '[command]'],
'hg': [hg_command, '[options]'],
'init' : [suite_init_cmd, '[options] name'],
'jacocoreport' : [mx_gate.jacocoreport, '[--format {html,xml}] [output directory]'],
'java': [java_command, '[-options] class [args...]'],
'javadoc': [javadoc, '[options]'],
'javap': [javap, '[options] <class name patterns>'],
'maven-deploy' : [maven_deploy, ''],
'maven-install' : [maven_install, ''],
'maven-url': [maven_url, '<repository id> <distribution name>'],
'minheap' : [run_java_min_heap, ''],
'projectgraph': [projectgraph, ''],
'projects': [show_projects, ''],
'jar-distributions': [show_jar_distributions, ''],
'pylint': [pylint, ''],
'quiet-run': [quiet_run, ''],
'sbookmarkimports': [sbookmarkimports, '[options]'],
'scheckimports': [scheckimports, '[options]'],
'sclone': [sclone, '[options]'],
'scloneimports': [scloneimports, '[options]'],
'sforceimports': [sforceimports, ''],
'sha1': [sha1, ''],
'sigtest': [mx_sigtest.sigtest, ''],
'sincoming': [sincoming, ''],
'site': [site, '[options]'],
'sonarqube-upload': [mx_gate.sonarqube_upload, '[options]'],
'coverage-upload': [mx_gate.coverage_upload, '[options]'],
'spull': [spull, '[options]'],
'stip': [stip, ''],
'suites': [show_suites, ''],
'paths': [show_paths, ''],
'supdate': [supdate, ''],
'sversions': [sversions, '[options]'],
'testdownstream': [mx_downstream.testdownstream_cli, '[options]'],
'update': [update, ''],
'unstrip': [_unstrip, '[options]'],
'urlrewrite': [mx_urlrewrites.urlrewrite_cli, 'url'],
'verifylibraryurls': [verify_library_urls, ''],
'verifysourceinproject': [verifysourceinproject, ''],
'version': [show_version, ''],
})
import mx_fetchjdk # pylint: disable=unused-import
import mx_bisect # pylint: disable=unused-import
import mx_gc # pylint: disable=unused-import
from mx_unittest import unittest
from mx_jackpot import jackpot
from mx_webserver import webserver
_mx_commands.add_commands([
unittest,
jackpot,
webserver
])
_argParser = ArgParser()
def _mxDirName(name):
return 'mx.' + name
### ~~~~~~~~~~~~~ Distribution, _private
def _mx_binary_distribution_root(name):
return name + '-mx'
def _mx_binary_distribution_jar(name):
"""the (relative) path to the location of the mx binary distribution jar"""
return join('dists', _mx_binary_distribution_root(name) + '.jar')
def _mx_binary_distribution_version(name):
"""the (relative) path to the location of the mx binary distribution version file"""
return join('dists', _mx_binary_distribution_root(name) + '.version')
def _install_socks_proxy_opener(proxytype, proxyaddr, proxyport=None):
""" Install a socks proxy handler so that all urllib2 requests are routed through the socks proxy. """
try:
import socks
from sockshandler import SocksiPyHandler
except ImportError:
warn('WARNING: Failed to load PySocks module. Try installing it with `pip install PySocks`.')
return
if proxytype == 4:
proxytype = socks.SOCKS4
elif proxytype == 5:
proxytype = socks.SOCKS5
else:
abort("Unknown Socks Proxy type {0}".format(proxytype))
opener = _urllib_request.build_opener(SocksiPyHandler(proxytype, proxyaddr, proxyport))
_urllib_request.install_opener(opener)
_mx_args = []
_mx_command_and_args = []
def shell_quoted_args(args):
args_string = ' '.join([pipes.quote(str(arg)) for arg in args])
if args_string != '':
args_string = ' ' + args_string
return args_string
def current_mx_command(injected_args=None):
return 'mx' + shell_quoted_args(_mx_args) + '' + shell_quoted_args(injected_args if injected_args else _mx_command_and_args)
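# Illustrative quoting (arguments are made up; assumes no global mx args were given):
# shell_quoted_args(['build', '--only', 'my dist']) -> " build --only 'my dist'"
# so current_mx_command(['build', '--only', 'my dist']) -> "mx build --only 'my dist'"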
def main():
# make sure logv, logvv and warn work as early as possible
_opts.__dict__['verbose'] = '-v' in sys.argv or '-V' in sys.argv
_opts.__dict__['very_verbose'] = '-V' in sys.argv
_opts.__dict__['warn'] = '--no-warning' not in sys.argv
_opts.__dict__['quiet'] = '--quiet' in sys.argv
global _vc_systems
_vc_systems = [HgConfig(), GitConfig(), BinaryVC()]
global _mx_suite
_mx_suite = MXSuite()
os.environ['MX_HOME'] = _mx_home
def _get_env_upper_or_lowercase(name):
return os.environ.get(name, os.environ.get(name.upper()))
def _check_socks_proxy():
""" Install a Socks Proxy Handler if the environment variable is set. """
def _read_socks_proxy_config(proxy_raw):
s = proxy_raw.split(':')
if len(s) == 1:
return s[0], None
if len(s) == 2:
return s[0], int(s[1])
abort("Can not parse Socks proxy configuration: {0}".format(proxy_raw))
def _load_socks_env():
proxy = _get_env_upper_or_lowercase('socks5_proxy')
if proxy:
return proxy, 5
proxy = _get_env_upper_or_lowercase('socks4_proxy')
if proxy:
return proxy, 4
return None, -1
# check for socks5_proxy/socks4_proxy env variable
socksproxy, socksversion = _load_socks_env()
if socksproxy:
socksaddr, socksport = _read_socks_proxy_config(socksproxy)
_install_socks_proxy_opener(socksversion, socksaddr, socksport)
# Set the https proxy environment variable from the http proxy environment
# variable if the former is not explicitly specified but the latter is and
# vice versa.
# This is for supporting servers that redirect a http URL to a https URL.
httpProxy = os.environ.get('http_proxy', os.environ.get('HTTP_PROXY'))
httpsProxy = os.environ.get('https_proxy', os.environ.get('HTTPS_PROXY'))
if httpProxy:
if not httpsProxy:
os.environ['https_proxy'] = httpProxy
elif httpsProxy:
os.environ['http_proxy'] = httpsProxy
else:
# only check for socks proxy if no http(s) has been specified
_check_socks_proxy()
_argParser._parse_cmd_line(_opts, firstParse=True)
global _mvn
_mvn = MavenConfig()
SourceSuite._load_env_file(_global_env_file())
mx_urlrewrites.register_urlrewrites_from_env('MX_URLREWRITES')
# Do not treat initial_command as an abbreviation as it would prevent
# mx extensions from defining commands that match an abbreviation.
initial_command = _argParser.initialCommandAndArgs[0] if len(_argParser.initialCommandAndArgs) > 0 else None
is_suite_context_free = initial_command and initial_command in _suite_context_free
should_discover_suites = not is_suite_context_free and not (initial_command and initial_command in _no_suite_discovery)
should_load_suites = should_discover_suites and not (initial_command and initial_command in _no_suite_loading)
is_optional_suite_context = not initial_command or initial_command in _optional_suite_context
assert not should_load_suites or should_discover_suites, initial_command
def _setup_binary_suites():
global _binary_suites
bs = os.environ.get('MX_BINARY_SUITES')
if bs is not None:
if len(bs) > 0:
_binary_suites = bs.split(',')
else:
_binary_suites = []
primarySuiteMxDir = None
if is_suite_context_free:
_mx_suite._complete_init()
_setup_binary_suites()
commandAndArgs = _argParser._parse_cmd_line(_opts, firstParse=False)
else:
primarySuiteMxDir = _findPrimarySuiteMxDir()
if primarySuiteMxDir == _mx_suite.mxDir:
_primary_suite_init(_mx_suite)
_mx_suite._complete_init()
_mx_suite.internal = False
mx_benchmark.init_benchmark_suites()
elif primarySuiteMxDir:
# We explicitly load the 'env' file of the primary suite now as it might
# influence the suite loading logic. During loading of the sub-suites their
# environment variable definitions are collected and will be placed into the
# os.environ all at once. This ensures that a consistent set of definitions
# are seen. The primary suite must have everything required for loading
# defined.
SourceSuite._load_env_in_mxDir(primarySuiteMxDir)
_mx_suite._complete_init()
additional_env = _opts.additional_env or get_env('MX_ENV_PATH')
if additional_env:
SourceSuite._load_env_in_mxDir(primarySuiteMxDir, file_name=additional_env, abort_if_missing=True)
_setup_binary_suites()
if should_discover_suites:
primary = _discover_suites(primarySuiteMxDir, load=should_load_suites)
else:
primary = SourceSuite(primarySuiteMxDir, load=False, primary=True)
_primary_suite_init(primary)
else:
_mx_suite._complete_init()
if not is_optional_suite_context:
abort('no primary suite found for %s' % initial_command)
for envVar in _loadedEnv:
value = _loadedEnv[envVar]
if os.environ.get(envVar) != value:
logv('Setting environment variable %s=%s' % (envVar, value))
os.environ[envVar] = value
commandAndArgs = _argParser._parse_cmd_line(_opts, firstParse=False)
if _opts.java_home:
logv('Setting environment variable %s=%s from --java-home' % ('JAVA_HOME', _opts.java_home))
os.environ['JAVA_HOME'] = _opts.java_home
if _opts.mx_tests:
MXTestsSuite()
if primarySuiteMxDir and not _mx_suite.primary and should_load_suites:
primary_suite().recursive_post_init()
_check_dependency_cycles()
if len(commandAndArgs) == 0:
print_simple_help()
return
# add JMH archive participants
def _has_jmh_dep(dist):
class NonLocal:
""" Work around nonlocal access """
jmh_found = False
def _visit_and_find_jmh_dep(dst, edge):
if NonLocal.jmh_found:
return False
if dst.isLibrary() and dst.name.startswith('JMH'):
NonLocal.jmh_found = True
return False
return True
dist.walk_deps(preVisit=_visit_and_find_jmh_dep)
return NonLocal.jmh_found
for s_ in suites(True, includeBinary=False):
for d in s_.dists:
if d.isJARDistribution() and _has_jmh_dep(d):
d.set_archiveparticipant(JMHArchiveParticipant(d))
command = commandAndArgs[0]
global _mx_command_and_args
_mx_command_and_args = commandAndArgs
global _mx_args
_mx_args = sys.argv[1:sys.argv.index(command)]
command_args = commandAndArgs[1:]
if command not in _mx_commands.commands():
hits = [c for c in _mx_commands.commands().keys() if c.startswith(command)]
if len(hits) == 1:
command = hits[0]
elif len(hits) == 0:
abort('mx: unknown command \'{0}\'\n{1}use "mx help" for more options'.format(command, _format_commands()))
else:
abort('mx: command \'{0}\' is ambiguous\n {1}'.format(command, ' '.join(hits)))
mx_compdb.init()
c = _mx_commands.commands()[command]
if primarySuiteMxDir and should_load_suites:
if not _mx_commands.get_command_property(command, "keepUnsatisfiedDependencies"):
global _removedDeps
_removedDeps = _remove_unsatisfied_deps()
# Finally post_init remaining distributions
if should_load_suites:
for s_ in suites(includeBinary=False, include_mx=True):
for d in s_.dists:
d.post_init()
def term_handler(signum, frame):
abort(1, killsig=signal.SIGTERM)
signal.signal(signal.SIGTERM, term_handler)
def quit_handler(signum, frame):
_send_sigquit()
if not is_windows():
signal.signal(signal.SIGQUIT, quit_handler)
try:
if _opts.timeout != 0:
def alarm_handler(signum, frame):
abort('Command timed out after ' + str(_opts.timeout) + ' seconds: ' + ' '.join(commandAndArgs))
signal.signal(signal.SIGALRM, alarm_handler)
signal.alarm(_opts.timeout)
retcode = c(command_args)
if retcode is not None and retcode != 0:
abort(retcode)
except KeyboardInterrupt:
# no need to show the stack trace when the user presses CTRL-C
abort(1, killsig=signal.SIGINT)
# The version must be updated for every PR (checked in CI)
version = VersionSpec("5.320.3") # GR-37334
currentUmask = None
_mx_start_datetime = datetime.utcnow()
_last_timestamp = _mx_start_datetime
if __name__ == '__main__':
# Capture the current umask since there's no way to query it without mutating it.
currentUmask = os.umask(0)
os.umask(currentUmask)
main()
|
graalvm/mx
|
mx.py
|
Python
|
gpl-2.0
| 772,837 | 0.003661 |
"""Tail any mongodb collection"""
from time import sleep
from bson import ObjectId
__version__ = "1.1.0"
def fetch(collection, filter, last_oid_generation_time=None):
if last_oid_generation_time is not None:
last_oid = ObjectId.from_datetime(last_oid_generation_time)
filter.update({"_id": {"$gte": last_oid}})
return collection.find(filter)
def filter_duplicates(cursor, ids):
for doc in cursor:
if doc["_id"] not in ids:
yield doc
def mongofollow(collection, filter=None, sleep_duration=0.1):
if filter is None:
filter = {}
last_oid_generation_time = None
last_oids = set()
while True:
cursor = fetch(collection, filter, last_oid_generation_time)
for doc in filter_duplicates(cursor, last_oids):
oid = doc["_id"]
last_oid_generation_time = doc["_id"].generation_time
last_oids.add(oid)
yield doc
last_oids = {oid for oid in last_oids if
oid.generation_time == last_oid_generation_time}
sleep(sleep_duration)
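# Illustrative usage (assumes pymongo is installed; the URI, database and
# collection names are hypothetical):
#
#     from pymongo import MongoClient
#     collection = MongoClient("mongodb://localhost:27017")["mydb"]["events"]
#     for doc in mongofollow(collection, {"level": "error"}, sleep_duration=1):
#         print(doc["_id"], doc.get("message"))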
|
Shir0kamii/mongofollow
|
mongofollow.py
|
Python
|
mit
| 1,092 | 0 |
"""
This file is part of ALTcointip.
ALTcointip is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
ALTcointip is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with ALTcointip. If not, see <http://www.gnu.org/licenses/>.
"""
import logging, re, time
from pifkoin.bitcoind import Bitcoind, BitcoindException
from httplib import CannotSendRequest
lg = logging.getLogger('cointipbot')
class CtbCoin(object):
"""
Coin class for cointip bot
"""
conn = None
conf = None
def __init__(self, _conf = None):
"""
Initialize CtbCoin with given parameters. _conf is a coin config dictionary defined in conf/coins.yml
"""
# verify _conf is a config dictionary
if not _conf or not hasattr(_conf, 'name') or not hasattr(_conf, 'config_file') or not hasattr(_conf, 'txfee'):
raise Exception("CtbCoin::__init__(): _conf is empty or invalid")
self.conf = _conf
# connect to coin daemon
try:
lg.debug("CtbCoin::__init__(): connecting to %s...", self.conf.name)
self.conn = Bitcoind(self.conf.config_file, rpcserver=self.conf.config_rpcserver)
except BitcoindException as e:
lg.error("CtbCoin::__init__(): error connecting to %s using %s: %s", self.conf.name, self.conf.config_file, e)
raise
lg.info("CtbCoin::__init__():: connected to %s", self.conf.name)
time.sleep(0.5)
# set transaction fee
lg.info("Setting tx fee of %f", self.conf.txfee)
self.conn.settxfee(self.conf.txfee)
def getbalance(self, _user = None, _minconf = None):
"""
Get user's tip or withdraw balance. _minconf is number of confirmations to use.
Returns (float) balance
"""
lg.debug("CtbCoin::getbalance(%s, %s)", _user, _minconf)
user = self.verify_user(_user=_user)
minconf = self.verify_minconf(_minconf=_minconf)
balance = float(0)
try:
balance = self.conn.getbalance(user, minconf)
except BitcoindException as e:
lg.error("CtbCoin.getbalance(): error getting %s (minconf=%s) balance for %s: %s", self.conf.name, minconf, user, e)
raise
time.sleep(0.5)
return float(balance)
def sendtouser(self, _userfrom = None, _userto = None, _amount = None, _minconf = 1):
"""
Transfer (move) coins to user
Returns (bool)
"""
lg.debug("CtbCoin::sendtouser(%s, %s, %.9f)", _userfrom, _userto, _amount)
userfrom = self.verify_user(_user=_userfrom)
userto = self.verify_user(_user=_userto)
amount = self.verify_amount(_amount=_amount)
# send request to coin daemon
try:
lg.info("CtbCoin::sendtouser(): moving %.9f %s from %s to %s", amount, self.conf.name, userfrom, userto)
result = self.conn.move(userfrom, userto, amount)
time.sleep(0.5)
except Exception as e:
lg.error("CtbCoin::sendtouser(): error moving %.9f %s from %s to %s: %s", amount, self.conf.name, userfrom, userto, e)
return False
time.sleep(0.5)
return True
def sendtoaddr(self, _userfrom = None, _addrto = None, _amount = None):
"""
Send coins to address
Returns (string) txid
"""
lg.debug("CtbCoin::sendtoaddr(%s, %s, %.9f)", _userfrom, _addrto, _amount)
userfrom = self.verify_user(_user=_userfrom)
addrto = self.verify_addr(_addr=_addrto)
amount = self.verify_amount(_amount=_amount)
minconf = self.verify_minconf(_minconf=self.conf.minconf.withdraw)
txid = ""
# send request to coin daemon
try:
lg.info("CtbCoin::sendtoaddr(): sending %.9f %s from %s to %s", amount, self.conf.name, userfrom, addrto)
# Unlock wallet, if applicable
if hasattr(self.conf, 'walletpassphrase'):
lg.debug("CtbCoin::sendtoaddr(): unlocking wallet...")
self.conn.walletpassphrase(self.conf.walletpassphrase, 1)
# Perform transaction
lg.debug("CtbCoin::sendtoaddr(): calling sendfrom()...")
txid = self.conn.sendfrom(userfrom, addrto, amount, minconf)
# Lock wallet, if applicable
if hasattr(self.conf, 'walletpassphrase'):
lg.debug("CtbCoin::sendtoaddr(): locking wallet...")
self.conn.walletlock()
except Exception as e:
lg.error("CtbCoin::sendtoaddr(): error sending %.9f %s from %s to %s: %s", amount, self.conf.name, userfrom, addrto, e)
raise
time.sleep(0.5)
return str(txid)
def validateaddr(self, _addr = None):
"""
Verify that _addr is a valid coin address
Returns (bool)
"""
lg.debug("CtbCoin::validateaddr(%s)", _addr)
addr = self.verify_addr(_addr=_addr)
addr_valid = self.conn.validateaddress(addr)
time.sleep(0.5)
if not addr_valid.has_key('isvalid') or not addr_valid['isvalid']:
lg.debug("CtbCoin::validateaddr(%s): not valid", addr)
return False
else:
lg.debug("CtbCoin::validateaddr(%s): valid", addr)
return True
def getnewaddr(self, _user = None):
"""
Generate a new address for _user
Returns (string) address
"""
user = self.verify_user(_user=_user)
addr = ""
counter = 0
while True:
try:
# Unlock wallet for keypoolrefill
if hasattr(self.conf, 'walletpassphrase'):
self.conn.walletpassphrase(self.conf.walletpassphrase, 1)
# Generate new address
addr = self.conn.getnewaddress(user)
# Lock wallet
if hasattr(self.conf, 'walletpassphrase'):
self.conn.walletlock()
if not addr:
raise Exception("CtbCoin::getnewaddr(%s): empty addr", user)
time.sleep(0.1)
return str(addr)
except BitcoindException as e:
lg.error("CtbCoin::getnewaddr(%s): BitcoindException: %s", user, e)
raise
except CannotSendRequest as e:
if counter < 3:
                    lg.warning("CtbCoin::getnewaddr(%s): CannotSendRequest, retrying", user)
counter += 1
time.sleep(10)
continue
else:
raise
except Exception as e:
if str(e) == "timed out" and counter < 3:
                    lg.warning("CtbCoin::getnewaddr(%s): timed out, retrying", user)
counter += 1
time.sleep(10)
continue
else:
lg.error("CtbCoin::getnewaddr(%s): Exception: %s", user, e)
raise
def verify_user(self, _user = None):
"""
Verify and return a username
"""
if not _user or not type(_user) in [str, unicode]:
raise Exception("CtbCoin::verify_user(): _user wrong type (%s) or empty (%s)", type(_user), _user)
return str(_user.lower())
def verify_addr(self, _addr = None):
"""
Verify and return coin address
"""
if not _addr or not type(_addr) in [str, unicode]:
raise Exception("CtbCoin::verify_addr(): _addr wrong type (%s) or empty (%s)", type(_addr),_addr)
return re.escape(str(_addr))
def verify_amount(self, _amount = None):
"""
Verify and return amount
"""
if not _amount or not type(_amount) in [int, float] or not _amount > 0:
raise Exception("CtbCoin::verify_amount(): _amount wrong type (%s), empty, or negative (%s)", type(_amount), _amount)
return _amount
def verify_minconf(self, _minconf = None):
"""
Verify and return minimum number of confirmations
"""
if not _minconf or not type(_minconf) == int or not _minconf >= 0:
raise Exception("CtbCoin::verify_minconf(): _minconf wrong type (%s), empty, or negative (%s)", type(_minconf), _minconf)
return _minconf
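# Illustrative usage (a sketch only: assumes `conf` is a coin configuration
# loaded from conf/coins.yml exposing the attributes referenced above, such as
# name, config_file, config_rpcserver and txfee; usernames and amounts are examples):
#
#     coin = CtbCoin(_conf=conf)
#     balance = coin.getbalance(_user='some_redditor', _minconf=1)
#     address = coin.getnewaddr(_user='some_redditor')
#     coin.sendtouser(_userfrom='some_redditor', _userto='another_redditor', _amount=0.1)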
|
Healdb/altcointip
|
src/ctb/ctb_coin.py
|
Python
|
gpl-2.0
| 8,785 | 0.006602 |
#
# Copyright 2010 Dr D Studios Pty Limited (ACN 127 184 954) (Dr. D Studios), its
# affiliates and/or its licensors.
#
from ..helpers.translators import verify_translate
from grenade.translators.sequence import SequenceTranslator
from probe.fixtures.mock_shotgun import MockShotgun
class TestSequenceTranslator(object):
"""
Nose unit test suite for Grenade SequenceTranslator.
.. versionadded:: v00_04_00
"""
def setup(self):
"""
Set up the unit test suite.
.. versionadded:: v00_04_00
"""
self.shotgun_data = [{'id':1, 'type':'Project', 'sg_short_name':'hf2'},
{'id':2, 'type':'Asset'},
{'id':3, 'type':'Note'},
{'id':4, 'type':'Scene'},
{'id':5, 'type':'Shot'},
{'id':6, 'type':'Task'}]
self.session = MockShotgun(schema=[], data=self.shotgun_data)
self.translator = SequenceTranslator(self.session)
def teardown(self):
"""
Tear down the unit test suite.
.. versionadded:: v00_04_00
"""
pass
def test_translate(self):
"""
Test that the translator converts the supplied test data as expected.
.. versionadded:: v00_04_00
"""
verify_translate(self.translator, 'project', {'id':1, 'type':'Project'}, 'hf2', 'mm4')
verify_translate(self.translator, 'assets', [{'id':2, 'type':'Asset'}], [{'Asset':[['id', 'is', 2]]}], [{'Asset':[['id', 'is', 3]]}])
verify_translate(self.translator, 'notes', [{'id':3, 'type':'Note'}], [{'Note':[['id', 'is', 3]]}], [{'Note':[['id', 'is', 4]]}])
verify_translate(self.translator, 'open_notes', [{'id':3, 'type':'Note'}], [{'Note':[['id', 'is', 3]]}], [{'Note':[['id', 'is', 4]]}])
verify_translate(self.translator, 'scenes', [{'id':4, 'type':'Scene'}], [{'Scene':[['id', 'is', 4]]}], [{'Scene':[['id', 'is', 5]]}])
verify_translate(self.translator, 'shots', [{'id':5, 'type':'Shot'}], [{'Shot':[['id', 'is', 5]]}], [{'Shot':[['id', 'is', 6]]}])
verify_translate(self.translator, 'tasks', [{'id':6, 'type':'Task'}], [{'Task':[['id', 'is', 6]]}], [{'Task':[['id', 'is', 7]]}])
# Copyright 2008-2012 Dr D Studios Pty Limited (ACN 127 184 954) (Dr. D Studios)
#
# This file is part of anim-studio-tools.
#
# anim-studio-tools is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# anim-studio-tools is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with anim-studio-tools. If not, see <http://www.gnu.org/licenses/>.
|
xxxIsaacPeralxxx/anim-studio-tools
|
grenade/tests/unit/test_translators/test_sequence.py
|
Python
|
gpl-3.0
| 3,110 | 0.018328 |
"""
Defines forms for providing validation of embargo admin details.
"""
from django import forms
from django.utils.translation import ugettext as _
import ipaddr
from xmodule.modulestore.django import modulestore
from opaque_keys import InvalidKeyError
from opaque_keys.edx.keys import CourseKey
from embargo.models import IPFilter, RestrictedCourse
class RestrictedCourseForm(forms.ModelForm):
"""Validate course keys for the RestrictedCourse model.
The default behavior in Django admin is to:
* Save course keys for courses that do not exist.
* Return a 500 response if the course key format is invalid.
Using this form ensures that we display a user-friendly
error message instead.
"""
class Meta: # pylint: disable=missing-docstring
model = RestrictedCourse
def clean_course_key(self):
"""Validate the course key.
Checks that the key format is valid and that
the course exists. If not, displays an error message.
        Reads 'course_key' from the form's cleaned data.
Returns:
CourseKey
"""
cleaned_id = self.cleaned_data['course_key']
error_msg = _('COURSE NOT FOUND. Please check that the course ID is valid.')
try:
course_key = CourseKey.from_string(cleaned_id)
except InvalidKeyError:
raise forms.ValidationError(error_msg)
if not modulestore().has_course(course_key):
raise forms.ValidationError(error_msg)
return course_key
class IPFilterForm(forms.ModelForm): # pylint: disable=incomplete-protocol
"""Form validating entry of IP addresses"""
class Meta: # pylint: disable=missing-docstring
model = IPFilter
def _is_valid_ip(self, address):
"""Whether or not address is a valid ipv4 address or ipv6 address"""
try:
            # Is this a valid IP address?
ipaddr.IPNetwork(address)
except ValueError:
return False
return True
def _valid_ip_addresses(self, addresses):
"""
Checks if a csv string of IP addresses contains valid values.
If not, raises a ValidationError.
"""
if addresses == '':
return ''
error_addresses = []
for addr in addresses.split(','):
address = addr.strip()
if not self._is_valid_ip(address):
error_addresses.append(address)
if error_addresses:
msg = 'Invalid IP Address(es): {0}'.format(error_addresses)
msg += ' Please fix the error(s) and try again.'
raise forms.ValidationError(msg)
return addresses
def clean_whitelist(self):
"""Validates the whitelist"""
whitelist = self.cleaned_data["whitelist"]
return self._valid_ip_addresses(whitelist)
def clean_blacklist(self):
"""Validates the blacklist"""
blacklist = self.cleaned_data["blacklist"]
return self._valid_ip_addresses(blacklist)
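# Illustrative validation sketch (assumes the IPFilter model exposes the
# whitelist/blacklist fields used by the clean methods above; addresses are examples):
#
#     form = IPFilterForm(data={'whitelist': '192.168.1.1, 2001:db8::1',
#                               'blacklist': '10.0.0.0/8'})
#     if form.is_valid():
#         form.save()
#     else:
#         print(form.errors)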
|
zadgroup/edx-platform
|
common/djangoapps/embargo/forms.py
|
Python
|
agpl-3.0
| 3,061 | 0.000327 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('cardsgame', '0002_card_mana_cost'),
]
operations = [
migrations.AddField(
model_name='card',
name='modified',
field=models.PositiveIntegerField(null=True, verbose_name='Modified'),
preserve_default=True,
),
]
|
mrjmad/gnu_linux_mag_drf
|
hall_of_cards/cardsgame/migrations/0003_card_modified.py
|
Python
|
mit
| 465 | 0.002151 |
# coding: utf-8
from __future__ import unicode_literals
import itertools
import json
import re
from .common import InfoExtractor, SearchInfoExtractor
from ..compat import (
compat_urllib_parse,
compat_urlparse,
)
from ..utils import (
clean_html,
unescapeHTML,
ExtractorError,
int_or_none,
mimetype2ext,
)
from .nbc import NBCSportsVPlayerIE
class YahooIE(InfoExtractor):
IE_DESC = 'Yahoo screen and movies'
_VALID_URL = r'(?P<url>(?P<host>https?://(?:[a-zA-Z]{2}\.)?[\da-zA-Z_-]+\.yahoo\.com)/(?:[^/]+/)*(?P<display_id>.+)?-(?P<id>[0-9]+)(?:-[a-z]+)?\.html)'
_TESTS = [
{
'url': 'http://screen.yahoo.com/julian-smith-travis-legg-watch-214727115.html',
'info_dict': {
'id': '2d25e626-2378-391f-ada0-ddaf1417e588',
'ext': 'mp4',
'title': 'Julian Smith & Travis Legg Watch Julian Smith',
'description': 'Julian and Travis watch Julian Smith',
'duration': 6863,
},
},
{
'url': 'http://screen.yahoo.com/wired/codefellas-s1-ep12-cougar-lies-103000935.html',
'md5': 'd6e6fc6e1313c608f316ddad7b82b306',
'info_dict': {
'id': 'd1dedf8c-d58c-38c3-8963-e899929ae0a9',
'ext': 'mp4',
'title': 'Codefellas - The Cougar Lies with Spanish Moss',
'description': 'md5:66b627ab0a282b26352136ca96ce73c1',
'duration': 151,
},
},
{
'url': 'https://screen.yahoo.com/community/community-sizzle-reel-203225340.html?format=embed',
'md5': '60e8ac193d8fb71997caa8fce54c6460',
'info_dict': {
'id': '4fe78544-8d48-39d8-97cd-13f205d9fcdb',
'ext': 'mp4',
'title': "Yahoo Saves 'Community'",
'description': 'md5:4d4145af2fd3de00cbb6c1d664105053',
'duration': 170,
}
},
{
'url': 'https://tw.screen.yahoo.com/election-2014-askmayor/敢問市長-黃秀霜批賴清德-非常高傲-033009720.html',
'md5': '3a09cf59349cfaddae1797acc3c087fc',
'info_dict': {
'id': 'cac903b3-fcf4-3c14-b632-643ab541712f',
'ext': 'mp4',
'title': '敢問市長/黃秀霜批賴清德「非常高傲」',
'description': '直言台南沒捷運 交通居五都之末',
'duration': 396,
}
},
{
'url': 'https://uk.screen.yahoo.com/editor-picks/cute-raccoon-freed-drain-using-091756545.html',
'md5': '0b51660361f0e27c9789e7037ef76f4b',
'info_dict': {
'id': 'b3affa53-2e14-3590-852b-0e0db6cd1a58',
'ext': 'mp4',
'title': 'Cute Raccoon Freed From Drain\u00a0Using Angle Grinder',
'description': 'md5:f66c890e1490f4910a9953c941dee944',
'duration': 97,
}
},
{
'url': 'https://ca.sports.yahoo.com/video/program-makes-hockey-more-affordable-013127711.html',
'md5': '57e06440778b1828a6079d2f744212c4',
'info_dict': {
'id': 'c9fa2a36-0d4d-3937-b8f6-cc0fb1881e73',
'ext': 'mp4',
'title': 'Program that makes hockey more affordable not offered in Manitoba',
'description': 'md5:c54a609f4c078d92b74ffb9bf1f496f4',
'duration': 121,
}
}, {
'url': 'https://ca.finance.yahoo.com/news/hackers-sony-more-trouble-well-154609075.html',
'md5': '226a895aae7e21b0129e2a2006fe9690',
'info_dict': {
'id': 'e624c4bc-3389-34de-9dfc-025f74943409',
'ext': 'mp4',
'title': '\'The Interview\' TV Spot: War',
'description': 'The Interview',
'duration': 30,
}
}, {
'url': 'http://news.yahoo.com/video/china-moses-crazy-blues-104538833.html',
'md5': '88e209b417f173d86186bef6e4d1f160',
'info_dict': {
'id': 'f885cf7f-43d4-3450-9fac-46ac30ece521',
'ext': 'mp4',
'title': 'China Moses Is Crazy About the Blues',
'description': 'md5:9900ab8cd5808175c7b3fe55b979bed0',
'duration': 128,
}
}, {
'url': 'https://in.lifestyle.yahoo.com/video/connect-dots-dark-side-virgo-090247395.html',
'md5': 'd9a083ccf1379127bf25699d67e4791b',
'info_dict': {
'id': '52aeeaa3-b3d1-30d8-9ef8-5d0cf05efb7c',
'ext': 'mp4',
'title': 'Connect the Dots: Dark Side of Virgo',
'description': 'md5:1428185051cfd1949807ad4ff6d3686a',
'duration': 201,
}
}, {
'url': 'https://www.yahoo.com/movies/v/true-story-trailer-173000497.html',
'md5': '989396ae73d20c6f057746fb226aa215',
'info_dict': {
'id': '071c4013-ce30-3a93-a5b2-e0413cd4a9d1',
'ext': 'mp4',
'title': '\'True Story\' Trailer',
'description': 'True Story',
'duration': 150,
},
}, {
'url': 'https://gma.yahoo.com/pizza-delivery-man-surprised-huge-tip-college-kids-195200785.html',
'only_matching': True,
}, {
'note': 'NBC Sports embeds',
'url': 'http://sports.yahoo.com/blogs/ncaab-the-dagger/tyler-kalinoski-s-buzzer-beater-caps-davidson-s-comeback-win-185609842.html?guid=nbc_cbk_davidsonbuzzerbeater_150313',
'info_dict': {
'id': '9CsDKds0kvHI',
'ext': 'flv',
'description': 'md5:df390f70a9ba7c95ff1daace988f0d8d',
'title': 'Tyler Kalinoski hits buzzer-beater to lift Davidson',
}
}, {
'url': 'https://tw.news.yahoo.com/-100120367.html',
'only_matching': True,
}, {
# Query result is embedded in webpage, but explicit request to video API fails with geo restriction
'url': 'https://screen.yahoo.com/community/communitary-community-episode-1-ladders-154501237.html',
'md5': '4fbafb9c9b6f07aa8f870629f6671b35',
'info_dict': {
'id': '1f32853c-a271-3eef-8cb6-f6d6872cb504',
'ext': 'mp4',
'title': 'Communitary - Community Episode 1: Ladders',
'description': 'md5:8fc39608213295748e1e289807838c97',
'duration': 1646,
},
}
]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
display_id = mobj.group('display_id') or self._match_id(url)
page_id = mobj.group('id')
url = mobj.group('url')
host = mobj.group('host')
webpage = self._download_webpage(url, display_id)
# Look for iframed media first
iframe_m = re.search(r'<iframe[^>]+src="(/video/.+?-\d+\.html\?format=embed.*?)"', webpage)
if iframe_m:
iframepage = self._download_webpage(
host + iframe_m.group(1), display_id, 'Downloading iframe webpage')
items_json = self._search_regex(
r'mediaItems: (\[.+?\])$', iframepage, 'items', flags=re.MULTILINE, default=None)
if items_json:
items = json.loads(items_json)
video_id = items[0]['id']
return self._get_info(video_id, display_id, webpage)
# Look for NBCSports iframes
nbc_sports_url = NBCSportsVPlayerIE._extract_url(webpage)
if nbc_sports_url:
return self.url_result(nbc_sports_url, 'NBCSportsVPlayer')
        # The query result is often embedded in the webpage as JSON. Sometimes an
        # explicit request to the video API fails with a geo restriction error, so
        # using the embedded query result when it is present sounds reasonable.
config_json = self._search_regex(
r'window\.Af\.bootstrap\[[^\]]+\]\s*=\s*({.*?"applet_type"\s*:\s*"td-applet-videoplayer".*?});(?:</script>|$)',
webpage, 'videoplayer applet', default=None)
if config_json:
config = self._parse_json(config_json, display_id, fatal=False)
if config:
sapi = config.get('models', {}).get('applet_model', {}).get('data', {}).get('sapi')
if sapi:
return self._extract_info(display_id, sapi, webpage)
items_json = self._search_regex(
r'mediaItems: ({.*?})$', webpage, 'items', flags=re.MULTILINE,
default=None)
if items_json is None:
CONTENT_ID_REGEXES = [
r'YUI\.namespace\("Media"\)\.CONTENT_ID\s*=\s*"([^"]+)"',
r'root\.App\.Cache\.context\.videoCache\.curVideo = \{"([^"]+)"',
r'"first_videoid"\s*:\s*"([^"]+)"',
r'%s[^}]*"ccm_id"\s*:\s*"([^"]+)"' % re.escape(page_id),
]
video_id = self._search_regex(CONTENT_ID_REGEXES, webpage, 'content ID')
else:
items = json.loads(items_json)
info = items['mediaItems']['query']['results']['mediaObj'][0]
            # The 'meta' field is not always present in the video webpage, so we
            # request it from another page
video_id = info['id']
return self._get_info(video_id, display_id, webpage)
def _extract_info(self, display_id, query, webpage):
info = query['query']['results']['mediaObj'][0]
meta = info.get('meta')
video_id = info.get('id')
if not meta:
msg = info['status'].get('msg')
if msg:
raise ExtractorError(
'%s returned error: %s' % (self.IE_NAME, msg), expected=True)
raise ExtractorError('Unable to extract media object meta')
formats = []
for s in info['streams']:
format_info = {
'width': int_or_none(s.get('width')),
'height': int_or_none(s.get('height')),
'tbr': int_or_none(s.get('bitrate')),
}
host = s['host']
path = s['path']
if host.startswith('rtmp'):
format_info.update({
'url': host,
'play_path': path,
'ext': 'flv',
})
else:
if s.get('format') == 'm3u8_playlist':
format_info['protocol'] = 'm3u8_native'
format_info['ext'] = 'mp4'
format_url = compat_urlparse.urljoin(host, path)
format_info['url'] = format_url
formats.append(format_info)
self._sort_formats(formats)
closed_captions = self._html_search_regex(
r'"closedcaptions":(\[[^\]]+\])', webpage, 'closed captions',
default='[]')
cc_json = self._parse_json(closed_captions, video_id, fatal=False)
subtitles = {}
if cc_json:
for closed_caption in cc_json:
lang = closed_caption['lang']
if lang not in subtitles:
subtitles[lang] = []
subtitles[lang].append({
'url': closed_caption['url'],
'ext': mimetype2ext(closed_caption['content_type']),
})
return {
'id': video_id,
'display_id': display_id,
'title': unescapeHTML(meta['title']),
'formats': formats,
'description': clean_html(meta['description']),
'thumbnail': meta['thumbnail'] if meta.get('thumbnail') else self._og_search_thumbnail(webpage),
'duration': int_or_none(meta.get('duration')),
'subtitles': subtitles,
}
def _get_info(self, video_id, display_id, webpage):
region = self._search_regex(
r'\\?"region\\?"\s*:\s*\\?"([^"]+?)\\?"',
webpage, 'region', fatal=False, default='US')
data = compat_urllib_parse.urlencode({
'protocol': 'http',
'region': region,
})
query_url = (
'https://video.media.yql.yahoo.com/v1/video/sapi/streams/'
'{id}?{data}'.format(id=video_id, data=data))
query_result = self._download_json(
query_url, display_id, 'Downloading video info')
return self._extract_info(display_id, query_result, webpage)
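# Illustrative sketch (not part of the original extractor; the id below is a
# made-up placeholder): _get_info builds a SAPI streams query of the form
#
#     https://video.media.yql.yahoo.com/v1/video/sapi/streams/<video_id>?protocol=http&region=US
#
# and hands the JSON response to _extract_info, which turns it into formats.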
class YahooSearchIE(SearchInfoExtractor):
IE_DESC = 'Yahoo screen search'
_MAX_RESULTS = 1000
IE_NAME = 'screen.yahoo:search'
_SEARCH_KEY = 'yvsearch'
def _get_n_results(self, query, n):
"""Get a specified number of results for a query"""
entries = []
for pagenum in itertools.count(0):
result_url = 'http://video.search.yahoo.com/search/?p=%s&fr=screen&o=js&gs=0&b=%d' % (compat_urllib_parse.quote_plus(query), pagenum * 30)
info = self._download_json(result_url, query,
note='Downloading results page ' + str(pagenum + 1))
m = info['m']
results = info['results']
for (i, r) in enumerate(results):
if (pagenum * 30) + i >= n:
break
mobj = re.search(r'(?P<url>screen\.yahoo\.com/.*?-\d*?\.html)"', r)
e = self.url_result('http://' + mobj.group('url'), 'Yahoo')
entries.append(e)
if (pagenum * 30 + i >= n) or (m['last'] >= (m['total'] - 1)):
break
return {
'_type': 'playlist',
'id': query,
'entries': entries,
}
|
Buggaarde/youtube-dl
|
youtube_dl/extractor/yahoo.py
|
Python
|
unlicense
| 13,867 | 0.002323 |
""" Simple datasets to be used for unit tests. """
__authors__ = "Ian Goodfellow"
__copyright__ = "Copyright 2010-2012, Universite de Montreal"
__credits__ = ["Ian Goodfellow"]
__license__ = "3-clause BSD"
__maintainer__ = "LISA Lab"
__email__ = "pylearn-dev@googlegroups.com"
import numpy as np
from theano.compat.six.moves import xrange
from pylearn2.datasets.dense_design_matrix import DenseDesignMatrix
class ArangeDataset(DenseDesignMatrix):
"""
A dataset where example i is just the number i. Makes it easy to track
which sets of examples are visited.
Parameters
----------
num_examples : WRITEME
To see the other parameters, look at the DenseDesignMatrix class
documentation
"""
def __init__(self, num_examples, *args, **kwargs):
X = np.zeros((num_examples, 1))
X[:, 0] = np.arange(num_examples)
super(ArangeDataset, self).__init__(X, *args, **kwargs)
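# Illustrative usage sketch (not part of the original module): example i of an
# ArangeDataset is simply the number i, so the design matrix itself shows which
# examples an iterator visits.
#
#     dataset = ArangeDataset(num_examples=5)
#     print(dataset.X)   # [[0.], [1.], [2.], [3.], [4.]]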
def random_dense_design_matrix(rng, num_examples, dim, num_classes):
"""
Creates a random dense design matrix that has class labels.
Parameters
----------
rng : numpy.random.RandomState
The random number generator used to generate the dataset.
num_examples : int
The number of examples to create.
dim : int
The number of features in each example.
num_classes : int
The number of classes to assign the examples to.
0 indicates that no class labels will be generated.
"""
X = rng.randn(num_examples, dim)
if num_classes:
Y = rng.randint(0, num_classes, (num_examples, 1))
y_labels = num_classes
else:
Y = None
y_labels = None
return DenseDesignMatrix(X=X, y=Y, y_labels=y_labels)
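# Illustrative usage sketch (not part of the original module): the shapes below
# follow directly from the construction above.
#
#     rng = np.random.RandomState(0)
#     dataset = random_dense_design_matrix(rng, num_examples=10, dim=3, num_classes=4)
#     # dataset.X has shape (10, 3); dataset.y has shape (10, 1) with labels in [0, 4)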
def random_one_hot_dense_design_matrix(rng, num_examples, dim, num_classes):
X = rng.randn(num_examples, dim)
idx = rng.randint(0, num_classes, (num_examples, ))
Y = np.zeros((num_examples, num_classes))
for i in xrange(num_examples):
Y[i, idx[i]] = 1
return DenseDesignMatrix(X=X, y=Y)
def random_one_hot_topological_dense_design_matrix(rng,
num_examples,
shape,
channels,
axes,
num_classes):
dims = {'b': num_examples,
'c': channels}
for i, dim in enumerate(shape):
dims[i] = dim
shape = [dims[axis] for axis in axes]
X = rng.randn(*shape)
idx = rng.randint(0, num_classes, (num_examples,))
Y = np.zeros((num_examples, num_classes))
for i in xrange(num_examples):
Y[i, idx[i]] = 1
return DenseDesignMatrix(topo_view=X, axes=axes, y=Y)
|
junbochen/pylearn2
|
pylearn2/testing/datasets.py
|
Python
|
bsd-3-clause
| 2,822 | 0 |
# Copyright (c) 2005 Maxim Sobolev. All rights reserved.
# Copyright (c) 2006-2007 Sippy Software, Inc. All rights reserved.
#
# This file is part of SIPPY, a free RFC3261 SIP stack and B2BUA.
#
# SIPPY is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# For a license to use the SIPPY software under conditions
# other than those described here, or to purchase support for this
# software, please contact Sippy Software, Inc. by e-mail at the
# following addresses: sales@sippysoft.com.
#
# SIPPY is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
from SipGenericHF import SipGenericHF
class SipReplaces(SipGenericHF):
hf_names = ('replaces',)
call_id = None
from_tag = None
to_tag = None
early_only = False
params = None
def __init__(self, body = None, call_id = None, from_tag = None, to_tag = None, \
early_only = False, params = None):
SipGenericHF.__init__(self, body)
if body != None:
return
self.parsed = True
self.params = []
self.call_id = call_id
self.from_tag = from_tag
self.to_tag = to_tag
self.early_only = early_only
if params != None:
self.params = params[:]
def parse(self):
self.parsed = True
self.params = []
params = self.body.split(';')
self.call_id = params.pop(0)
for param in params:
if param.startswith('from-tag='):
self.from_tag = param[len('from-tag='):]
elif param.startswith('to-tag='):
self.to_tag = param[len('to-tag='):]
elif param == 'early-only':
self.early_only = True
else:
self.params.append(param)
def __str__(self):
if not self.parsed:
return self.body
res = '%s;from-tag=%s;to-tag=%s' % (self.call_id, self.from_tag, self.to_tag)
if self.early_only:
res += ';early-only'
for param in self.params:
res += ';' + param
return res
def getCopy(self):
if not self.parsed:
return SipReplaces(self.body)
return SipReplaces(call_id = self.call_id, from_tag = self.from_tag, to_tag = self.to_tag, \
early_only = self.early_only, params = self.params)
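# Illustrative round-trip sketch (not part of the original module; the header
# body below is a made-up example): parse() splits the body on ';' and
# __str__() rebuilds an equivalent header body.
#
#     hf = SipReplaces('12345@host;from-tag=abc;to-tag=def;early-only')
#     hf.parse()
#     # hf.call_id == '12345@host', hf.from_tag == 'abc', hf.to_tag == 'def',
#     # hf.early_only is True, and str(hf) reproduces the original body.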
|
lemenkov/sippy
|
sippy/SipReplaces.py
|
Python
|
gpl-2.0
| 2,867 | 0.011161 |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module is deprecated. Please use :mod:`airflow.providers.amazon.aws.sensors.sagemaker_tuning`."""
import warnings
# pylint: disable=unused-import
from airflow.providers.amazon.aws.sensors.sagemaker_tuning import SageMakerTuningSensor # noqa
warnings.warn(
"This module is deprecated. Please use `airflow.providers.amazon.aws.sensors.sagemaker_tuning`.",
DeprecationWarning,
stacklevel=2,
)
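# Illustrative migration sketch (not part of the original module): new code
# should import the sensor from the provider package directly, e.g.
#
#     from airflow.providers.amazon.aws.sensors.sagemaker_tuning import SageMakerTuningSensor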
|
nathanielvarona/airflow
|
airflow/contrib/sensors/sagemaker_tuning_sensor.py
|
Python
|
apache-2.0
| 1,201 | 0.001665 |
import os
import sympy
from example_helper import save_example_fit
from scipy_data_fitting import Data, Model, Fit
#
# Example of a fit to a sine wave with error bars.
#
name = 'wave'
# Load data from a csv file.
data = Data(name)
data.path = os.path.join('examples','data', 'wave.csv')
data.genfromtxt_args['skip_header'] = 1
data.error = (0.1, 0.05)
# Create a wave model.
model = Model(name)
model.add_symbols('t', 'A', 'ω', 'δ')
A, t, ω, δ = model.get_symbols('A', 't', 'ω', 'δ')
model.expressions['wave'] = A * sympy.functions.sin(ω * t + δ)
model.expressions['frequency'] = ω / (2 * sympy.pi)
# Create the fit using the data and model.
fit = Fit(name, data=data, model=model)
fit.expression = 'wave'
fit.independent = {'symbol': 't', 'name': 'Time', 'units': 's'}
fit.dependent = {'name': 'Voltage', 'prefix': 'kilo', 'units': 'kV'}
fit.parameters = [
{'symbol': 'A', 'value': 0.3, 'prefix': 'kilo', 'units': 'kV'},
{'symbol': 'ω', 'guess': 1, 'units': 'Hz'},
{'symbol': 'δ', 'guess': 1},
]
fit.quantities = [
{'expression': 'frequency', 'name': 'Frequency', 'units': 'Hz'},
{'expression': 1 / model.expressions['frequency'] , 'name': 'Period', 'units': 's'},
]
# Save the fit to disk.
save_example_fit(fit)
|
razor-x/scipy-data_fitting
|
examples/wave.py
|
Python
|
mit
| 1,252 | 0.002417 |
from gtable import Table
import numpy as np
def test_records():
t = Table({'a': [1, 2, 3], 'b': np.array([4, 5, 6])})
t1 = Table({'a': [1, 2, 3], 'd': np.array([4, 5, 6])})
t.stack(t1)
records = [r for r in t.records()]
assert records == [
{'a': 1, 'b': 4},
{'a': 2, 'b': 5},
{'a': 3, 'b': 6},
{'a': 1, 'd': 4},
{'a': 2, 'd': 5},
{'a': 3, 'd': 6}]
records = [r for r in t.records(fill=True)]
assert records == [
{'a': 1, 'b': 4, 'd': np.nan},
{'a': 2, 'b': 5, 'd': np.nan},
{'a': 3, 'b': 6, 'd': np.nan},
{'a': 1, 'b': np.nan, 'd': 4},
{'a': 2, 'b': np.nan, 'd': 5},
{'a': 3, 'b': np.nan, 'd': 6}]
def test_first_record():
t = Table({'a': [1, 2, 3], 'b': np.array([4, 5, 6])})
t1 = Table({'a': [1, 2, 3], 'd': np.array([4, 5, 6])})
t.stack(t1)
assert t.first_record() == {'a': 1, 'b': 4}
assert t.first_record(fill=True) == {'a': 1, 'b': 4, 'd': np.nan}
def test_last_record():
t = Table({'a': [1, 2, 3], 'b': np.array([4, 5, 6])})
t1 = Table({'a': [1, 2, 3], 'd': np.array([4, 5, 6])})
t.stack(t1)
assert t.last_record() == {'a': 1, 'd': 4}
assert t.last_record(fill=True) == {'a': 1, 'd': 4, 'b': np.nan}
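# Illustrative invocation (not part of the original test module, and assumes
# pytest is installed):
#
#     pytest tests/test_records.py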
|
guillemborrell/gtable
|
tests/test_records.py
|
Python
|
bsd-3-clause
| 1,282 | 0.00078 |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""AffineLinearOperator bijector."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# go/tf-wildcard-import
# pylint: disable=wildcard-import
from tensorflow.contrib.distributions.python.ops.bijectors.affine_linear_operator_impl import *
# pylint: enable=wildcard-import
from tensorflow.python.util.all_util import remove_undocumented
_allowed_symbols = ["AffineLinearOperator"]
remove_undocumented(__name__, _allowed_symbols)
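# Illustrative usage sketch (not part of the original module): after the
# re-export above, the bijector can be imported from this module, e.g.
#
#     from tensorflow.contrib.distributions.python.ops.bijectors.affine_linear_operator import AffineLinearOperator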
|
unnikrishnankgs/va
|
venv/lib/python3.5/site-packages/tensorflow/contrib/distributions/python/ops/bijectors/affine_linear_operator.py
|
Python
|
bsd-2-clause
| 1,182 | 0.000846 |
# Imported via `make aws_managed_policies`
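# Illustrative parsing sketch (not part of the original module): the string
# assigned below is JSON, so callers can recover a dict keyed by policy name
# with, e.g.,
#
#     import json
#     aws_managed_policies = json.loads(aws_managed_policies_data)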
aws_managed_policies_data = """
{
"AWSAccountActivityAccess": {
"Arn": "arn:aws:iam::aws:policy/AWSAccountActivityAccess",
"AttachmentCount": 0,
"CreateDate": "2015-02-06T18:41:18+00:00",
"DefaultVersionId": "v1",
"Document": {
"Statement": [
{
"Action": [
"aws-portal:ViewBilling"
],
"Effect": "Allow",
"Resource": "*"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/",
"PolicyId": "ANPAJQRYCWMFX5J3E333K",
"PolicyName": "AWSAccountActivityAccess",
"UpdateDate": "2015-02-06T18:41:18+00:00",
"VersionId": "v1"
},
"AWSAccountUsageReportAccess": {
"Arn": "arn:aws:iam::aws:policy/AWSAccountUsageReportAccess",
"AttachmentCount": 0,
"CreateDate": "2015-02-06T18:41:19+00:00",
"DefaultVersionId": "v1",
"Document": {
"Statement": [
{
"Action": [
"aws-portal:ViewUsage"
],
"Effect": "Allow",
"Resource": "*"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/",
"PolicyId": "ANPAJLIB4VSBVO47ZSBB6",
"PolicyName": "AWSAccountUsageReportAccess",
"UpdateDate": "2015-02-06T18:41:19+00:00",
"VersionId": "v1"
},
"AWSAgentlessDiscoveryService": {
"Arn": "arn:aws:iam::aws:policy/AWSAgentlessDiscoveryService",
"AttachmentCount": 0,
"CreateDate": "2016-08-02T01:35:11+00:00",
"DefaultVersionId": "v1",
"Document": {
"Statement": [
{
"Action": [
"awsconnector:RegisterConnector",
"awsconnector:GetConnectorHealth"
],
"Effect": "Allow",
"Resource": "*"
},
{
"Action": "iam:GetUser",
"Effect": "Allow",
"Resource": "*"
},
{
"Action": [
"s3:GetObject",
"s3:ListBucket"
],
"Effect": "Allow",
"Resource": [
"arn:aws:s3:::connector-platform-upgrade-info/*",
"arn:aws:s3:::connector-platform-upgrade-info",
"arn:aws:s3:::connector-platform-upgrade-bundles/*",
"arn:aws:s3:::connector-platform-upgrade-bundles",
"arn:aws:s3:::connector-platform-release-notes/*",
"arn:aws:s3:::connector-platform-release-notes",
"arn:aws:s3:::prod.agentless.discovery.connector.upgrade/*",
"arn:aws:s3:::prod.agentless.discovery.connector.upgrade"
]
},
{
"Action": [
"s3:PutObject",
"s3:PutObjectAcl"
],
"Effect": "Allow",
"Resource": [
"arn:aws:s3:::import-to-ec2-connector-debug-logs/*"
]
},
{
"Action": [
"SNS:Publish"
],
"Effect": "Allow",
"Resource": "arn:aws:sns:*:*:metrics-sns-topic-for-*"
},
{
"Action": [
"Discovery:*"
],
"Effect": "Allow",
"Resource": "*",
"Sid": "Discovery"
},
{
"Action": [
"arsenal:RegisterOnPremisesAgent"
],
"Effect": "Allow",
"Resource": "*",
"Sid": "arsenal"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/",
"PolicyId": "ANPAIA3DIL7BYQ35ISM4K",
"PolicyName": "AWSAgentlessDiscoveryService",
"UpdateDate": "2016-08-02T01:35:11+00:00",
"VersionId": "v1"
},
"AWSApplicationDiscoveryAgentAccess": {
"Arn": "arn:aws:iam::aws:policy/AWSApplicationDiscoveryAgentAccess",
"AttachmentCount": 0,
"CreateDate": "2016-05-11T21:38:47+00:00",
"DefaultVersionId": "v1",
"Document": {
"Statement": [
{
"Action": [
"arsenal:RegisterOnPremisesAgent"
],
"Effect": "Allow",
"Resource": "*"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/",
"PolicyId": "ANPAICZIOVAGC6JPF3WHC",
"PolicyName": "AWSApplicationDiscoveryAgentAccess",
"UpdateDate": "2016-05-11T21:38:47+00:00",
"VersionId": "v1"
},
"AWSApplicationDiscoveryServiceFullAccess": {
"Arn": "arn:aws:iam::aws:policy/AWSApplicationDiscoveryServiceFullAccess",
"AttachmentCount": 0,
"CreateDate": "2016-05-11T21:30:50+00:00",
"DefaultVersionId": "v1",
"Document": {
"Statement": [
{
"Action": "discovery:*",
"Effect": "Allow",
"Resource": "*"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/",
"PolicyId": "ANPAJBNJEA6ZXM2SBOPDU",
"PolicyName": "AWSApplicationDiscoveryServiceFullAccess",
"UpdateDate": "2016-05-11T21:30:50+00:00",
"VersionId": "v1"
},
"AWSBatchFullAccess": {
"Arn": "arn:aws:iam::aws:policy/AWSBatchFullAccess",
"AttachmentCount": 0,
"CreateDate": "2016-12-13T00:38:59+00:00",
"DefaultVersionId": "v2",
"Document": {
"Statement": [
{
"Action": [
"batch:*",
"cloudwatch:GetMetricStatistics",
"ec2:DescribeSubnets",
"ec2:DescribeSecurityGroups",
"ec2:DescribeKeyPairs",
"ecs:DescribeClusters",
"ecs:Describe*",
"ecs:List*",
"logs:Describe*",
"logs:Get*",
"logs:TestMetricFilter",
"logs:FilterLogEvents",
"iam:ListInstanceProfiles",
"iam:ListRoles"
],
"Effect": "Allow",
"Resource": "*"
},
{
"Action": [
"iam:PassRole"
],
"Effect": "Allow",
"Resource": [
"arn:aws:iam::*:role/AWSBatchServiceRole",
"arn:aws:iam::*:role/ecsInstanceRole",
"arn:aws:iam::*:role/iaws-ec2-spot-fleet-role",
"arn:aws:iam::*:role/aws-ec2-spot-fleet-role",
"arn:aws:iam::*:role/AWSBatchJobRole*"
]
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/",
"PolicyId": "ANPAJ7K2KIWB3HZVK3CUO",
"PolicyName": "AWSBatchFullAccess",
"UpdateDate": "2016-12-13T00:38:59+00:00",
"VersionId": "v2"
},
"AWSBatchServiceRole": {
"Arn": "arn:aws:iam::aws:policy/service-role/AWSBatchServiceRole",
"AttachmentCount": 0,
"CreateDate": "2017-05-11T20:44:52+00:00",
"DefaultVersionId": "v4",
"Document": {
"Statement": [
{
"Action": [
"ec2:DescribeAccountAttributes",
"ec2:DescribeInstances",
"ec2:DescribeSubnets",
"ec2:DescribeSecurityGroups",
"ec2:DescribeKeyPairs",
"ec2:DescribeImages",
"ec2:DescribeImageAttribute",
"ec2:DescribeSpotFleetInstances",
"ec2:DescribeSpotFleetRequests",
"ec2:DescribeSpotPriceHistory",
"ec2:RequestSpotFleet",
"ec2:CancelSpotFleetRequests",
"ec2:ModifySpotFleetRequest",
"ec2:TerminateInstances",
"autoscaling:DescribeAccountLimits",
"autoscaling:DescribeAutoScalingGroups",
"autoscaling:DescribeLaunchConfigurations",
"autoscaling:DescribeAutoScalingInstances",
"autoscaling:CreateLaunchConfiguration",
"autoscaling:CreateAutoScalingGroup",
"autoscaling:UpdateAutoScalingGroup",
"autoscaling:SetDesiredCapacity",
"autoscaling:DeleteLaunchConfiguration",
"autoscaling:DeleteAutoScalingGroup",
"autoscaling:CreateOrUpdateTags",
"autoscaling:SuspendProcesses",
"autoscaling:PutNotificationConfiguration",
"autoscaling:TerminateInstanceInAutoScalingGroup",
"ecs:DescribeClusters",
"ecs:DescribeContainerInstances",
"ecs:DescribeTaskDefinition",
"ecs:DescribeTasks",
"ecs:ListClusters",
"ecs:ListContainerInstances",
"ecs:ListTaskDefinitionFamilies",
"ecs:ListTaskDefinitions",
"ecs:ListTasks",
"ecs:CreateCluster",
"ecs:DeleteCluster",
"ecs:RegisterTaskDefinition",
"ecs:DeregisterTaskDefinition",
"ecs:RunTask",
"ecs:StartTask",
"ecs:StopTask",
"ecs:UpdateContainerAgent",
"ecs:DeregisterContainerInstance",
"logs:CreateLogGroup",
"logs:CreateLogStream",
"logs:PutLogEvents",
"logs:DescribeLogGroups",
"iam:GetInstanceProfile",
"iam:PassRole"
],
"Effect": "Allow",
"Resource": "*"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/service-role/",
"PolicyId": "ANPAIUETIXPCKASQJURFE",
"PolicyName": "AWSBatchServiceRole",
"UpdateDate": "2017-05-11T20:44:52+00:00",
"VersionId": "v4"
},
"AWSCertificateManagerFullAccess": {
"Arn": "arn:aws:iam::aws:policy/AWSCertificateManagerFullAccess",
"AttachmentCount": 0,
"CreateDate": "2016-01-21T17:02:36+00:00",
"DefaultVersionId": "v1",
"Document": {
"Statement": [
{
"Action": [
"acm:*"
],
"Effect": "Allow",
"Resource": "*"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/",
"PolicyId": "ANPAJYCHABBP6VQIVBCBQ",
"PolicyName": "AWSCertificateManagerFullAccess",
"UpdateDate": "2016-01-21T17:02:36+00:00",
"VersionId": "v1"
},
"AWSCertificateManagerReadOnly": {
"Arn": "arn:aws:iam::aws:policy/AWSCertificateManagerReadOnly",
"AttachmentCount": 0,
"CreateDate": "2016-04-21T15:08:16+00:00",
"DefaultVersionId": "v2",
"Document": {
"Statement": {
"Action": [
"acm:DescribeCertificate",
"acm:ListCertificates",
"acm:GetCertificate",
"acm:ListTagsForCertificate"
],
"Effect": "Allow",
"Resource": "*"
},
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/",
"PolicyId": "ANPAI4GSWX6S4MESJ3EWC",
"PolicyName": "AWSCertificateManagerReadOnly",
"UpdateDate": "2016-04-21T15:08:16+00:00",
"VersionId": "v2"
},
"AWSCloudFormationReadOnlyAccess": {
"Arn": "arn:aws:iam::aws:policy/AWSCloudFormationReadOnlyAccess",
"AttachmentCount": 0,
"CreateDate": "2015-02-06T18:39:49+00:00",
"DefaultVersionId": "v1",
"Document": {
"Statement": [
{
"Action": [
"cloudformation:DescribeStacks",
"cloudformation:DescribeStackEvents",
"cloudformation:DescribeStackResource",
"cloudformation:DescribeStackResources",
"cloudformation:GetTemplate",
"cloudformation:List*"
],
"Effect": "Allow",
"Resource": "*"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/",
"PolicyId": "ANPAJWVBEE4I2POWLODLW",
"PolicyName": "AWSCloudFormationReadOnlyAccess",
"UpdateDate": "2015-02-06T18:39:49+00:00",
"VersionId": "v1"
},
"AWSCloudHSMFullAccess": {
"Arn": "arn:aws:iam::aws:policy/AWSCloudHSMFullAccess",
"AttachmentCount": 0,
"CreateDate": "2015-02-06T18:39:51+00:00",
"DefaultVersionId": "v1",
"Document": {
"Statement": [
{
"Action": "cloudhsm:*",
"Effect": "Allow",
"Resource": "*"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/",
"PolicyId": "ANPAIMBQYQZM7F63DA2UU",
"PolicyName": "AWSCloudHSMFullAccess",
"UpdateDate": "2015-02-06T18:39:51+00:00",
"VersionId": "v1"
},
"AWSCloudHSMReadOnlyAccess": {
"Arn": "arn:aws:iam::aws:policy/AWSCloudHSMReadOnlyAccess",
"AttachmentCount": 0,
"CreateDate": "2015-02-06T18:39:52+00:00",
"DefaultVersionId": "v1",
"Document": {
"Statement": [
{
"Action": [
"cloudhsm:Get*",
"cloudhsm:List*",
"cloudhsm:Describe*"
],
"Effect": "Allow",
"Resource": "*"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/",
"PolicyId": "ANPAISVCBSY7YDBOT67KE",
"PolicyName": "AWSCloudHSMReadOnlyAccess",
"UpdateDate": "2015-02-06T18:39:52+00:00",
"VersionId": "v1"
},
"AWSCloudHSMRole": {
"Arn": "arn:aws:iam::aws:policy/service-role/AWSCloudHSMRole",
"AttachmentCount": 0,
"CreateDate": "2015-02-06T18:41:23+00:00",
"DefaultVersionId": "v1",
"Document": {
"Statement": [
{
"Action": [
"ec2:CreateNetworkInterface",
"ec2:CreateTags",
"ec2:DeleteNetworkInterface",
"ec2:DescribeNetworkInterfaceAttribute",
"ec2:DescribeNetworkInterfaces",
"ec2:DescribeSubnets",
"ec2:DescribeVpcs",
"ec2:DetachNetworkInterface"
],
"Effect": "Allow",
"Resource": [
"*"
]
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/service-role/",
"PolicyId": "ANPAI7QIUU4GC66SF26WE",
"PolicyName": "AWSCloudHSMRole",
"UpdateDate": "2015-02-06T18:41:23+00:00",
"VersionId": "v1"
},
"AWSCloudTrailFullAccess": {
"Arn": "arn:aws:iam::aws:policy/AWSCloudTrailFullAccess",
"AttachmentCount": 0,
"CreateDate": "2016-02-16T18:31:28+00:00",
"DefaultVersionId": "v4",
"Document": {
"Statement": [
{
"Action": [
"sns:AddPermission",
"sns:CreateTopic",
"sns:DeleteTopic",
"sns:ListTopics",
"sns:SetTopicAttributes",
"sns:GetTopicAttributes"
],
"Effect": "Allow",
"Resource": "*"
},
{
"Action": [
"s3:CreateBucket",
"s3:DeleteBucket",
"s3:ListAllMyBuckets",
"s3:PutBucketPolicy",
"s3:ListBucket",
"s3:GetObject",
"s3:GetBucketLocation",
"s3:GetBucketPolicy"
],
"Effect": "Allow",
"Resource": "*"
},
{
"Action": "cloudtrail:*",
"Effect": "Allow",
"Resource": "*"
},
{
"Action": [
"logs:CreateLogGroup"
],
"Effect": "Allow",
"Resource": "*"
},
{
"Action": [
"iam:PassRole",
"iam:ListRoles",
"iam:GetRolePolicy",
"iam:GetUser"
],
"Effect": "Allow",
"Resource": "*"
},
{
"Action": [
"kms:ListKeys",
"kms:ListAliases"
],
"Effect": "Allow",
"Resource": "*"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/",
"PolicyId": "ANPAIQNUJTQYDRJPC3BNK",
"PolicyName": "AWSCloudTrailFullAccess",
"UpdateDate": "2016-02-16T18:31:28+00:00",
"VersionId": "v4"
},
"AWSCloudTrailReadOnlyAccess": {
"Arn": "arn:aws:iam::aws:policy/AWSCloudTrailReadOnlyAccess",
"AttachmentCount": 0,
"CreateDate": "2016-12-14T20:41:52+00:00",
"DefaultVersionId": "v6",
"Document": {
"Statement": [
{
"Action": [
"s3:GetObject",
"s3:GetBucketLocation"
],
"Effect": "Allow",
"Resource": "*"
},
{
"Action": [
"cloudtrail:GetTrailStatus",
"cloudtrail:DescribeTrails",
"cloudtrail:LookupEvents",
"cloudtrail:ListTags",
"cloudtrail:ListPublicKeys",
"cloudtrail:GetEventSelectors",
"s3:ListAllMyBuckets",
"kms:ListAliases"
],
"Effect": "Allow",
"Resource": "*"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/",
"PolicyId": "ANPAJDU7KJADWBSEQ3E7S",
"PolicyName": "AWSCloudTrailReadOnlyAccess",
"UpdateDate": "2016-12-14T20:41:52+00:00",
"VersionId": "v6"
},
"AWSCodeBuildAdminAccess": {
"Arn": "arn:aws:iam::aws:policy/AWSCodeBuildAdminAccess",
"AttachmentCount": 0,
"CreateDate": "2016-12-01T19:04:44+00:00",
"DefaultVersionId": "v1",
"Document": {
"Statement": [
{
"Action": [
"codebuild:*",
"codecommit:GetBranch",
"codecommit:GetCommit",
"codecommit:GetRepository",
"codecommit:ListBranches",
"codecommit:ListRepositories",
"ecr:DescribeRepositories",
"ecr:ListImages",
"s3:GetBucketLocation",
"s3:ListAllMyBuckets"
],
"Effect": "Allow",
"Resource": "*"
},
{
"Action": [
"logs:GetLogEvents"
],
"Effect": "Allow",
"Resource": "arn:aws:logs:*:*:log-group:/aws/codebuild/*:log-stream:*"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/",
"PolicyId": "ANPAJQJGIOIE3CD2TQXDS",
"PolicyName": "AWSCodeBuildAdminAccess",
"UpdateDate": "2016-12-01T19:04:44+00:00",
"VersionId": "v1"
},
"AWSCodeBuildDeveloperAccess": {
"Arn": "arn:aws:iam::aws:policy/AWSCodeBuildDeveloperAccess",
"AttachmentCount": 0,
"CreateDate": "2016-12-01T19:02:32+00:00",
"DefaultVersionId": "v1",
"Document": {
"Statement": [
{
"Action": [
"codebuild:StartBuild",
"codebuild:StopBuild",
"codebuild:BatchGet*",
"codebuild:Get*",
"codebuild:List*",
"codecommit:GetBranch",
"codecommit:GetCommit",
"codecommit:GetRepository",
"codecommit:ListBranches",
"s3:GetBucketLocation",
"s3:ListAllMyBuckets"
],
"Effect": "Allow",
"Resource": "*"
},
{
"Action": [
"logs:GetLogEvents"
],
"Effect": "Allow",
"Resource": "arn:aws:logs:*:*:log-group:/aws/codebuild/*:log-stream:*"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/",
"PolicyId": "ANPAIMKTMR34XSBQW45HS",
"PolicyName": "AWSCodeBuildDeveloperAccess",
"UpdateDate": "2016-12-01T19:02:32+00:00",
"VersionId": "v1"
},
"AWSCodeBuildReadOnlyAccess": {
"Arn": "arn:aws:iam::aws:policy/AWSCodeBuildReadOnlyAccess",
"AttachmentCount": 0,
"CreateDate": "2016-12-01T19:03:41+00:00",
"DefaultVersionId": "v1",
"Document": {
"Statement": [
{
"Action": [
"codebuild:BatchGet*",
"codebuild:Get*",
"codebuild:List*",
"codecommit:GetBranch",
"codecommit:GetCommit",
"codecommit:GetRepository"
],
"Effect": "Allow",
"Resource": "*"
},
{
"Action": [
"logs:GetLogEvents"
],
"Effect": "Allow",
"Resource": "arn:aws:logs:*:*:log-group:/aws/codebuild/*:log-stream:*"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/",
"PolicyId": "ANPAJIZZWN6557F5HVP2K",
"PolicyName": "AWSCodeBuildReadOnlyAccess",
"UpdateDate": "2016-12-01T19:03:41+00:00",
"VersionId": "v1"
},
"AWSCodeCommitFullAccess": {
"Arn": "arn:aws:iam::aws:policy/AWSCodeCommitFullAccess",
"AttachmentCount": 0,
"CreateDate": "2015-07-09T17:02:19+00:00",
"DefaultVersionId": "v1",
"Document": {
"Statement": [
{
"Action": [
"codecommit:*"
],
"Effect": "Allow",
"Resource": "*"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/",
"PolicyId": "ANPAI4VCZ3XPIZLQ5NZV2",
"PolicyName": "AWSCodeCommitFullAccess",
"UpdateDate": "2015-07-09T17:02:19+00:00",
"VersionId": "v1"
},
"AWSCodeCommitPowerUser": {
"Arn": "arn:aws:iam::aws:policy/AWSCodeCommitPowerUser",
"AttachmentCount": 0,
"CreateDate": "2017-05-22T21:12:48+00:00",
"DefaultVersionId": "v3",
"Document": {
"Statement": [
{
"Action": [
"codecommit:BatchGetRepositories",
"codecommit:CreateBranch",
"codecommit:CreateRepository",
"codecommit:DeleteBranch",
"codecommit:Get*",
"codecommit:GitPull",
"codecommit:GitPush",
"codecommit:List*",
"codecommit:Put*",
"codecommit:Test*",
"codecommit:Update*"
],
"Effect": "Allow",
"Resource": "*"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/",
"PolicyId": "ANPAI4UIINUVGB5SEC57G",
"PolicyName": "AWSCodeCommitPowerUser",
"UpdateDate": "2017-05-22T21:12:48+00:00",
"VersionId": "v3"
},
"AWSCodeCommitReadOnly": {
"Arn": "arn:aws:iam::aws:policy/AWSCodeCommitReadOnly",
"AttachmentCount": 0,
"CreateDate": "2015-07-09T17:05:06+00:00",
"DefaultVersionId": "v1",
"Document": {
"Statement": [
{
"Action": [
"codecommit:BatchGetRepositories",
"codecommit:Get*",
"codecommit:GitPull",
"codecommit:List*"
],
"Effect": "Allow",
"Resource": "*"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/",
"PolicyId": "ANPAJACNSXR7Z2VLJW3D6",
"PolicyName": "AWSCodeCommitReadOnly",
"UpdateDate": "2015-07-09T17:05:06+00:00",
"VersionId": "v1"
},
"AWSCodeDeployDeployerAccess": {
"Arn": "arn:aws:iam::aws:policy/AWSCodeDeployDeployerAccess",
"AttachmentCount": 0,
"CreateDate": "2015-05-19T18:18:43+00:00",
"DefaultVersionId": "v1",
"Document": {
"Statement": [
{
"Action": [
"codedeploy:Batch*",
"codedeploy:CreateDeployment",
"codedeploy:Get*",
"codedeploy:List*",
"codedeploy:RegisterApplicationRevision"
],
"Effect": "Allow",
"Resource": "*"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/",
"PolicyId": "ANPAJUWEPOMGLMVXJAPUI",
"PolicyName": "AWSCodeDeployDeployerAccess",
"UpdateDate": "2015-05-19T18:18:43+00:00",
"VersionId": "v1"
},
"AWSCodeDeployFullAccess": {
"Arn": "arn:aws:iam::aws:policy/AWSCodeDeployFullAccess",
"AttachmentCount": 0,
"CreateDate": "2015-05-19T18:13:23+00:00",
"DefaultVersionId": "v1",
"Document": {
"Statement": [
{
"Action": "codedeploy:*",
"Effect": "Allow",
"Resource": "*"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/",
"PolicyId": "ANPAIONKN3TJZUKXCHXWC",
"PolicyName": "AWSCodeDeployFullAccess",
"UpdateDate": "2015-05-19T18:13:23+00:00",
"VersionId": "v1"
},
"AWSCodeDeployReadOnlyAccess": {
"Arn": "arn:aws:iam::aws:policy/AWSCodeDeployReadOnlyAccess",
"AttachmentCount": 0,
"CreateDate": "2015-05-19T18:21:32+00:00",
"DefaultVersionId": "v1",
"Document": {
"Statement": [
{
"Action": [
"codedeploy:Batch*",
"codedeploy:Get*",
"codedeploy:List*"
],
"Effect": "Allow",
"Resource": "*"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/",
"PolicyId": "ANPAILZHHKCKB4NE7XOIQ",
"PolicyName": "AWSCodeDeployReadOnlyAccess",
"UpdateDate": "2015-05-19T18:21:32+00:00",
"VersionId": "v1"
},
"AWSCodeDeployRole": {
"Arn": "arn:aws:iam::aws:policy/service-role/AWSCodeDeployRole",
"AttachmentCount": 0,
"CreateDate": "2017-09-11T19:09:51+00:00",
"DefaultVersionId": "v6",
"Document": {
"Statement": [
{
"Action": [
"autoscaling:CompleteLifecycleAction",
"autoscaling:DeleteLifecycleHook",
"autoscaling:DescribeAutoScalingGroups",
"autoscaling:DescribeLifecycleHooks",
"autoscaling:PutLifecycleHook",
"autoscaling:RecordLifecycleActionHeartbeat",
"autoscaling:CreateAutoScalingGroup",
"autoscaling:UpdateAutoScalingGroup",
"autoscaling:EnableMetricsCollection",
"autoscaling:DescribeAutoScalingGroups",
"autoscaling:DescribePolicies",
"autoscaling:DescribeScheduledActions",
"autoscaling:DescribeNotificationConfigurations",
"autoscaling:DescribeLifecycleHooks",
"autoscaling:SuspendProcesses",
"autoscaling:ResumeProcesses",
"autoscaling:AttachLoadBalancers",
"autoscaling:PutScalingPolicy",
"autoscaling:PutScheduledUpdateGroupAction",
"autoscaling:PutNotificationConfiguration",
"autoscaling:PutLifecycleHook",
"autoscaling:DescribeScalingActivities",
"autoscaling:DeleteAutoScalingGroup",
"ec2:DescribeInstances",
"ec2:DescribeInstanceStatus",
"ec2:TerminateInstances",
"tag:GetTags",
"tag:GetResources",
"sns:Publish",
"cloudwatch:DescribeAlarms",
"cloudwatch:PutMetricAlarm",
"elasticloadbalancing:DescribeLoadBalancers",
"elasticloadbalancing:DescribeInstanceHealth",
"elasticloadbalancing:RegisterInstancesWithLoadBalancer",
"elasticloadbalancing:DeregisterInstancesFromLoadBalancer",
"elasticloadbalancing:DescribeTargetGroups",
"elasticloadbalancing:DescribeTargetHealth",
"elasticloadbalancing:RegisterTargets",
"elasticloadbalancing:DeregisterTargets"
],
"Effect": "Allow",
"Resource": "*"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/service-role/",
"PolicyId": "ANPAJ2NKMKD73QS5NBFLA",
"PolicyName": "AWSCodeDeployRole",
"UpdateDate": "2017-09-11T19:09:51+00:00",
"VersionId": "v6"
},
"AWSCodePipelineApproverAccess": {
"Arn": "arn:aws:iam::aws:policy/AWSCodePipelineApproverAccess",
"AttachmentCount": 0,
"CreateDate": "2017-08-02T17:24:58+00:00",
"DefaultVersionId": "v3",
"Document": {
"Statement": [
{
"Action": [
"codepipeline:GetPipeline",
"codepipeline:GetPipelineState",
"codepipeline:GetPipelineExecution",
"codepipeline:ListPipelineExecutions",
"codepipeline:ListPipelines",
"codepipeline:PutApprovalResult"
],
"Effect": "Allow",
"Resource": "*"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/",
"PolicyId": "ANPAICXNWK42SQ6LMDXM2",
"PolicyName": "AWSCodePipelineApproverAccess",
"UpdateDate": "2017-08-02T17:24:58+00:00",
"VersionId": "v3"
},
"AWSCodePipelineCustomActionAccess": {
"Arn": "arn:aws:iam::aws:policy/AWSCodePipelineCustomActionAccess",
"AttachmentCount": 0,
"CreateDate": "2015-07-09T17:02:54+00:00",
"DefaultVersionId": "v1",
"Document": {
"Statement": [
{
"Action": [
"codepipeline:AcknowledgeJob",
"codepipeline:GetJobDetails",
"codepipeline:PollForJobs",
"codepipeline:PutJobFailureResult",
"codepipeline:PutJobSuccessResult"
],
"Effect": "Allow",
"Resource": "*"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/",
"PolicyId": "ANPAJFW5Z32BTVF76VCYC",
"PolicyName": "AWSCodePipelineCustomActionAccess",
"UpdateDate": "2015-07-09T17:02:54+00:00",
"VersionId": "v1"
},
"AWSCodePipelineFullAccess": {
"Arn": "arn:aws:iam::aws:policy/AWSCodePipelineFullAccess",
"AttachmentCount": 0,
"CreateDate": "2016-11-01T19:59:46+00:00",
"DefaultVersionId": "v5",
"Document": {
"Statement": [
{
"Action": [
"codepipeline:*",
"iam:ListRoles",
"iam:PassRole",
"s3:CreateBucket",
"s3:GetBucketPolicy",
"s3:GetObject",
"s3:ListAllMyBuckets",
"s3:ListBucket",
"s3:PutBucketPolicy",
"codecommit:ListBranches",
"codecommit:ListRepositories",
"codedeploy:GetApplication",
"codedeploy:GetDeploymentGroup",
"codedeploy:ListApplications",
"codedeploy:ListDeploymentGroups",
"elasticbeanstalk:DescribeApplications",
"elasticbeanstalk:DescribeEnvironments",
"lambda:GetFunctionConfiguration",
"lambda:ListFunctions",
"opsworks:DescribeApps",
"opsworks:DescribeLayers",
"opsworks:DescribeStacks",
"cloudformation:DescribeStacks",
"cloudformation:ListChangeSets"
],
"Effect": "Allow",
"Resource": "*"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/",
"PolicyId": "ANPAJP5LH77KSAT2KHQGG",
"PolicyName": "AWSCodePipelineFullAccess",
"UpdateDate": "2016-11-01T19:59:46+00:00",
"VersionId": "v5"
},
"AWSCodePipelineReadOnlyAccess": {
"Arn": "arn:aws:iam::aws:policy/AWSCodePipelineReadOnlyAccess",
"AttachmentCount": 0,
"CreateDate": "2017-08-02T17:25:18+00:00",
"DefaultVersionId": "v6",
"Document": {
"Statement": [
{
"Action": [
"codepipeline:GetPipeline",
"codepipeline:GetPipelineState",
"codepipeline:GetPipelineExecution",
"codepipeline:ListPipelineExecutions",
"codepipeline:ListActionTypes",
"codepipeline:ListPipelines",
"iam:ListRoles",
"s3:GetBucketPolicy",
"s3:GetObject",
"s3:ListAllMyBuckets",
"s3:ListBucket",
"codecommit:ListBranches",
"codecommit:ListRepositories",
"codedeploy:GetApplication",
"codedeploy:GetDeploymentGroup",
"codedeploy:ListApplications",
"codedeploy:ListDeploymentGroups",
"elasticbeanstalk:DescribeApplications",
"elasticbeanstalk:DescribeEnvironments",
"lambda:GetFunctionConfiguration",
"lambda:ListFunctions",
"opsworks:DescribeApps",
"opsworks:DescribeLayers",
"opsworks:DescribeStacks"
],
"Effect": "Allow",
"Resource": "*"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/",
"PolicyId": "ANPAILFKZXIBOTNC5TO2Q",
"PolicyName": "AWSCodePipelineReadOnlyAccess",
"UpdateDate": "2017-08-02T17:25:18+00:00",
"VersionId": "v6"
},
"AWSCodeStarFullAccess": {
"Arn": "arn:aws:iam::aws:policy/AWSCodeStarFullAccess",
"AttachmentCount": 0,
"CreateDate": "2017-04-19T16:23:19+00:00",
"DefaultVersionId": "v1",
"Document": {
"Statement": [
{
"Action": [
"codestar:*",
"ec2:DescribeKeyPairs",
"ec2:DescribeVpcs",
"ec2:DescribeSubnets"
],
"Effect": "Allow",
"Resource": "*",
"Sid": "CodeStarEC2"
},
{
"Action": [
"cloudformation:DescribeStack*",
"cloudformation:GetTemplateSummary"
],
"Effect": "Allow",
"Resource": [
"arn:aws:cloudformation:*:*:stack/awscodestar-*"
],
"Sid": "CodeStarCF"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/",
"PolicyId": "ANPAIXI233TFUGLZOJBEC",
"PolicyName": "AWSCodeStarFullAccess",
"UpdateDate": "2017-04-19T16:23:19+00:00",
"VersionId": "v1"
},
"AWSCodeStarServiceRole": {
"Arn": "arn:aws:iam::aws:policy/service-role/AWSCodeStarServiceRole",
"AttachmentCount": 0,
"CreateDate": "2017-07-13T19:53:22+00:00",
"DefaultVersionId": "v2",
"Document": {
"Statement": [
{
"Action": [
"cloudformation:*Stack*",
"cloudformation:GetTemplate"
],
"Effect": "Allow",
"Resource": [
"arn:aws:cloudformation:*:*:stack/awscodestar-*",
"arn:aws:cloudformation:*:*:stack/awseb-*"
],
"Sid": "ProjectStack"
},
{
"Action": [
"cloudformation:GetTemplateSummary",
"cloudformation:DescribeChangeSet"
],
"Effect": "Allow",
"Resource": "*",
"Sid": "ProjectStackTemplate"
},
{
"Action": [
"s3:GetObject"
],
"Effect": "Allow",
"Resource": [
"arn:aws:s3:::awscodestar-*/*"
],
"Sid": "ProjectQuickstarts"
},
{
"Action": [
"s3:*"
],
"Effect": "Allow",
"Resource": [
"arn:aws:s3:::aws-codestar-*",
"arn:aws:s3:::aws-codestar-*/*",
"arn:aws:s3:::elasticbeanstalk-*",
"arn:aws:s3:::elasticbeanstalk-*/*"
],
"Sid": "ProjectS3Buckets"
},
{
"Action": [
"codestar:*Project",
"codestar:*Resource*",
"codestar:List*",
"codestar:Describe*",
"codestar:Get*",
"codestar:AssociateTeamMember",
"codecommit:*",
"codepipeline:*",
"codedeploy:*",
"codebuild:*",
"ec2:RunInstances",
"autoscaling:*",
"cloudwatch:Put*",
"ec2:*",
"elasticbeanstalk:*",
"elasticloadbalancing:*",
"iam:ListRoles",
"logs:*",
"sns:*"
],
"Effect": "Allow",
"Resource": "*",
"Sid": "ProjectServices"
},
{
"Action": [
"iam:AttachRolePolicy",
"iam:CreateRole",
"iam:DeleteRole",
"iam:DeleteRolePolicy",
"iam:DetachRolePolicy",
"iam:GetRole",
"iam:PassRole",
"iam:PutRolePolicy",
"iam:SetDefaultPolicyVersion",
"iam:CreatePolicy",
"iam:DeletePolicy",
"iam:AddRoleToInstanceProfile",
"iam:CreateInstanceProfile",
"iam:DeleteInstanceProfile",
"iam:RemoveRoleFromInstanceProfile"
],
"Effect": "Allow",
"Resource": [
"arn:aws:iam::*:role/CodeStarWorker*",
"arn:aws:iam::*:policy/CodeStarWorker*",
"arn:aws:iam::*:instance-profile/awscodestar-*"
],
"Sid": "ProjectWorkerRoles"
},
{
"Action": [
"iam:AttachUserPolicy",
"iam:DetachUserPolicy"
],
"Condition": {
"ArnEquals": {
"iam:PolicyArn": [
"arn:aws:iam::*:policy/CodeStar_*"
]
}
},
"Effect": "Allow",
"Resource": "*",
"Sid": "ProjectTeamMembers"
},
{
"Action": [
"iam:CreatePolicy",
"iam:DeletePolicy",
"iam:CreatePolicyVersion",
"iam:DeletePolicyVersion",
"iam:ListEntitiesForPolicy",
"iam:ListPolicyVersions"
],
"Effect": "Allow",
"Resource": [
"arn:aws:iam::*:policy/CodeStar_*"
],
"Sid": "ProjectRoles"
},
{
"Action": [
"iam:ListAttachedRolePolicies"
],
"Effect": "Allow",
"Resource": [
"arn:aws:iam::*:role/aws-codestar-service-role",
"arn:aws:iam::*:role/service-role/aws-codestar-service-role"
],
"Sid": "InspectServiceRole"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/service-role/",
"PolicyId": "ANPAIN6D4M2KD3NBOC4M4",
"PolicyName": "AWSCodeStarServiceRole",
"UpdateDate": "2017-07-13T19:53:22+00:00",
"VersionId": "v2"
},
"AWSConfigRole": {
"Arn": "arn:aws:iam::aws:policy/service-role/AWSConfigRole",
"AttachmentCount": 0,
"CreateDate": "2017-08-14T19:04:46+00:00",
"DefaultVersionId": "v10",
"Document": {
"Statement": [
{
"Action": [
"cloudtrail:DescribeTrails",
"ec2:Describe*",
"config:Put*",
"config:Get*",
"config:List*",
"config:Describe*",
"cloudtrail:GetTrailStatus",
"s3:GetObject",
"iam:GetAccountAuthorizationDetails",
"iam:GetAccountPasswordPolicy",
"iam:GetAccountSummary",
"iam:GetGroup",
"iam:GetGroupPolicy",
"iam:GetPolicy",
"iam:GetPolicyVersion",
"iam:GetRole",
"iam:GetRolePolicy",
"iam:GetUser",
"iam:GetUserPolicy",
"iam:ListAttachedGroupPolicies",
"iam:ListAttachedRolePolicies",
"iam:ListAttachedUserPolicies",
"iam:ListEntitiesForPolicy",
"iam:ListGroupPolicies",
"iam:ListGroupsForUser",
"iam:ListInstanceProfilesForRole",
"iam:ListPolicyVersions",
"iam:ListRolePolicies",
"iam:ListUserPolicies",
"elasticloadbalancing:DescribeLoadBalancers",
"elasticloadbalancing:DescribeLoadBalancerAttributes",
"elasticloadbalancing:DescribeLoadBalancerPolicies",
"elasticloadbalancing:DescribeTags",
"acm:DescribeCertificate",
"acm:ListCertificates",
"acm:ListTagsForCertificate",
"rds:DescribeDBInstances",
"rds:DescribeDBSecurityGroups",
"rds:DescribeDBSnapshotAttributes",
"rds:DescribeDBSnapshots",
"rds:DescribeDBSubnetGroups",
"rds:DescribeEventSubscriptions",
"rds:ListTagsForResource",
"rds:DescribeDBClusters",
"s3:GetAccelerateConfiguration",
"s3:GetBucketAcl",
"s3:GetBucketCORS",
"s3:GetBucketLocation",
"s3:GetBucketLogging",
"s3:GetBucketNotification",
"s3:GetBucketPolicy",
"s3:GetBucketRequestPayment",
"s3:GetBucketTagging",
"s3:GetBucketVersioning",
"s3:GetBucketWebsite",
"s3:GetLifecycleConfiguration",
"s3:GetReplicationConfiguration",
"s3:ListAllMyBuckets",
"redshift:DescribeClusterParameterGroups",
"redshift:DescribeClusterParameters",
"redshift:DescribeClusterSecurityGroups",
"redshift:DescribeClusterSnapshots",
"redshift:DescribeClusterSubnetGroups",
"redshift:DescribeClusters",
"redshift:DescribeEventSubscriptions",
"redshift:DescribeLoggingStatus",
"dynamodb:DescribeLimits",
"dynamodb:DescribeTable",
"dynamodb:ListTables",
"dynamodb:ListTagsOfResource",
"cloudwatch:DescribeAlarms",
"application-autoscaling:DescribeScalableTargets",
"application-autoscaling:DescribeScalingPolicies",
"autoscaling:DescribeAutoScalingGroups",
"autoscaling:DescribeLaunchConfigurations",
"autoscaling:DescribeLifecycleHooks",
"autoscaling:DescribePolicies",
"autoscaling:DescribeScheduledActions",
"autoscaling:DescribeTags"
],
"Effect": "Allow",
"Resource": "*"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/service-role/",
"PolicyId": "ANPAIQRXRDRGJUA33ELIO",
"PolicyName": "AWSConfigRole",
"UpdateDate": "2017-08-14T19:04:46+00:00",
"VersionId": "v10"
},
"AWSConfigRulesExecutionRole": {
"Arn": "arn:aws:iam::aws:policy/service-role/AWSConfigRulesExecutionRole",
"AttachmentCount": 0,
"CreateDate": "2016-03-25T17:59:36+00:00",
"DefaultVersionId": "v1",
"Document": {
"Statement": [
{
"Action": [
"s3:GetObject"
],
"Effect": "Allow",
"Resource": "arn:aws:s3:::*/AWSLogs/*/Config/*"
},
{
"Action": [
"config:Put*",
"config:Get*",
"config:List*",
"config:Describe*"
],
"Effect": "Allow",
"Resource": "*"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/service-role/",
"PolicyId": "ANPAJUB3KIKTA4PU4OYAA",
"PolicyName": "AWSConfigRulesExecutionRole",
"UpdateDate": "2016-03-25T17:59:36+00:00",
"VersionId": "v1"
},
"AWSConfigUserAccess": {
"Arn": "arn:aws:iam::aws:policy/AWSConfigUserAccess",
"AttachmentCount": 0,
"CreateDate": "2016-08-30T19:15:19+00:00",
"DefaultVersionId": "v3",
"Document": {
"Statement": [
{
"Action": [
"config:Get*",
"config:Describe*",
"config:Deliver*",
"config:List*",
"tag:GetResources",
"tag:GetTagKeys",
"cloudtrail:DescribeTrails",
"cloudtrail:GetTrailStatus",
"cloudtrail:LookupEvents"
],
"Effect": "Allow",
"Resource": "*"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/",
"PolicyId": "ANPAIWTTSFJ7KKJE3MWGA",
"PolicyName": "AWSConfigUserAccess",
"UpdateDate": "2016-08-30T19:15:19+00:00",
"VersionId": "v3"
},
"AWSConnector": {
"Arn": "arn:aws:iam::aws:policy/AWSConnector",
"AttachmentCount": 0,
"CreateDate": "2015-09-28T19:50:38+00:00",
"DefaultVersionId": "v3",
"Document": {
"Statement": [
{
"Action": "iam:GetUser",
"Effect": "Allow",
"Resource": "*"
},
{
"Action": [
"s3:ListAllMyBuckets"
],
"Effect": "Allow",
"Resource": "*"
},
{
"Action": [
"s3:CreateBucket",
"s3:DeleteBucket",
"s3:DeleteObject",
"s3:GetBucketLocation",
"s3:GetObject",
"s3:ListBucket",
"s3:PutObject",
"s3:PutObjectAcl",
"s3:AbortMultipartUpload",
"s3:ListBucketMultipartUploads",
"s3:ListMultipartUploadParts"
],
"Effect": "Allow",
"Resource": "arn:aws:s3:::import-to-ec2-*"
},
{
"Action": [
"ec2:CancelConversionTask",
"ec2:CancelExportTask",
"ec2:CreateImage",
"ec2:CreateInstanceExportTask",
"ec2:CreateTags",
"ec2:CreateVolume",
"ec2:DeleteTags",
"ec2:DeleteVolume",
"ec2:DescribeConversionTasks",
"ec2:DescribeExportTasks",
"ec2:DescribeImages",
"ec2:DescribeInstanceAttribute",
"ec2:DescribeInstanceStatus",
"ec2:DescribeInstances",
"ec2:DescribeRegions",
"ec2:DescribeTags",
"ec2:DetachVolume",
"ec2:ImportInstance",
"ec2:ImportVolume",
"ec2:ModifyInstanceAttribute",
"ec2:RunInstances",
"ec2:StartInstances",
"ec2:StopInstances",
"ec2:TerminateInstances",
"ec2:ImportImage",
"ec2:DescribeImportImageTasks",
"ec2:DeregisterImage",
"ec2:DescribeSnapshots",
"ec2:DeleteSnapshot",
"ec2:CancelImportTask",
"ec2:ImportSnapshot",
"ec2:DescribeImportSnapshotTasks"
],
"Effect": "Allow",
"Resource": "*"
},
{
"Action": [
"SNS:Publish"
],
"Effect": "Allow",
"Resource": "arn:aws:sns:*:*:metrics-sns-topic-for-*"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/",
"PolicyId": "ANPAJ6YATONJHICG3DJ3U",
"PolicyName": "AWSConnector",
"UpdateDate": "2015-09-28T19:50:38+00:00",
"VersionId": "v3"
},
"AWSDataPipelineRole": {
"Arn": "arn:aws:iam::aws:policy/service-role/AWSDataPipelineRole",
"AttachmentCount": 0,
"CreateDate": "2016-02-22T17:17:38+00:00",
"DefaultVersionId": "v5",
"Document": {
"Statement": [
{
"Action": [
"cloudwatch:*",
"datapipeline:DescribeObjects",
"datapipeline:EvaluateExpression",
"dynamodb:BatchGetItem",
"dynamodb:DescribeTable",
"dynamodb:GetItem",
"dynamodb:Query",
"dynamodb:Scan",
"dynamodb:UpdateTable",
"ec2:AuthorizeSecurityGroupIngress",
"ec2:CancelSpotInstanceRequests",
"ec2:CreateSecurityGroup",
"ec2:CreateTags",
"ec2:DeleteTags",
"ec2:Describe*",
"ec2:ModifyImageAttribute",
"ec2:ModifyInstanceAttribute",
"ec2:RequestSpotInstances",
"ec2:RunInstances",
"ec2:StartInstances",
"ec2:StopInstances",
"ec2:TerminateInstances",
"ec2:AuthorizeSecurityGroupEgress",
"ec2:DeleteSecurityGroup",
"ec2:RevokeSecurityGroupEgress",
"ec2:DescribeNetworkInterfaces",
"ec2:CreateNetworkInterface",
"ec2:DeleteNetworkInterface",
"ec2:DetachNetworkInterface",
"elasticmapreduce:*",
"iam:GetInstanceProfile",
"iam:GetRole",
"iam:GetRolePolicy",
"iam:ListAttachedRolePolicies",
"iam:ListRolePolicies",
"iam:ListInstanceProfiles",
"iam:PassRole",
"rds:DescribeDBInstances",
"rds:DescribeDBSecurityGroups",
"redshift:DescribeClusters",
"redshift:DescribeClusterSecurityGroups",
"s3:CreateBucket",
"s3:DeleteObject",
"s3:Get*",
"s3:List*",
"s3:Put*",
"sdb:BatchPutAttributes",
"sdb:Select*",
"sns:GetTopicAttributes",
"sns:ListTopics",
"sns:Publish",
"sns:Subscribe",
"sns:Unsubscribe",
"sqs:CreateQueue",
"sqs:Delete*",
"sqs:GetQueue*",
"sqs:PurgeQueue",
"sqs:ReceiveMessage"
],
"Effect": "Allow",
"Resource": [
"*"
]
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/service-role/",
"PolicyId": "ANPAIKCP6XS3ESGF4GLO2",
"PolicyName": "AWSDataPipelineRole",
"UpdateDate": "2016-02-22T17:17:38+00:00",
"VersionId": "v5"
},
"AWSDataPipeline_FullAccess": {
"Arn": "arn:aws:iam::aws:policy/AWSDataPipeline_FullAccess",
"AttachmentCount": 0,
"CreateDate": "2017-08-17T18:48:39+00:00",
"DefaultVersionId": "v2",
"Document": {
"Statement": [
{
"Action": [
"s3:List*",
"dynamodb:DescribeTable",
"rds:DescribeDBInstances",
"rds:DescribeDBSecurityGroups",
"redshift:DescribeClusters",
"redshift:DescribeClusterSecurityGroups",
"sns:ListTopics",
"sns:Subscribe",
"iam:ListRoles",
"iam:GetRolePolicy",
"iam:GetInstanceProfile",
"iam:ListInstanceProfiles",
"datapipeline:*"
],
"Effect": "Allow",
"Resource": [
"*"
]
},
{
"Action": "iam:PassRole",
"Effect": "Allow",
"Resource": [
"arn:aws:iam::*:role/DataPipelineDefaultResourceRole",
"arn:aws:iam::*:role/DataPipelineDefaultRole"
]
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/",
"PolicyId": "ANPAIXOFIG7RSBMRPHXJ4",
"PolicyName": "AWSDataPipeline_FullAccess",
"UpdateDate": "2017-08-17T18:48:39+00:00",
"VersionId": "v2"
},
"AWSDataPipeline_PowerUser": {
"Arn": "arn:aws:iam::aws:policy/AWSDataPipeline_PowerUser",
"AttachmentCount": 0,
"CreateDate": "2017-08-17T18:49:42+00:00",
"DefaultVersionId": "v2",
"Document": {
"Statement": [
{
"Action": [
"s3:List*",
"dynamodb:DescribeTable",
"rds:DescribeDBInstances",
"rds:DescribeDBSecurityGroups",
"redshift:DescribeClusters",
"redshift:DescribeClusterSecurityGroups",
"sns:ListTopics",
"iam:ListRoles",
"iam:GetRolePolicy",
"iam:GetInstanceProfile",
"iam:ListInstanceProfiles",
"datapipeline:*"
],
"Effect": "Allow",
"Resource": [
"*"
]
},
{
"Action": "iam:PassRole",
"Effect": "Allow",
"Resource": [
"arn:aws:iam::*:role/DataPipelineDefaultResourceRole",
"arn:aws:iam::*:role/DataPipelineDefaultRole"
]
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/",
"PolicyId": "ANPAIMXGLVY6DVR24VTYS",
"PolicyName": "AWSDataPipeline_PowerUser",
"UpdateDate": "2017-08-17T18:49:42+00:00",
"VersionId": "v2"
},
"AWSDeviceFarmFullAccess": {
"Arn": "arn:aws:iam::aws:policy/AWSDeviceFarmFullAccess",
"AttachmentCount": 0,
"CreateDate": "2015-07-13T16:37:38+00:00",
"DefaultVersionId": "v1",
"Document": {
"Statement": [
{
"Action": [
"devicefarm:*"
],
"Effect": "Allow",
"Resource": "*"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/",
"PolicyId": "ANPAJO7KEDP4VYJPNT5UW",
"PolicyName": "AWSDeviceFarmFullAccess",
"UpdateDate": "2015-07-13T16:37:38+00:00",
"VersionId": "v1"
},
"AWSDirectConnectFullAccess": {
"Arn": "arn:aws:iam::aws:policy/AWSDirectConnectFullAccess",
"AttachmentCount": 0,
"CreateDate": "2015-02-06T18:40:07+00:00",
"DefaultVersionId": "v1",
"Document": {
"Statement": [
{
"Action": [
"directconnect:*"
],
"Effect": "Allow",
"Resource": "*"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/",
"PolicyId": "ANPAJQF2QKZSK74KTIHOW",
"PolicyName": "AWSDirectConnectFullAccess",
"UpdateDate": "2015-02-06T18:40:07+00:00",
"VersionId": "v1"
},
"AWSDirectConnectReadOnlyAccess": {
"Arn": "arn:aws:iam::aws:policy/AWSDirectConnectReadOnlyAccess",
"AttachmentCount": 0,
"CreateDate": "2015-02-06T18:40:08+00:00",
"DefaultVersionId": "v1",
"Document": {
"Statement": [
{
"Action": [
"directconnect:Describe*"
],
"Effect": "Allow",
"Resource": "*"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/",
"PolicyId": "ANPAI23HZ27SI6FQMGNQ2",
"PolicyName": "AWSDirectConnectReadOnlyAccess",
"UpdateDate": "2015-02-06T18:40:08+00:00",
"VersionId": "v1"
},
"AWSDirectoryServiceFullAccess": {
"Arn": "arn:aws:iam::aws:policy/AWSDirectoryServiceFullAccess",
"AttachmentCount": 0,
"CreateDate": "2016-02-24T23:10:36+00:00",
"DefaultVersionId": "v2",
"Document": {
"Statement": [
{
"Action": [
"ds:*",
"ec2:AuthorizeSecurityGroupEgress",
"ec2:AuthorizeSecurityGroupIngress",
"ec2:CreateNetworkInterface",
"ec2:CreateSecurityGroup",
"ec2:DeleteNetworkInterface",
"ec2:DeleteSecurityGroup",
"ec2:DescribeNetworkInterfaces",
"ec2:DescribeSubnets",
"ec2:DescribeVpcs",
"ec2:RevokeSecurityGroupEgress",
"ec2:RevokeSecurityGroupIngress",
"sns:GetTopicAttributes",
"sns:ListSubscriptions",
"sns:ListSubscriptionsByTopic",
"sns:ListTopics"
],
"Effect": "Allow",
"Resource": "*"
},
{
"Action": [
"sns:CreateTopic",
"sns:DeleteTopic",
"sns:SetTopicAttributes",
"sns:Subscribe",
"sns:Unsubscribe"
],
"Effect": "Allow",
"Resource": "arn:aws:sns:*:*:DirectoryMonitoring*"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/",
"PolicyId": "ANPAINAW5ANUWTH3R4ANI",
"PolicyName": "AWSDirectoryServiceFullAccess",
"UpdateDate": "2016-02-24T23:10:36+00:00",
"VersionId": "v2"
},
"AWSDirectoryServiceReadOnlyAccess": {
"Arn": "arn:aws:iam::aws:policy/AWSDirectoryServiceReadOnlyAccess",
"AttachmentCount": 0,
"CreateDate": "2016-02-24T23:11:18+00:00",
"DefaultVersionId": "v3",
"Document": {
"Statement": [
{
"Action": [
"ds:Check*",
"ds:Describe*",
"ds:Get*",
"ds:List*",
"ds:Verify*",
"ec2:DescribeNetworkInterfaces",
"ec2:DescribeSubnets",
"ec2:DescribeVpcs",
"sns:ListTopics",
"sns:GetTopicAttributes",
"sns:ListSubscriptions",
"sns:ListSubscriptionsByTopic"
],
"Effect": "Allow",
"Resource": "*"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/",
"PolicyId": "ANPAIHWYO6WSDNCG64M2W",
"PolicyName": "AWSDirectoryServiceReadOnlyAccess",
"UpdateDate": "2016-02-24T23:11:18+00:00",
"VersionId": "v3"
},
"AWSEC2SpotServiceRolePolicy": {
"Arn": "arn:aws:iam::aws:policy/aws-service-role/AWSEC2SpotServiceRolePolicy",
"AttachmentCount": 0,
"CreateDate": "2017-09-18T18:51:54+00:00",
"DefaultVersionId": "v1",
"Document": {
"Statement": [
{
"Action": [
"ec2:DescribeInstances",
"ec2:StartInstances",
"ec2:StopInstances"
],
"Effect": "Allow",
"Resource": [
"*"
]
},
{
"Action": [
"iam:PassRole"
],
"Condition": {
"StringLike": {
"iam:PassedToService": "ec2.amazonaws.com"
}
},
"Effect": "Allow",
"Resource": [
"*"
]
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/aws-service-role/",
"PolicyId": "ANPAIZJJBQNXQYVKTEXGM",
"PolicyName": "AWSEC2SpotServiceRolePolicy",
"UpdateDate": "2017-09-18T18:51:54+00:00",
"VersionId": "v1"
},
"AWSElasticBeanstalkCustomPlatformforEC2Role": {
"Arn": "arn:aws:iam::aws:policy/AWSElasticBeanstalkCustomPlatformforEC2Role",
"AttachmentCount": 0,
"CreateDate": "2017-02-21T22:50:30+00:00",
"DefaultVersionId": "v1",
"Document": {
"Statement": [
{
"Action": [
"ec2:AttachVolume",
"ec2:AuthorizeSecurityGroupIngress",
"ec2:CopyImage",
"ec2:CreateImage",
"ec2:CreateKeypair",
"ec2:CreateSecurityGroup",
"ec2:CreateSnapshot",
"ec2:CreateTags",
"ec2:CreateVolume",
"ec2:DeleteKeypair",
"ec2:DeleteSecurityGroup",
"ec2:DeleteSnapshot",
"ec2:DeleteVolume",
"ec2:DeregisterImage",
"ec2:DescribeImageAttribute",
"ec2:DescribeImages",
"ec2:DescribeInstances",
"ec2:DescribeRegions",
"ec2:DescribeSecurityGroups",
"ec2:DescribeSnapshots",
"ec2:DescribeSubnets",
"ec2:DescribeTags",
"ec2:DescribeVolumes",
"ec2:DetachVolume",
"ec2:GetPasswordData",
"ec2:ModifyImageAttribute",
"ec2:ModifyInstanceAttribute",
"ec2:ModifySnapshotAttribute",
"ec2:RegisterImage",
"ec2:RunInstances",
"ec2:StopInstances",
"ec2:TerminateInstances"
],
"Effect": "Allow",
"Resource": "*",
"Sid": "EC2Access"
},
{
"Action": [
"s3:Get*",
"s3:List*",
"s3:PutObject"
],
"Effect": "Allow",
"Resource": [
"arn:aws:s3:::elasticbeanstalk-*",
"arn:aws:s3:::elasticbeanstalk-*/*"
],
"Sid": "BucketAccess"
},
{
"Action": [
"logs:CreateLogGroup",
"logs:CreateLogStream",
"logs:PutLogEvents",
"logs:DescribeLogStreams"
],
"Effect": "Allow",
"Resource": "arn:aws:logs:*:*:log-group:/aws/elasticbeanstalk/platform/*",
"Sid": "CloudWatchLogsAccess"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/",
"PolicyId": "ANPAJRVFXSS6LEIQGBKDY",
"PolicyName": "AWSElasticBeanstalkCustomPlatformforEC2Role",
"UpdateDate": "2017-02-21T22:50:30+00:00",
"VersionId": "v1"
},
"AWSElasticBeanstalkEnhancedHealth": {
"Arn": "arn:aws:iam::aws:policy/service-role/AWSElasticBeanstalkEnhancedHealth",
"AttachmentCount": 0,
"CreateDate": "2016-08-22T20:28:36+00:00",
"DefaultVersionId": "v2",
"Document": {
"Statement": [
{
"Action": [
"elasticloadbalancing:DescribeInstanceHealth",
"elasticloadbalancing:DescribeLoadBalancers",
"elasticloadbalancing:DescribeTargetHealth",
"ec2:DescribeInstances",
"ec2:DescribeInstanceStatus",
"ec2:GetConsoleOutput",
"ec2:AssociateAddress",
"ec2:DescribeAddresses",
"ec2:DescribeSecurityGroups",
"sqs:GetQueueAttributes",
"sqs:GetQueueUrl",
"autoscaling:DescribeAutoScalingGroups",
"autoscaling:DescribeAutoScalingInstances",
"autoscaling:DescribeScalingActivities",
"autoscaling:DescribeNotificationConfigurations"
],
"Effect": "Allow",
"Resource": [
"*"
]
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/service-role/",
"PolicyId": "ANPAIH5EFJNMOGUUTKLFE",
"PolicyName": "AWSElasticBeanstalkEnhancedHealth",
"UpdateDate": "2016-08-22T20:28:36+00:00",
"VersionId": "v2"
},
"AWSElasticBeanstalkFullAccess": {
"Arn": "arn:aws:iam::aws:policy/AWSElasticBeanstalkFullAccess",
"AttachmentCount": 0,
"CreateDate": "2016-12-21T01:00:13+00:00",
"DefaultVersionId": "v5",
"Document": {
"Statement": [
{
"Action": [
"elasticbeanstalk:*",
"ec2:*",
"ecs:*",
"ecr:*",
"elasticloadbalancing:*",
"autoscaling:*",
"cloudwatch:*",
"s3:*",
"sns:*",
"cloudformation:*",
"dynamodb:*",
"rds:*",
"sqs:*",
"logs:*",
"iam:GetPolicyVersion",
"iam:GetRole",
"iam:PassRole",
"iam:ListRolePolicies",
"iam:ListAttachedRolePolicies",
"iam:ListInstanceProfiles",
"iam:ListRoles",
"iam:ListServerCertificates",
"acm:DescribeCertificate",
"acm:ListCertificates",
"codebuild:CreateProject",
"codebuild:DeleteProject",
"codebuild:BatchGetBuilds",
"codebuild:StartBuild"
],
"Effect": "Allow",
"Resource": "*"
},
{
"Action": [
"iam:AddRoleToInstanceProfile",
"iam:CreateInstanceProfile",
"iam:CreateRole"
],
"Effect": "Allow",
"Resource": [
"arn:aws:iam::*:role/aws-elasticbeanstalk*",
"arn:aws:iam::*:instance-profile/aws-elasticbeanstalk*"
]
},
{
"Action": [
"iam:AttachRolePolicy"
],
"Condition": {
"StringLike": {
"iam:PolicyArn": [
"arn:aws:iam::aws:policy/AWSElasticBeanstalk*",
"arn:aws:iam::aws:policy/service-role/AWSElasticBeanstalk*"
]
}
},
"Effect": "Allow",
"Resource": "*"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/",
"PolicyId": "ANPAIZYX2YLLBW2LJVUFW",
"PolicyName": "AWSElasticBeanstalkFullAccess",
"UpdateDate": "2016-12-21T01:00:13+00:00",
"VersionId": "v5"
},
"AWSElasticBeanstalkMulticontainerDocker": {
"Arn": "arn:aws:iam::aws:policy/AWSElasticBeanstalkMulticontainerDocker",
"AttachmentCount": 0,
"CreateDate": "2016-06-06T23:45:37+00:00",
"DefaultVersionId": "v2",
"Document": {
"Statement": [
{
"Action": [
"ecs:Poll",
"ecs:StartTask",
"ecs:StopTask",
"ecs:DiscoverPollEndpoint",
"ecs:StartTelemetrySession",
"ecs:RegisterContainerInstance",
"ecs:DeregisterContainerInstance",
"ecs:DescribeContainerInstances",
"ecs:Submit*",
"ecs:DescribeTasks"
],
"Effect": "Allow",
"Resource": "*",
"Sid": "ECSAccess"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/",
"PolicyId": "ANPAJ45SBYG72SD6SHJEY",
"PolicyName": "AWSElasticBeanstalkMulticontainerDocker",
"UpdateDate": "2016-06-06T23:45:37+00:00",
"VersionId": "v2"
},
"AWSElasticBeanstalkReadOnlyAccess": {
"Arn": "arn:aws:iam::aws:policy/AWSElasticBeanstalkReadOnlyAccess",
"AttachmentCount": 0,
"CreateDate": "2015-02-06T18:40:19+00:00",
"DefaultVersionId": "v1",
"Document": {
"Statement": [
{
"Action": [
"elasticbeanstalk:Check*",
"elasticbeanstalk:Describe*",
"elasticbeanstalk:List*",
"elasticbeanstalk:RequestEnvironmentInfo",
"elasticbeanstalk:RetrieveEnvironmentInfo",
"ec2:Describe*",
"elasticloadbalancing:Describe*",
"autoscaling:Describe*",
"cloudwatch:Describe*",
"cloudwatch:List*",
"cloudwatch:Get*",
"s3:Get*",
"s3:List*",
"sns:Get*",
"sns:List*",
"cloudformation:Describe*",
"cloudformation:Get*",
"cloudformation:List*",
"cloudformation:Validate*",
"cloudformation:Estimate*",
"rds:Describe*",
"sqs:Get*",
"sqs:List*"
],
"Effect": "Allow",
"Resource": "*"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/",
"PolicyId": "ANPAI47KNGXDAXFD4SDHG",
"PolicyName": "AWSElasticBeanstalkReadOnlyAccess",
"UpdateDate": "2015-02-06T18:40:19+00:00",
"VersionId": "v1"
},
"AWSElasticBeanstalkService": {
"Arn": "arn:aws:iam::aws:policy/service-role/AWSElasticBeanstalkService",
"AttachmentCount": 0,
"CreateDate": "2017-06-21T16:49:23+00:00",
"DefaultVersionId": "v11",
"Document": {
"Statement": [
{
"Action": [
"cloudformation:*"
],
"Effect": "Allow",
"Resource": [
"arn:aws:cloudformation:*:*:stack/awseb-*",
"arn:aws:cloudformation:*:*:stack/eb-*"
],
"Sid": "AllowCloudformationOperationsOnElasticBeanstalkStacks"
},
{
"Action": [
"logs:DeleteLogGroup"
],
"Effect": "Allow",
"Resource": [
"arn:aws:logs:*:*:log-group:/aws/elasticbeanstalk*"
],
"Sid": "AllowDeleteCloudwatchLogGroups"
},
{
"Action": [
"s3:*"
],
"Effect": "Allow",
"Resource": [
"arn:aws:s3:::elasticbeanstalk-*",
"arn:aws:s3:::elasticbeanstalk-*/*"
],
"Sid": "AllowS3OperationsOnElasticBeanstalkBuckets"
},
{
"Action": [
"autoscaling:AttachInstances",
"autoscaling:CreateAutoScalingGroup",
"autoscaling:CreateLaunchConfiguration",
"autoscaling:DeleteLaunchConfiguration",
"autoscaling:DeleteAutoScalingGroup",
"autoscaling:DeleteScheduledAction",
"autoscaling:DescribeAccountLimits",
"autoscaling:DescribeAutoScalingGroups",
"autoscaling:DescribeAutoScalingInstances",
"autoscaling:DescribeLaunchConfigurations",
"autoscaling:DescribeLoadBalancers",
"autoscaling:DescribeNotificationConfigurations",
"autoscaling:DescribeScalingActivities",
"autoscaling:DescribeScheduledActions",
"autoscaling:DetachInstances",
"autoscaling:PutScheduledUpdateGroupAction",
"autoscaling:ResumeProcesses",
"autoscaling:SetDesiredCapacity",
"autoscaling:SuspendProcesses",
"autoscaling:TerminateInstanceInAutoScalingGroup",
"autoscaling:UpdateAutoScalingGroup",
"cloudwatch:PutMetricAlarm",
"ec2:AssociateAddress",
"ec2:AllocateAddress",
"ec2:AuthorizeSecurityGroupEgress",
"ec2:AuthorizeSecurityGroupIngress",
"ec2:CreateSecurityGroup",
"ec2:DeleteSecurityGroup",
"ec2:DescribeAccountAttributes",
"ec2:DescribeAddresses",
"ec2:DescribeImages",
"ec2:DescribeInstances",
"ec2:DescribeKeyPairs",
"ec2:DescribeSecurityGroups",
"ec2:DescribeSnapshots",
"ec2:DescribeSubnets",
"ec2:DescribeVpcs",
"ec2:DisassociateAddress",
"ec2:ReleaseAddress",
"ec2:RevokeSecurityGroupEgress",
"ec2:RevokeSecurityGroupIngress",
"ec2:TerminateInstances",
"ecs:CreateCluster",
"ecs:DeleteCluster",
"ecs:DescribeClusters",
"ecs:RegisterTaskDefinition",
"elasticbeanstalk:*",
"elasticloadbalancing:ApplySecurityGroupsToLoadBalancer",
"elasticloadbalancing:ConfigureHealthCheck",
"elasticloadbalancing:CreateLoadBalancer",
"elasticloadbalancing:DeleteLoadBalancer",
"elasticloadbalancing:DeregisterInstancesFromLoadBalancer",
"elasticloadbalancing:DescribeInstanceHealth",
"elasticloadbalancing:DescribeLoadBalancers",
"elasticloadbalancing:DescribeTargetHealth",
"elasticloadbalancing:RegisterInstancesWithLoadBalancer",
"elasticloadbalancing:DescribeTargetGroups",
"elasticloadbalancing:RegisterTargets",
"elasticloadbalancing:DeregisterTargets",
"iam:ListRoles",
"iam:PassRole",
"logs:CreateLogGroup",
"logs:PutRetentionPolicy",
"rds:DescribeDBEngineVersions",
"rds:DescribeDBInstances",
"rds:DescribeOrderableDBInstanceOptions",
"s3:CopyObject",
"s3:GetObject",
"s3:GetObjectAcl",
"s3:GetObjectMetadata",
"s3:ListBucket",
"s3:listBuckets",
"s3:ListObjects",
"sns:CreateTopic",
"sns:GetTopicAttributes",
"sns:ListSubscriptionsByTopic",
"sns:Subscribe",
"sns:SetTopicAttributes",
"sqs:GetQueueAttributes",
"sqs:GetQueueUrl",
"codebuild:CreateProject",
"codebuild:DeleteProject",
"codebuild:BatchGetBuilds",
"codebuild:StartBuild"
],
"Effect": "Allow",
"Resource": [
"*"
],
"Sid": "AllowOperations"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/service-role/",
"PolicyId": "ANPAJKQ5SN74ZQ4WASXBM",
"PolicyName": "AWSElasticBeanstalkService",
"UpdateDate": "2017-06-21T16:49:23+00:00",
"VersionId": "v11"
},
"AWSElasticBeanstalkServiceRolePolicy": {
"Arn": "arn:aws:iam::aws:policy/aws-service-role/AWSElasticBeanstalkServiceRolePolicy",
"AttachmentCount": 0,
"CreateDate": "2017-09-13T23:46:37+00:00",
"DefaultVersionId": "v1",
"Document": {
"Statement": [
{
"Action": [
"iam:PassRole"
],
"Condition": {
"StringLikeIfExists": {
"iam:PassedToService": "elasticbeanstalk.amazonaws.com"
}
},
"Effect": "Allow",
"Resource": "*",
"Sid": "AllowPassRoleToElasticBeanstalk"
},
{
"Action": [
"cloudformation:*"
],
"Effect": "Allow",
"Resource": [
"arn:aws:cloudformation:*:*:stack/awseb-*",
"arn:aws:cloudformation:*:*:stack/eb-*"
],
"Sid": "AllowCloudformationOperationsOnElasticBeanstalkStacks"
},
{
"Action": [
"logs:DeleteLogGroup"
],
"Effect": "Allow",
"Resource": [
"arn:aws:logs:*:*:log-group:/aws/elasticbeanstalk*"
],
"Sid": "AllowDeleteCloudwatchLogGroups"
},
{
"Action": [
"s3:*"
],
"Effect": "Allow",
"Resource": [
"arn:aws:s3:::elasticbeanstalk-*",
"arn:aws:s3:::elasticbeanstalk-*/*"
],
"Sid": "AllowS3OperationsOnElasticBeanstalkBuckets"
},
{
"Action": [
"autoscaling:AttachInstances",
"autoscaling:CreateAutoScalingGroup",
"autoscaling:CreateLaunchConfiguration",
"autoscaling:DeleteLaunchConfiguration",
"autoscaling:DeleteAutoScalingGroup",
"autoscaling:DeleteScheduledAction",
"autoscaling:DescribeAccountLimits",
"autoscaling:DescribeAutoScalingGroups",
"autoscaling:DescribeAutoScalingInstances",
"autoscaling:DescribeLaunchConfigurations",
"autoscaling:DescribeLoadBalancers",
"autoscaling:DescribeNotificationConfigurations",
"autoscaling:DescribeScalingActivities",
"autoscaling:DescribeScheduledActions",
"autoscaling:DetachInstances",
"autoscaling:PutScheduledUpdateGroupAction",
"autoscaling:ResumeProcesses",
"autoscaling:SetDesiredCapacity",
"autoscaling:SuspendProcesses",
"autoscaling:TerminateInstanceInAutoScalingGroup",
"autoscaling:UpdateAutoScalingGroup",
"cloudwatch:PutMetricAlarm",
"ec2:AssociateAddress",
"ec2:AllocateAddress",
"ec2:AuthorizeSecurityGroupEgress",
"ec2:AuthorizeSecurityGroupIngress",
"ec2:CreateSecurityGroup",
"ec2:DeleteSecurityGroup",
"ec2:DescribeAccountAttributes",
"ec2:DescribeAddresses",
"ec2:DescribeImages",
"ec2:DescribeInstances",
"ec2:DescribeKeyPairs",
"ec2:DescribeSecurityGroups",
"ec2:DescribeSubnets",
"ec2:DescribeVpcs",
"ec2:DisassociateAddress",
"ec2:ReleaseAddress",
"ec2:RevokeSecurityGroupEgress",
"ec2:RevokeSecurityGroupIngress",
"ec2:TerminateInstances",
"ecs:CreateCluster",
"ecs:DeleteCluster",
"ecs:DescribeClusters",
"ecs:RegisterTaskDefinition",
"elasticbeanstalk:*",
"elasticloadbalancing:ApplySecurityGroupsToLoadBalancer",
"elasticloadbalancing:ConfigureHealthCheck",
"elasticloadbalancing:CreateLoadBalancer",
"elasticloadbalancing:DeleteLoadBalancer",
"elasticloadbalancing:DeregisterInstancesFromLoadBalancer",
"elasticloadbalancing:DescribeInstanceHealth",
"elasticloadbalancing:DescribeLoadBalancers",
"elasticloadbalancing:DescribeTargetHealth",
"elasticloadbalancing:RegisterInstancesWithLoadBalancer",
"elasticloadbalancing:DescribeTargetGroups",
"elasticloadbalancing:RegisterTargets",
"elasticloadbalancing:DeregisterTargets",
"iam:ListRoles",
"logs:CreateLogGroup",
"logs:PutRetentionPolicy",
"rds:DescribeDBInstances",
"rds:DescribeOrderableDBInstanceOptions",
"rds:DescribeDBEngineVersions",
"sns:ListTopics",
"sns:GetTopicAttributes",
"sns:ListSubscriptionsByTopic",
"sqs:GetQueueAttributes",
"sqs:GetQueueUrl",
"codebuild:CreateProject",
"codebuild:DeleteProject",
"codebuild:BatchGetBuilds",
"codebuild:StartBuild"
],
"Effect": "Allow",
"Resource": [
"*"
],
"Sid": "AllowOperations"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/aws-service-role/",
"PolicyId": "ANPAIID62QSI3OSIPQXTM",
"PolicyName": "AWSElasticBeanstalkServiceRolePolicy",
"UpdateDate": "2017-09-13T23:46:37+00:00",
"VersionId": "v1"
},
"AWSElasticBeanstalkWebTier": {
"Arn": "arn:aws:iam::aws:policy/AWSElasticBeanstalkWebTier",
"AttachmentCount": 0,
"CreateDate": "2016-12-21T02:06:25+00:00",
"DefaultVersionId": "v4",
"Document": {
"Statement": [
{
"Action": [
"s3:Get*",
"s3:List*",
"s3:PutObject"
],
"Effect": "Allow",
"Resource": [
"arn:aws:s3:::elasticbeanstalk-*",
"arn:aws:s3:::elasticbeanstalk-*/*"
],
"Sid": "BucketAccess"
},
{
"Action": [
"xray:PutTraceSegments",
"xray:PutTelemetryRecords"
],
"Effect": "Allow",
"Resource": "*",
"Sid": "XRayAccess"
},
{
"Action": [
"logs:PutLogEvents",
"logs:CreateLogStream"
],
"Effect": "Allow",
"Resource": [
"arn:aws:logs:*:*:log-group:/aws/elasticbeanstalk*"
],
"Sid": "CloudWatchLogsAccess"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/",
"PolicyId": "ANPAIUF4325SJYOREKW3A",
"PolicyName": "AWSElasticBeanstalkWebTier",
"UpdateDate": "2016-12-21T02:06:25+00:00",
"VersionId": "v4"
},
"AWSElasticBeanstalkWorkerTier": {
"Arn": "arn:aws:iam::aws:policy/AWSElasticBeanstalkWorkerTier",
"AttachmentCount": 0,
"CreateDate": "2016-12-21T02:01:55+00:00",
"DefaultVersionId": "v4",
"Document": {
"Statement": [
{
"Action": [
"cloudwatch:PutMetricData"
],
"Effect": "Allow",
"Resource": "*",
"Sid": "MetricsAccess"
},
{
"Action": [
"xray:PutTraceSegments",
"xray:PutTelemetryRecords"
],
"Effect": "Allow",
"Resource": "*",
"Sid": "XRayAccess"
},
{
"Action": [
"sqs:ChangeMessageVisibility",
"sqs:DeleteMessage",
"sqs:ReceiveMessage",
"sqs:SendMessage"
],
"Effect": "Allow",
"Resource": "*",
"Sid": "QueueAccess"
},
{
"Action": [
"s3:Get*",
"s3:List*",
"s3:PutObject"
],
"Effect": "Allow",
"Resource": [
"arn:aws:s3:::elasticbeanstalk-*",
"arn:aws:s3:::elasticbeanstalk-*/*"
],
"Sid": "BucketAccess"
},
{
"Action": [
"dynamodb:BatchGetItem",
"dynamodb:BatchWriteItem",
"dynamodb:DeleteItem",
"dynamodb:GetItem",
"dynamodb:PutItem",
"dynamodb:Query",
"dynamodb:Scan",
"dynamodb:UpdateItem"
],
"Effect": "Allow",
"Resource": [
"arn:aws:dynamodb:*:*:table/*-stack-AWSEBWorkerCronLeaderRegistry*"
],
"Sid": "DynamoPeriodicTasks"
},
{
"Action": [
"logs:PutLogEvents",
"logs:CreateLogStream"
],
"Effect": "Allow",
"Resource": [
"arn:aws:logs:*:*:log-group:/aws/elasticbeanstalk*"
],
"Sid": "CloudWatchLogsAccess"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/",
"PolicyId": "ANPAJQDLBRSJVKVF4JMSK",
"PolicyName": "AWSElasticBeanstalkWorkerTier",
"UpdateDate": "2016-12-21T02:01:55+00:00",
"VersionId": "v4"
},
"AWSElasticLoadBalancingClassicServiceRolePolicy": {
"Arn": "arn:aws:iam::aws:policy/aws-service-role/AWSElasticLoadBalancingClassicServiceRolePolicy",
"AttachmentCount": 0,
"CreateDate": "2017-09-19T22:36:18+00:00",
"DefaultVersionId": "v1",
"Document": {
"Statement": [
{
"Action": [
"ec2:DescribeAddresses",
"ec2:DescribeInstances",
"ec2:DescribeSubnets",
"ec2:DescribeSecurityGroups",
"ec2:DescribeVpcs",
"ec2:DescribeInternetGateways",
"ec2:DescribeAccountAttributes",
"ec2:DescribeClassicLinkInstances",
"ec2:DescribeVpcClassicLink",
"ec2:CreateSecurityGroup",
"ec2:CreateNetworkInterface",
"ec2:DeleteNetworkInterface",
"ec2:ModifyNetworkInterface",
"ec2:ModifyNetworkInterfaceAttribute",
"ec2:AuthorizeSecurityGroupIngress",
"ec2:AssociateAddress",
"ec2:DisassociateAddress",
"ec2:AttachNetworkInterface",
"ec2:DetachNetworkInterface",
"ec2:AssignPrivateIpAddresses",
"ec2:AssignIpv6Addresses",
"ec2:UnassignIpv6Addresses"
],
"Effect": "Allow",
"Resource": "*"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/aws-service-role/",
"PolicyId": "ANPAIUMWW3QP7DPZPNVU4",
"PolicyName": "AWSElasticLoadBalancingClassicServiceRolePolicy",
"UpdateDate": "2017-09-19T22:36:18+00:00",
"VersionId": "v1"
},
"AWSElasticLoadBalancingServiceRolePolicy": {
"Arn": "arn:aws:iam::aws:policy/aws-service-role/AWSElasticLoadBalancingServiceRolePolicy",
"AttachmentCount": 0,
"CreateDate": "2017-09-19T22:19:04+00:00",
"DefaultVersionId": "v1",
"Document": {
"Statement": [
{
"Action": [
"ec2:DescribeAddresses",
"ec2:DescribeInstances",
"ec2:DescribeSubnets",
"ec2:DescribeSecurityGroups",
"ec2:DescribeVpcs",
"ec2:DescribeInternetGateways",
"ec2:DescribeAccountAttributes",
"ec2:DescribeClassicLinkInstances",
"ec2:DescribeVpcClassicLink",
"ec2:CreateSecurityGroup",
"ec2:CreateNetworkInterface",
"ec2:DeleteNetworkInterface",
"ec2:ModifyNetworkInterface",
"ec2:ModifyNetworkInterfaceAttribute",
"ec2:AuthorizeSecurityGroupIngress",
"ec2:AssociateAddress",
"ec2:DisassociateAddress",
"ec2:AttachNetworkInterface",
"ec2:DetachNetworkInterface",
"ec2:AssignPrivateIpAddresses",
"ec2:AssignIpv6Addresses",
"ec2:UnassignIpv6Addresses"
],
"Effect": "Allow",
"Resource": "*"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/aws-service-role/",
"PolicyId": "ANPAIMHWGGSRHLOQUICJQ",
"PolicyName": "AWSElasticLoadBalancingServiceRolePolicy",
"UpdateDate": "2017-09-19T22:19:04+00:00",
"VersionId": "v1"
},
"AWSEnhancedClassicNetworkingMangementPolicy": {
"Arn": "arn:aws:iam::aws:policy/aws-service-role/AWSEnhancedClassicNetworkingMangementPolicy",
"AttachmentCount": 0,
"CreateDate": "2017-09-20T17:29:09+00:00",
"DefaultVersionId": "v1",
"Document": {
"Statement": [
{
"Action": [
"ec2:DescribeInstances",
"ec2:DescribeSecurityGroups"
],
"Effect": "Allow",
"Resource": "*"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/aws-service-role/",
"PolicyId": "ANPAI7T4V2HZTS72QVO52",
"PolicyName": "AWSEnhancedClassicNetworkingMangementPolicy",
"UpdateDate": "2017-09-20T17:29:09+00:00",
"VersionId": "v1"
},
"AWSGlueConsoleFullAccess": {
"Arn": "arn:aws:iam::aws:policy/AWSGlueConsoleFullAccess",
"AttachmentCount": 0,
"CreateDate": "2017-09-13T00:12:54+00:00",
"DefaultVersionId": "v2",
"Document": {
"Statement": [
{
"Action": [
"glue:*",
"redshift:DescribeClusters",
"redshift:DescribeClusterSubnetGroups",
"iam:ListRoles",
"iam:ListRolePolicies",
"iam:GetRole",
"iam:GetRolePolicy",
"ec2:DescribeSecurityGroups",
"ec2:DescribeSubnets",
"ec2:DescribeVpcs",
"ec2:DescribeVpcEndpoints",
"ec2:DescribeRouteTables",
"ec2:DescribeVpcAttribute",
"ec2:DescribeKeyPairs",
"ec2:DescribeInstances",
"rds:DescribeDBInstances",
"s3:ListAllMyBuckets",
"s3:ListBucket",
"s3:GetBucketAcl",
"s3:GetBucketLocation",
"cloudformation:DescribeStacks",
"cloudformation:GetTemplateSummary"
],
"Effect": "Allow",
"Resource": [
"*"
]
},
{
"Action": [
"s3:GetObject",
"s3:PutObject"
],
"Effect": "Allow",
"Resource": [
"arn:aws:s3:::aws-glue-*/*",
"arn:aws:s3:::*/*aws-glue-*/*",
"arn:aws:s3:::aws-glue-*"
]
},
{
"Action": [
"s3:CreateBucket"
],
"Effect": "Allow",
"Resource": [
"arn:aws:s3:::aws-glue-*"
]
},
{
"Action": [
"logs:GetLogEvents"
],
"Effect": "Allow",
"Resource": [
"arn:aws:logs:*:*:/aws-glue/*"
]
},
{
"Action": [
"cloudformation:CreateStack",
"cloudformation:DeleteStack"
],
"Effect": "Allow",
"Resource": "arn:aws:cloudformation:*:*:stack/aws-glue*/*"
},
{
"Action": [
"ec2:TerminateInstances",
"ec2:RunInstances",
"ec2:CreateTags",
"ec2:DeleteTags"
],
"Condition": {
"ForAllValues:StringEquals": {
"aws:TagKeys": [
"aws-glue-dev-endpoint"
]
}
},
"Effect": "Allow",
"Resource": [
"*"
]
},
{
"Action": [
"iam:PassRole"
],
"Condition": {
"StringLike": {
"iam:PassedToService": [
"glue.amazonaws.com"
]
}
},
"Effect": "Allow",
"Resource": "arn:aws:iam::*:role/AWSGlueServiceRole*"
},
{
"Action": [
"iam:PassRole"
],
"Condition": {
"StringLike": {
"iam:PassedToService": [
"ec2.amazonaws.com"
]
}
},
"Effect": "Allow",
"Resource": "arn:aws:iam::*:role/AWSGlueServiceNotebookRole*"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/",
"PolicyId": "ANPAJNZGDEOD7MISOVSVI",
"PolicyName": "AWSGlueConsoleFullAccess",
"UpdateDate": "2017-09-13T00:12:54+00:00",
"VersionId": "v2"
},
"AWSGlueServiceNotebookRole": {
"Arn": "arn:aws:iam::aws:policy/service-role/AWSGlueServiceNotebookRole",
"AttachmentCount": 0,
"CreateDate": "2017-08-17T18:08:29+00:00",
"DefaultVersionId": "v2",
"Document": {
"Statement": [
{
"Action": [
"glue:CreateDatabase",
"glue:CreatePartition",
"glue:CreateTable",
"glue:DeleteDatabase",
"glue:DeletePartition",
"glue:DeleteTable",
"glue:GetDatabase",
"glue:GetDatabases",
"glue:GetPartition",
"glue:GetPartitions",
"glue:GetTable",
"glue:GetTableVersions",
"glue:GetTables",
"glue:UpdateDatabase",
"glue:UpdatePartition",
"glue:UpdateTable",
"glue:CreateBookmark",
"glue:GetBookmark",
"glue:UpdateBookmark",
"glue:GetMetric",
"glue:PutMetric",
"glue:CreateConnection",
"glue:CreateJob",
"glue:DeleteConnection",
"glue:DeleteJob",
"glue:GetConnection",
"glue:GetConnections",
"glue:GetDevEndpoint",
"glue:GetDevEndpoints",
"glue:GetJob",
"glue:GetJobs",
"glue:UpdateJob",
"glue:BatchDeleteConnection",
"glue:UpdateConnection",
"glue:GetUserDefinedFunction",
"glue:UpdateUserDefinedFunction",
"glue:GetUserDefinedFunctions",
"glue:DeleteUserDefinedFunction",
"glue:CreateUserDefinedFunction",
"glue:BatchGetPartition",
"glue:BatchDeletePartition",
"glue:BatchCreatePartition",
"glue:BatchDeleteTable",
"glue:UpdateDevEndpoint",
"s3:GetBucketLocation",
"s3:ListBucket",
"s3:ListAllMyBuckets",
"s3:GetBucketAcl"
],
"Effect": "Allow",
"Resource": [
"*"
]
},
{
"Action": [
"s3:GetObject"
],
"Effect": "Allow",
"Resource": [
"arn:aws:s3:::crawler-public*",
"arn:aws:s3:::aws-glue*"
]
},
{
"Action": [
"s3:PutObject",
"s3:DeleteObject"
],
"Effect": "Allow",
"Resource": [
"arn:aws:s3:::aws-glue*"
]
},
{
"Action": [
"ec2:CreateTags",
"ec2:DeleteTags"
],
"Condition": {
"ForAllValues:StringEquals": {
"aws:TagKeys": [
"aws-glue-service-resource"
]
}
},
"Effect": "Allow",
"Resource": [
"arn:aws:ec2:*:*:network-interface/*",
"arn:aws:ec2:*:*:security-group/*",
"arn:aws:ec2:*:*:instance/*"
]
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/service-role/",
"PolicyId": "ANPAIMRC6VZUHJYCTKWFI",
"PolicyName": "AWSGlueServiceNotebookRole",
"UpdateDate": "2017-08-17T18:08:29+00:00",
"VersionId": "v2"
},
"AWSGlueServiceRole": {
"Arn": "arn:aws:iam::aws:policy/service-role/AWSGlueServiceRole",
"AttachmentCount": 0,
"CreateDate": "2017-08-23T21:35:25+00:00",
"DefaultVersionId": "v3",
"Document": {
"Statement": [
{
"Action": [
"glue:*",
"s3:GetBucketLocation",
"s3:ListBucket",
"s3:ListAllMyBuckets",
"s3:GetBucketAcl",
"ec2:DescribeVpcEndpoints",
"ec2:DescribeRouteTables",
"ec2:CreateNetworkInterface",
"ec2:DeleteNetworkInterface",
"ec2:DescribeNetworkInterfaces",
"ec2:DescribeSecurityGroups",
"ec2:DescribeSubnets",
"ec2:DescribeVpcAttribute",
"iam:ListRolePolicies",
"iam:GetRole",
"iam:GetRolePolicy"
],
"Effect": "Allow",
"Resource": [
"*"
]
},
{
"Action": [
"s3:CreateBucket"
],
"Effect": "Allow",
"Resource": [
"arn:aws:s3:::aws-glue-*"
]
},
{
"Action": [
"s3:GetObject",
"s3:PutObject",
"s3:DeleteObject"
],
"Effect": "Allow",
"Resource": [
"arn:aws:s3:::aws-glue-*/*",
"arn:aws:s3:::*/*aws-glue-*/*"
]
},
{
"Action": [
"s3:GetObject"
],
"Effect": "Allow",
"Resource": [
"arn:aws:s3:::crawler-public*",
"arn:aws:s3:::aws-glue-*"
]
},
{
"Action": [
"logs:CreateLogGroup",
"logs:CreateLogStream",
"logs:PutLogEvents"
],
"Effect": "Allow",
"Resource": [
"arn:aws:logs:*:*:/aws-glue/*"
]
},
{
"Action": [
"ec2:CreateTags",
"ec2:DeleteTags"
],
"Condition": {
"ForAllValues:StringEquals": {
"aws:TagKeys": [
"aws-glue-service-resource"
]
}
},
"Effect": "Allow",
"Resource": [
"arn:aws:ec2:*:*:network-interface/*",
"arn:aws:ec2:*:*:security-group/*",
"arn:aws:ec2:*:*:instance/*"
]
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/service-role/",
"PolicyId": "ANPAIRUJCPEBPMEZFAS32",
"PolicyName": "AWSGlueServiceRole",
"UpdateDate": "2017-08-23T21:35:25+00:00",
"VersionId": "v3"
},
"AWSGreengrassFullAccess": {
"Arn": "arn:aws:iam::aws:policy/AWSGreengrassFullAccess",
"AttachmentCount": 0,
"CreateDate": "2017-05-03T00:47:37+00:00",
"DefaultVersionId": "v1",
"Document": {
"Statement": [
{
"Action": [
"greengrass:*"
],
"Effect": "Allow",
"Resource": "*"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/",
"PolicyId": "ANPAJWPV6OBK4QONH4J3O",
"PolicyName": "AWSGreengrassFullAccess",
"UpdateDate": "2017-05-03T00:47:37+00:00",
"VersionId": "v1"
},
"AWSGreengrassResourceAccessRolePolicy": {
"Arn": "arn:aws:iam::aws:policy/service-role/AWSGreengrassResourceAccessRolePolicy",
"AttachmentCount": 0,
"CreateDate": "2017-05-26T23:10:54+00:00",
"DefaultVersionId": "v3",
"Document": {
"Statement": [
{
"Action": [
"iot:DeleteThingShadow",
"iot:GetThingShadow",
"iot:UpdateThingShadow"
],
"Effect": "Allow",
"Resource": [
"arn:aws:iot:*:*:thing/GG_*",
"arn:aws:iot:*:*:thing/*-gcm",
"arn:aws:iot:*:*:thing/*-gda",
"arn:aws:iot:*:*:thing/*-gci"
],
"Sid": "AllowGreengrassAccessToShadows"
},
{
"Action": [
"iot:DescribeThing"
],
"Effect": "Allow",
"Resource": "arn:aws:iot:*:*:thing/*",
"Sid": "AllowGreengrassToDescribeThings"
},
{
"Action": [
"iot:DescribeCertificate"
],
"Effect": "Allow",
"Resource": "arn:aws:iot:*:*:cert/*",
"Sid": "AllowGreengrassToDescribeCertificates"
},
{
"Action": [
"greengrass:*"
],
"Effect": "Allow",
"Resource": "*",
"Sid": "AllowGreengrassToCallGreengrassServices"
},
{
"Action": [
"lambda:GetFunction",
"lambda:GetFunctionConfiguration"
],
"Effect": "Allow",
"Resource": "*",
"Sid": "AllowGreengrassToGetLambdaFunctions"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/service-role/",
"PolicyId": "ANPAJPKEIMB6YMXDEVRTM",
"PolicyName": "AWSGreengrassResourceAccessRolePolicy",
"UpdateDate": "2017-05-26T23:10:54+00:00",
"VersionId": "v3"
},
"AWSHealthFullAccess": {
"Arn": "arn:aws:iam::aws:policy/AWSHealthFullAccess",
"AttachmentCount": 0,
"CreateDate": "2016-12-06T12:30:31+00:00",
"DefaultVersionId": "v1",
"Document": {
"Statement": [
{
"Action": [
"health:*"
],
"Effect": "Allow",
"Resource": "*"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/",
"PolicyId": "ANPAI3CUMPCPEUPCSXC4Y",
"PolicyName": "AWSHealthFullAccess",
"UpdateDate": "2016-12-06T12:30:31+00:00",
"VersionId": "v1"
},
"AWSImportExportFullAccess": {
"Arn": "arn:aws:iam::aws:policy/AWSImportExportFullAccess",
"AttachmentCount": 0,
"CreateDate": "2015-02-06T18:40:43+00:00",
"DefaultVersionId": "v1",
"Document": {
"Statement": [
{
"Action": [
"importexport:*"
],
"Effect": "Allow",
"Resource": "*"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/",
"PolicyId": "ANPAJCQCT4JGTLC6722MQ",
"PolicyName": "AWSImportExportFullAccess",
"UpdateDate": "2015-02-06T18:40:43+00:00",
"VersionId": "v1"
},
"AWSImportExportReadOnlyAccess": {
"Arn": "arn:aws:iam::aws:policy/AWSImportExportReadOnlyAccess",
"AttachmentCount": 0,
"CreateDate": "2015-02-06T18:40:42+00:00",
"DefaultVersionId": "v1",
"Document": {
"Statement": [
{
"Action": [
"importexport:ListJobs",
"importexport:GetStatus"
],
"Effect": "Allow",
"Resource": "*"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/",
"PolicyId": "ANPAJNTV4OG52ESYZHCNK",
"PolicyName": "AWSImportExportReadOnlyAccess",
"UpdateDate": "2015-02-06T18:40:42+00:00",
"VersionId": "v1"
},
"AWSIoTConfigAccess": {
"Arn": "arn:aws:iam::aws:policy/AWSIoTConfigAccess",
"AttachmentCount": 0,
"CreateDate": "2016-07-27T20:41:18+00:00",
"DefaultVersionId": "v4",
"Document": {
"Statement": [
{
"Action": [
"iot:AcceptCertificateTransfer",
"iot:AttachPrincipalPolicy",
"iot:AttachThingPrincipal",
"iot:CancelCertificateTransfer",
"iot:CreateCertificateFromCsr",
"iot:CreateKeysAndCertificate",
"iot:CreatePolicy",
"iot:CreatePolicyVersion",
"iot:CreateThing",
"iot:CreateThingType",
"iot:CreateTopicRule",
"iot:DeleteCertificate",
"iot:DeleteCACertificate",
"iot:DeletePolicy",
"iot:DeletePolicyVersion",
"iot:DeleteRegistrationCode",
"iot:DeleteThing",
"iot:DeleteThingType",
"iot:DeleteTopicRule",
"iot:DeprecateThingType",
"iot:DescribeCertificate",
"iot:DescribeCACertificate",
"iot:DescribeEndpoint",
"iot:DescribeThing",
"iot:DescribeThingType",
"iot:DetachPrincipalPolicy",
"iot:DetachThingPrincipal",
"iot:GetLoggingOptions",
"iot:GetPolicy",
"iot:GetPolicyVersion",
"iot:GetRegistrationCode",
"iot:GetTopicRule",
"iot:ListCertificates",
"iot:ListCACertificates",
"iot:ListCertificatesByCA",
"iot:ListPolicies",
"iot:ListPolicyPrincipals",
"iot:ListPolicyVersions",
"iot:ListPrincipalPolicies",
"iot:ListPrincipalThings",
"iot:ListThingPrincipals",
"iot:ListThings",
"iot:ListThingTypes",
"iot:ListTopicRules",
"iot:RegisterCertificate",
"iot:RegisterCACertificate",
"iot:RejectCertificateTransfer",
"iot:ReplaceTopicRule",
"iot:SetDefaultPolicyVersion",
"iot:SetLoggingOptions",
"iot:TransferCertificate",
"iot:UpdateCertificate",
"iot:UpdateCACertificate",
"iot:UpdateThing"
],
"Effect": "Allow",
"Resource": "*"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/",
"PolicyId": "ANPAIWWGD4LM4EMXNRL7I",
"PolicyName": "AWSIoTConfigAccess",
"UpdateDate": "2016-07-27T20:41:18+00:00",
"VersionId": "v4"
},
"AWSIoTConfigReadOnlyAccess": {
"Arn": "arn:aws:iam::aws:policy/AWSIoTConfigReadOnlyAccess",
"AttachmentCount": 0,
"CreateDate": "2016-07-27T20:41:36+00:00",
"DefaultVersionId": "v4",
"Document": {
"Statement": [
{
"Action": [
"iot:DescribeCertificate",
"iot:DescribeCACertificate",
"iot:DescribeEndpoint",
"iot:DescribeThing",
"iot:DescribeThingType",
"iot:GetLoggingOptions",
"iot:GetPolicy",
"iot:GetPolicyVersion",
"iot:GetRegistrationCode",
"iot:GetTopicRule",
"iot:ListCertificates",
"iot:ListCertificatesByCA",
"iot:ListCACertificates",
"iot:ListPolicies",
"iot:ListPolicyPrincipals",
"iot:ListPolicyVersions",
"iot:ListPrincipalPolicies",
"iot:ListPrincipalThings",
"iot:ListThingPrincipals",
"iot:ListThings",
"iot:ListThingTypes",
"iot:ListTopicRules"
],
"Effect": "Allow",
"Resource": "*"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/",
"PolicyId": "ANPAJHENEMXGX4XMFOIOI",
"PolicyName": "AWSIoTConfigReadOnlyAccess",
"UpdateDate": "2016-07-27T20:41:36+00:00",
"VersionId": "v4"
},
"AWSIoTDataAccess": {
"Arn": "arn:aws:iam::aws:policy/AWSIoTDataAccess",
"AttachmentCount": 0,
"CreateDate": "2015-10-27T21:51:18+00:00",
"DefaultVersionId": "v1",
"Document": {
"Statement": [
{
"Action": [
"iot:Connect",
"iot:Publish",
"iot:Subscribe",
"iot:Receive",
"iot:GetThingShadow",
"iot:UpdateThingShadow"
],
"Effect": "Allow",
"Resource": "*"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/",
"PolicyId": "ANPAJM2KI2UJDR24XPS2K",
"PolicyName": "AWSIoTDataAccess",
"UpdateDate": "2015-10-27T21:51:18+00:00",
"VersionId": "v1"
},
"AWSIoTFullAccess": {
"Arn": "arn:aws:iam::aws:policy/AWSIoTFullAccess",
"AttachmentCount": 0,
"CreateDate": "2015-10-08T15:19:49+00:00",
"DefaultVersionId": "v1",
"Document": {
"Statement": [
{
"Action": [
"iot:*"
],
"Effect": "Allow",
"Resource": "*"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/",
"PolicyId": "ANPAJU2FPGG6PQWN72V2G",
"PolicyName": "AWSIoTFullAccess",
"UpdateDate": "2015-10-08T15:19:49+00:00",
"VersionId": "v1"
},
"AWSIoTLogging": {
"Arn": "arn:aws:iam::aws:policy/service-role/AWSIoTLogging",
"AttachmentCount": 0,
"CreateDate": "2015-10-08T15:17:25+00:00",
"DefaultVersionId": "v1",
"Document": {
"Statement": [
{
"Action": [
"logs:CreateLogGroup",
"logs:CreateLogStream",
"logs:PutLogEvents",
"logs:PutMetricFilter",
"logs:PutRetentionPolicy",
"logs:GetLogEvents",
"logs:DeleteLogStream"
],
"Effect": "Allow",
"Resource": [
"*"
]
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/service-role/",
"PolicyId": "ANPAI6R6Z2FHHGS454W7W",
"PolicyName": "AWSIoTLogging",
"UpdateDate": "2015-10-08T15:17:25+00:00",
"VersionId": "v1"
},
"AWSIoTRuleActions": {
"Arn": "arn:aws:iam::aws:policy/service-role/AWSIoTRuleActions",
"AttachmentCount": 0,
"CreateDate": "2015-10-08T15:14:51+00:00",
"DefaultVersionId": "v1",
"Document": {
"Statement": {
"Action": [
"dynamodb:PutItem",
"kinesis:PutRecord",
"iot:Publish",
"s3:PutObject",
"sns:Publish",
"sqs:SendMessage*"
],
"Effect": "Allow",
"Resource": "*"
},
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/service-role/",
"PolicyId": "ANPAJEZ6FS7BUZVUHMOKY",
"PolicyName": "AWSIoTRuleActions",
"UpdateDate": "2015-10-08T15:14:51+00:00",
"VersionId": "v1"
},
"AWSKeyManagementServicePowerUser": {
"Arn": "arn:aws:iam::aws:policy/AWSKeyManagementServicePowerUser",
"AttachmentCount": 1,
"CreateDate": "2017-03-07T00:55:11+00:00",
"DefaultVersionId": "v2",
"Document": {
"Statement": [
{
"Action": [
"kms:CreateAlias",
"kms:CreateKey",
"kms:DeleteAlias",
"kms:Describe*",
"kms:GenerateRandom",
"kms:Get*",
"kms:List*",
"kms:TagResource",
"kms:UntagResource",
"iam:ListGroups",
"iam:ListRoles",
"iam:ListUsers"
],
"Effect": "Allow",
"Resource": "*"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/",
"PolicyId": "ANPAJNPP7PPPPMJRV2SA4",
"PolicyName": "AWSKeyManagementServicePowerUser",
"UpdateDate": "2017-03-07T00:55:11+00:00",
"VersionId": "v2"
},
"AWSLambdaBasicExecutionRole": {
"Arn": "arn:aws:iam::aws:policy/service-role/AWSLambdaBasicExecutionRole",
"AttachmentCount": 0,
"CreateDate": "2015-04-09T15:03:43+00:00",
"DefaultVersionId": "v1",
"Document": {
"Statement": [
{
"Action": [
"logs:CreateLogGroup",
"logs:CreateLogStream",
"logs:PutLogEvents"
],
"Effect": "Allow",
"Resource": "*"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/service-role/",
"PolicyId": "ANPAJNCQGXC42545SKXIK",
"PolicyName": "AWSLambdaBasicExecutionRole",
"UpdateDate": "2015-04-09T15:03:43+00:00",
"VersionId": "v1"
},
"AWSLambdaDynamoDBExecutionRole": {
"Arn": "arn:aws:iam::aws:policy/service-role/AWSLambdaDynamoDBExecutionRole",
"AttachmentCount": 0,
"CreateDate": "2015-04-09T15:09:29+00:00",
"DefaultVersionId": "v1",
"Document": {
"Statement": [
{
"Action": [
"dynamodb:DescribeStream",
"dynamodb:GetRecords",
"dynamodb:GetShardIterator",
"dynamodb:ListStreams",
"logs:CreateLogGroup",
"logs:CreateLogStream",
"logs:PutLogEvents"
],
"Effect": "Allow",
"Resource": "*"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/service-role/",
"PolicyId": "ANPAIP7WNAGMIPYNW4WQG",
"PolicyName": "AWSLambdaDynamoDBExecutionRole",
"UpdateDate": "2015-04-09T15:09:29+00:00",
"VersionId": "v1"
},
"AWSLambdaENIManagementAccess": {
"Arn": "arn:aws:iam::aws:policy/service-role/AWSLambdaENIManagementAccess",
"AttachmentCount": 0,
"CreateDate": "2016-12-06T00:37:27+00:00",
"DefaultVersionId": "v1",
"Document": {
"Statement": [
{
"Action": [
"ec2:CreateNetworkInterface",
"ec2:DescribeNetworkInterfaces",
"ec2:DeleteNetworkInterface"
],
"Effect": "Allow",
"Resource": "*"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/service-role/",
"PolicyId": "ANPAJXAW2Q3KPTURUT2QC",
"PolicyName": "AWSLambdaENIManagementAccess",
"UpdateDate": "2016-12-06T00:37:27+00:00",
"VersionId": "v1"
},
"AWSLambdaExecute": {
"Arn": "arn:aws:iam::aws:policy/AWSLambdaExecute",
"AttachmentCount": 0,
"CreateDate": "2015-02-06T18:40:46+00:00",
"DefaultVersionId": "v1",
"Document": {
"Statement": [
{
"Action": [
"logs:*"
],
"Effect": "Allow",
"Resource": "arn:aws:logs:*:*:*"
},
{
"Action": [
"s3:GetObject",
"s3:PutObject"
],
"Effect": "Allow",
"Resource": "arn:aws:s3:::*"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/",
"PolicyId": "ANPAJE5FX7FQZSU5XAKGO",
"PolicyName": "AWSLambdaExecute",
"UpdateDate": "2015-02-06T18:40:46+00:00",
"VersionId": "v1"
},
"AWSLambdaFullAccess": {
"Arn": "arn:aws:iam::aws:policy/AWSLambdaFullAccess",
"AttachmentCount": 0,
"CreateDate": "2017-05-25T19:08:45+00:00",
"DefaultVersionId": "v7",
"Document": {
"Statement": [
{
"Action": [
"cloudwatch:*",
"cognito-identity:ListIdentityPools",
"cognito-sync:GetCognitoEvents",
"cognito-sync:SetCognitoEvents",
"dynamodb:*",
"events:*",
"iam:ListAttachedRolePolicies",
"iam:ListRolePolicies",
"iam:ListRoles",
"iam:PassRole",
"kinesis:DescribeStream",
"kinesis:ListStreams",
"kinesis:PutRecord",
"lambda:*",
"logs:*",
"s3:*",
"sns:ListSubscriptions",
"sns:ListSubscriptionsByTopic",
"sns:ListTopics",
"sns:Subscribe",
"sns:Unsubscribe",
"sns:Publish",
"sqs:ListQueues",
"sqs:SendMessage",
"tag:GetResources",
"kms:ListAliases",
"ec2:DescribeVpcs",
"ec2:DescribeSubnets",
"ec2:DescribeSecurityGroups",
"iot:GetTopicRule",
"iot:ListTopicRules",
"iot:CreateTopicRule",
"iot:ReplaceTopicRule",
"iot:AttachPrincipalPolicy",
"iot:AttachThingPrincipal",
"iot:CreateKeysAndCertificate",
"iot:CreatePolicy",
"iot:CreateThing",
"iot:ListPolicies",
"iot:ListThings",
"iot:DescribeEndpoint",
"xray:PutTraceSegments",
"xray:PutTelemetryRecords"
],
"Effect": "Allow",
"Resource": "*"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/",
"PolicyId": "ANPAI6E2CYYMI4XI7AA5K",
"PolicyName": "AWSLambdaFullAccess",
"UpdateDate": "2017-05-25T19:08:45+00:00",
"VersionId": "v7"
},
"AWSLambdaInvocation-DynamoDB": {
"Arn": "arn:aws:iam::aws:policy/AWSLambdaInvocation-DynamoDB",
"AttachmentCount": 0,
"CreateDate": "2015-02-06T18:40:47+00:00",
"DefaultVersionId": "v1",
"Document": {
"Statement": [
{
"Action": [
"lambda:InvokeFunction"
],
"Effect": "Allow",
"Resource": "*"
},
{
"Action": [
"dynamodb:DescribeStream",
"dynamodb:GetRecords",
"dynamodb:GetShardIterator",
"dynamodb:ListStreams"
],
"Effect": "Allow",
"Resource": "*"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/",
"PolicyId": "ANPAJTHQ3EKCQALQDYG5G",
"PolicyName": "AWSLambdaInvocation-DynamoDB",
"UpdateDate": "2015-02-06T18:40:47+00:00",
"VersionId": "v1"
},
"AWSLambdaKinesisExecutionRole": {
"Arn": "arn:aws:iam::aws:policy/service-role/AWSLambdaKinesisExecutionRole",
"AttachmentCount": 0,
"CreateDate": "2015-04-09T15:14:16+00:00",
"DefaultVersionId": "v1",
"Document": {
"Statement": [
{
"Action": [
"kinesis:DescribeStream",
"kinesis:GetRecords",
"kinesis:GetShardIterator",
"kinesis:ListStreams",
"logs:CreateLogGroup",
"logs:CreateLogStream",
"logs:PutLogEvents"
],
"Effect": "Allow",
"Resource": "*"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/service-role/",
"PolicyId": "ANPAJHOLKJPXV4GBRMJUQ",
"PolicyName": "AWSLambdaKinesisExecutionRole",
"UpdateDate": "2015-04-09T15:14:16+00:00",
"VersionId": "v1"
},
"AWSLambdaReadOnlyAccess": {
"Arn": "arn:aws:iam::aws:policy/AWSLambdaReadOnlyAccess",
"AttachmentCount": 0,
"CreateDate": "2017-05-04T18:22:29+00:00",
"DefaultVersionId": "v6",
"Document": {
"Statement": [
{
"Action": [
"cloudwatch:Describe*",
"cloudwatch:Get*",
"cloudwatch:List*",
"cognito-identity:ListIdentityPools",
"cognito-sync:GetCognitoEvents",
"dynamodb:BatchGetItem",
"dynamodb:DescribeStream",
"dynamodb:DescribeTable",
"dynamodb:GetItem",
"dynamodb:ListStreams",
"dynamodb:ListTables",
"dynamodb:Query",
"dynamodb:Scan",
"events:List*",
"events:Describe*",
"iam:ListRoles",
"kinesis:DescribeStream",
"kinesis:ListStreams",
"lambda:List*",
"lambda:Get*",
"logs:DescribeMetricFilters",
"logs:GetLogEvents",
"logs:DescribeLogGroups",
"logs:DescribeLogStreams",
"s3:Get*",
"s3:List*",
"sns:ListTopics",
"sns:ListSubscriptions",
"sns:ListSubscriptionsByTopic",
"sqs:ListQueues",
"tag:GetResources",
"kms:ListAliases",
"ec2:DescribeVpcs",
"ec2:DescribeSubnets",
"ec2:DescribeSecurityGroups",
"iot:GetTopicRules",
"iot:ListTopicRules",
"iot:ListPolicies",
"iot:ListThings",
"iot:DescribeEndpoint"
],
"Effect": "Allow",
"Resource": "*"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/",
"PolicyId": "ANPAJLDG7J3CGUHFN4YN6",
"PolicyName": "AWSLambdaReadOnlyAccess",
"UpdateDate": "2017-05-04T18:22:29+00:00",
"VersionId": "v6"
},
"AWSLambdaRole": {
"Arn": "arn:aws:iam::aws:policy/service-role/AWSLambdaRole",
"AttachmentCount": 0,
"CreateDate": "2015-02-06T18:41:28+00:00",
"DefaultVersionId": "v1",
"Document": {
"Statement": [
{
"Action": [
"lambda:InvokeFunction"
],
"Effect": "Allow",
"Resource": [
"*"
]
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/service-role/",
"PolicyId": "ANPAJX4DPCRGTC4NFDUXI",
"PolicyName": "AWSLambdaRole",
"UpdateDate": "2015-02-06T18:41:28+00:00",
"VersionId": "v1"
},
"AWSLambdaVPCAccessExecutionRole": {
"Arn": "arn:aws:iam::aws:policy/service-role/AWSLambdaVPCAccessExecutionRole",
"AttachmentCount": 0,
"CreateDate": "2016-02-11T23:15:26+00:00",
"DefaultVersionId": "v1",
"Document": {
"Statement": [
{
"Action": [
"logs:CreateLogGroup",
"logs:CreateLogStream",
"logs:PutLogEvents",
"ec2:CreateNetworkInterface",
"ec2:DescribeNetworkInterfaces",
"ec2:DeleteNetworkInterface"
],
"Effect": "Allow",
"Resource": "*"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/service-role/",
"PolicyId": "ANPAJVTME3YLVNL72YR2K",
"PolicyName": "AWSLambdaVPCAccessExecutionRole",
"UpdateDate": "2016-02-11T23:15:26+00:00",
"VersionId": "v1"
},
"AWSMarketplaceFullAccess": {
"Arn": "arn:aws:iam::aws:policy/AWSMarketplaceFullAccess",
"AttachmentCount": 0,
"CreateDate": "2015-02-11T17:21:45+00:00",
"DefaultVersionId": "v1",
"Document": {
"Statement": [
{
"Action": [
"aws-marketplace:*",
"cloudformation:CreateStack",
"cloudformation:DescribeStackResource",
"cloudformation:DescribeStackResources",
"cloudformation:DescribeStacks",
"cloudformation:List*",
"ec2:AuthorizeSecurityGroupEgress",
"ec2:AuthorizeSecurityGroupIngress",
"ec2:CreateSecurityGroup",
"ec2:CreateTags",
"ec2:DescribeAccountAttributes",
"ec2:DescribeAddresses",
"ec2:DeleteSecurityGroup",
"ec2:DescribeAccountAttributes",
"ec2:DescribeImages",
"ec2:DescribeInstances",
"ec2:DescribeKeyPairs",
"ec2:DescribeSecurityGroups",
"ec2:DescribeSubnets",
"ec2:DescribeTags",
"ec2:DescribeVpcs",
"ec2:RunInstances",
"ec2:StartInstances",
"ec2:StopInstances",
"ec2:TerminateInstances"
],
"Effect": "Allow",
"Resource": "*"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/",
"PolicyId": "ANPAI2DV5ULJSO2FYVPYG",
"PolicyName": "AWSMarketplaceFullAccess",
"UpdateDate": "2015-02-11T17:21:45+00:00",
"VersionId": "v1"
},
"AWSMarketplaceGetEntitlements": {
"Arn": "arn:aws:iam::aws:policy/AWSMarketplaceGetEntitlements",
"AttachmentCount": 0,
"CreateDate": "2017-03-27T19:37:24+00:00",
"DefaultVersionId": "v1",
"Document": {
"Statement": [
{
"Action": [
"aws-marketplace:GetEntitlements"
],
"Effect": "Allow",
"Resource": "*"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/",
"PolicyId": "ANPAJLPIMQE4WMHDC2K7C",
"PolicyName": "AWSMarketplaceGetEntitlements",
"UpdateDate": "2017-03-27T19:37:24+00:00",
"VersionId": "v1"
},
"AWSMarketplaceManageSubscriptions": {
"Arn": "arn:aws:iam::aws:policy/AWSMarketplaceManageSubscriptions",
"AttachmentCount": 0,
"CreateDate": "2015-02-06T18:40:32+00:00",
"DefaultVersionId": "v1",
"Document": {
"Statement": [
{
"Action": [
"aws-marketplace:ViewSubscriptions",
"aws-marketplace:Subscribe",
"aws-marketplace:Unsubscribe"
],
"Effect": "Allow",
"Resource": "*"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/",
"PolicyId": "ANPAJRDW2WIFN7QLUAKBQ",
"PolicyName": "AWSMarketplaceManageSubscriptions",
"UpdateDate": "2015-02-06T18:40:32+00:00",
"VersionId": "v1"
},
"AWSMarketplaceMeteringFullAccess": {
"Arn": "arn:aws:iam::aws:policy/AWSMarketplaceMeteringFullAccess",
"AttachmentCount": 0,
"CreateDate": "2016-03-17T22:39:22+00:00",
"DefaultVersionId": "v1",
"Document": {
"Statement": [
{
"Action": [
"aws-marketplace:MeterUsage"
],
"Effect": "Allow",
"Resource": "*"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/",
"PolicyId": "ANPAJ65YJPG7CC7LDXNA6",
"PolicyName": "AWSMarketplaceMeteringFullAccess",
"UpdateDate": "2016-03-17T22:39:22+00:00",
"VersionId": "v1"
},
"AWSMarketplaceRead-only": {
"Arn": "arn:aws:iam::aws:policy/AWSMarketplaceRead-only",
"AttachmentCount": 0,
"CreateDate": "2015-02-06T18:40:31+00:00",
"DefaultVersionId": "v1",
"Document": {
"Statement": [
{
"Action": [
"aws-marketplace:ViewSubscriptions",
"ec2:DescribeAccountAttributes",
"ec2:DescribeAddresses",
"ec2:DescribeImages",
"ec2:DescribeInstances",
"ec2:DescribeKeyPairs",
"ec2:DescribeSecurityGroups",
"ec2:DescribeSubnets",
"ec2:DescribeVpcs"
],
"Effect": "Allow",
"Resource": "*"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/",
"PolicyId": "ANPAJOOM6LETKURTJ3XZ2",
"PolicyName": "AWSMarketplaceRead-only",
"UpdateDate": "2015-02-06T18:40:31+00:00",
"VersionId": "v1"
},
"AWSMigrationHubDMSAccess": {
"Arn": "arn:aws:iam::aws:policy/service-role/AWSMigrationHubDMSAccess",
"AttachmentCount": 0,
"CreateDate": "2017-08-14T14:00:06+00:00",
"DefaultVersionId": "v1",
"Document": {
"Statement": [
{
"Action": [
"mgh:CreateProgressUpdateStream"
],
"Effect": "Allow",
"Resource": "arn:aws:mgh:*:*:progressUpdateStream/DMS"
},
{
"Action": [
"mgh:AssociateCreatedArtifact",
"mgh:DescribeMigrationTask",
"mgh:DisassociateCreatedArtifact",
"mgh:ImportMigrationTask",
"mgh:ListCreatedArtifacts",
"mgh:NotifyMigrationTaskState",
"mgh:PutResourceAttributes",
"mgh:NotifyApplicationState",
"mgh:DescribeApplicationState",
"mgh:AssociateDiscoveredResource",
"mgh:DisassociateDiscoveredResource",
"mgh:ListDiscoveredResources"
],
"Effect": "Allow",
"Resource": "arn:aws:mgh:*:*:progressUpdateStream/DMS/*"
},
{
"Action": [
"mgh:ListMigrationTasks"
],
"Effect": "Allow",
"Resource": "*"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/service-role/",
"PolicyId": "ANPAIUQB56VA4JHLN7G2W",
"PolicyName": "AWSMigrationHubDMSAccess",
"UpdateDate": "2017-08-14T14:00:06+00:00",
"VersionId": "v1"
},
"AWSMigrationHubDiscoveryAccess": {
"Arn": "arn:aws:iam::aws:policy/service-role/AWSMigrationHubDiscoveryAccess",
"AttachmentCount": 0,
"CreateDate": "2017-08-14T13:30:51+00:00",
"DefaultVersionId": "v1",
"Document": {
"Statement": [
{
"Action": [
"discovery:ListConfigurations",
"discovery:DescribeConfigurations"
],
"Effect": "Allow",
"Resource": [
"*"
]
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/service-role/",
"PolicyId": "ANPAITRMRLSV7JAL6YIGG",
"PolicyName": "AWSMigrationHubDiscoveryAccess",
"UpdateDate": "2017-08-14T13:30:51+00:00",
"VersionId": "v1"
},
"AWSMigrationHubFullAccess": {
"Arn": "arn:aws:iam::aws:policy/AWSMigrationHubFullAccess",
"AttachmentCount": 0,
"CreateDate": "2017-08-14T14:09:27+00:00",
"DefaultVersionId": "v2",
"Document": {
"Statement": [
{
"Action": [
"mgh:*",
"discovery:*"
],
"Effect": "Allow",
"Resource": "*"
},
{
"Action": [
"iam:GetRole"
],
"Effect": "Allow",
"Resource": "*"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/",
"PolicyId": "ANPAJ4A2SZKHUYHDYIGOK",
"PolicyName": "AWSMigrationHubFullAccess",
"UpdateDate": "2017-08-14T14:09:27+00:00",
"VersionId": "v2"
},
"AWSMigrationHubSMSAccess": {
"Arn": "arn:aws:iam::aws:policy/service-role/AWSMigrationHubSMSAccess",
"AttachmentCount": 0,
"CreateDate": "2017-08-14T13:57:54+00:00",
"DefaultVersionId": "v1",
"Document": {
"Statement": [
{
"Action": [
"mgh:CreateProgressUpdateStream"
],
"Effect": "Allow",
"Resource": "arn:aws:mgh:*:*:progressUpdateStream/SMS"
},
{
"Action": [
"mgh:AssociateCreatedArtifact",
"mgh:DescribeMigrationTask",
"mgh:DisassociateCreatedArtifact",
"mgh:ImportMigrationTask",
"mgh:ListCreatedArtifacts",
"mgh:NotifyMigrationTaskState",
"mgh:PutResourceAttributes",
"mgh:NotifyApplicationState",
"mgh:DescribeApplicationState",
"mgh:AssociateDiscoveredResource",
"mgh:DisassociateDiscoveredResource",
"mgh:ListDiscoveredResources"
],
"Effect": "Allow",
"Resource": "arn:aws:mgh:*:*:progressUpdateStream/SMS/*"
},
{
"Action": [
"mgh:ListMigrationTasks"
],
"Effect": "Allow",
"Resource": "*"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/service-role/",
"PolicyId": "ANPAIWQYYT6TSVIRJO4TY",
"PolicyName": "AWSMigrationHubSMSAccess",
"UpdateDate": "2017-08-14T13:57:54+00:00",
"VersionId": "v1"
},
"AWSMobileHub_FullAccess": {
"Arn": "arn:aws:iam::aws:policy/AWSMobileHub_FullAccess",
"AttachmentCount": 0,
"CreateDate": "2017-08-10T22:23:47+00:00",
"DefaultVersionId": "v10",
"Document": {
"Statement": [
{
"Action": [
"apigateway:GET",
"apigateway:GetRestApis",
"apigateway:GetResources",
"apigateway:POST",
"apigateway:TestInvokeMethod",
"dynamodb:DescribeTable",
"ec2:DescribeSecurityGroups",
"ec2:DescribeSubnets",
"ec2:DescribeVpcs",
"iam:ListSAMLProviders",
"lambda:ListFunctions",
"sns:ListTopics",
"lex:GetIntent",
"lex:GetIntents",
"lex:GetSlotType",
"lex:GetSlotTypes",
"lex:GetBot",
"lex:GetBots",
"lex:GetBotAlias",
"lex:GetBotAliases",
"mobilehub:CreateProject",
"mobilehub:DeleteProject",
"mobilehub:UpdateProject",
"mobilehub:ExportProject",
"mobilehub:ImportProject",
"mobilehub:SynchronizeProject",
"mobilehub:GenerateProjectParameters",
"mobilehub:GetProject",
"mobilehub:GetProjectSnapshot",
"mobilehub:ListAvailableConnectors",
"mobilehub:ListAvailableFeatures",
"mobilehub:ListAvailableRegions",
"mobilehub:ListProjects",
"mobilehub:ValidateProject",
"mobilehub:VerifyServiceRole",
"mobilehub:DescribeBundle",
"mobilehub:ExportBundle",
"mobilehub:ListBundles"
],
"Effect": "Allow",
"Resource": "*"
},
{
"Action": [
"s3:GetObject"
],
"Effect": "Allow",
"Resource": "arn:aws:s3:::*/aws-my-sample-app*.zip"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/",
"PolicyId": "ANPAIJLU43R6AGRBK76DM",
"PolicyName": "AWSMobileHub_FullAccess",
"UpdateDate": "2017-08-10T22:23:47+00:00",
"VersionId": "v10"
},
"AWSMobileHub_ReadOnly": {
"Arn": "arn:aws:iam::aws:policy/AWSMobileHub_ReadOnly",
"AttachmentCount": 0,
"CreateDate": "2017-08-10T22:08:23+00:00",
"DefaultVersionId": "v8",
"Document": {
"Statement": [
{
"Action": [
"dynamodb:DescribeTable",
"iam:ListSAMLProviders",
"lambda:ListFunctions",
"sns:ListTopics",
"lex:GetIntent",
"lex:GetIntents",
"lex:GetSlotType",
"lex:GetSlotTypes",
"lex:GetBot",
"lex:GetBots",
"lex:GetBotAlias",
"lex:GetBotAliases",
"mobilehub:ExportProject",
"mobilehub:GenerateProjectParameters",
"mobilehub:GetProject",
"mobilehub:GetProjectSnapshot",
"mobilehub:ListAvailableConnectors",
"mobilehub:ListAvailableFeatures",
"mobilehub:ListAvailableRegions",
"mobilehub:ListProjects",
"mobilehub:ValidateProject",
"mobilehub:VerifyServiceRole",
"mobilehub:DescribeBundle",
"mobilehub:ExportBundle",
"mobilehub:ListBundles"
],
"Effect": "Allow",
"Resource": "*"
},
{
"Action": [
"s3:GetObject"
],
"Effect": "Allow",
"Resource": "arn:aws:s3:::*/aws-my-sample-app*.zip"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/",
"PolicyId": "ANPAIBXVYVL3PWQFBZFGW",
"PolicyName": "AWSMobileHub_ReadOnly",
"UpdateDate": "2017-08-10T22:08:23+00:00",
"VersionId": "v8"
},
"AWSMobileHub_ServiceUseOnly": {
"Arn": "arn:aws:iam::aws:policy/service-role/AWSMobileHub_ServiceUseOnly",
"AttachmentCount": 0,
"CreateDate": "2017-06-02T23:35:49+00:00",
"DefaultVersionId": "v23",
"Document": {
"Statement": [
{
"Action": [
"cloudformation:CreateUploadBucket",
"cloudformation:ValidateTemplate",
"cloudfront:CreateDistribution",
"cloudfront:DeleteDistribution",
"cloudfront:GetDistribution",
"cloudfront:GetDistributionConfig",
"cloudfront:UpdateDistribution",
"cognito-identity:CreateIdentityPool",
"cognito-identity:UpdateIdentityPool",
"cognito-identity:DeleteIdentityPool",
"cognito-identity:SetIdentityPoolRoles",
"cognito-idp:CreateUserPool",
"dynamodb:CreateTable",
"dynamodb:DeleteTable",
"dynamodb:DescribeTable",
"dynamodb:UpdateTable",
"iam:AddClientIDToOpenIDConnectProvider",
"iam:CreateOpenIDConnectProvider",
"iam:GetOpenIDConnectProvider",
"iam:ListOpenIDConnectProviders",
"iam:CreateSAMLProvider",
"iam:GetSAMLProvider",
"iam:ListSAMLProvider",
"iam:UpdateSAMLProvider",
"lambda:CreateFunction",
"lambda:DeleteFunction",
"lambda:GetFunction",
"mobileanalytics:CreateApp",
"mobileanalytics:DeleteApp",
"sns:CreateTopic",
"sns:DeleteTopic",
"sns:ListPlatformApplications",
"ec2:DescribeSecurityGroups",
"ec2:DescribeSubnets",
"ec2:DescribeVpcs",
"lex:PutIntent",
"lex:GetIntent",
"lex:GetIntents",
"lex:PutSlotType",
"lex:GetSlotType",
"lex:GetSlotTypes",
"lex:PutBot",
"lex:GetBot",
"lex:GetBots",
"lex:GetBotAlias",
"lex:GetBotAliases"
],
"Effect": "Allow",
"Resource": [
"*"
]
},
{
"Action": [
"sns:CreatePlatformApplication",
"sns:DeletePlatformApplication",
"sns:GetPlatformApplicationAttributes",
"sns:SetPlatformApplicationAttributes"
],
"Effect": "Allow",
"Resource": [
"arn:aws:sns:*:*:app/*_MOBILEHUB_*"
]
},
{
"Action": [
"s3:CreateBucket",
"s3:DeleteBucket",
"s3:DeleteBucketPolicy",
"s3:DeleteBucketWebsite",
"s3:ListBucket",
"s3:ListBucketVersions",
"s3:GetBucketLocation",
"s3:GetBucketVersioning",
"s3:PutBucketVersioning",
"s3:PutBucketWebsite",
"s3:PutBucketPolicy",
"s3:SetBucketCrossOriginConfiguration"
],
"Effect": "Allow",
"Resource": [
"arn:aws:s3:::*-userfiles-mobilehub-*",
"arn:aws:s3:::*-contentdelivery-mobilehub-*",
"arn:aws:s3:::*-hosting-mobilehub-*",
"arn:aws:s3:::*-deployments-mobilehub-*"
]
},
{
"Action": [
"s3:DeleteObject",
"s3:DeleteVersion",
"s3:DeleteObjectVersion",
"s3:GetObject",
"s3:GetObjectVersion",
"s3:PutObject",
"s3:PutObjectAcl"
],
"Effect": "Allow",
"Resource": [
"arn:aws:s3:::*-userfiles-mobilehub-*/*",
"arn:aws:s3:::*-contentdelivery-mobilehub-*/*",
"arn:aws:s3:::*-hosting-mobilehub-*/*",
"arn:aws:s3:::*-deployments-mobilehub-*/*"
]
},
{
"Action": [
"lambda:AddPermission",
"lambda:CreateAlias",
"lambda:DeleteAlias",
"lambda:UpdateAlias",
"lambda:GetFunctionConfiguration",
"lambda:GetPolicy",
"lambda:RemovePermission",
"lambda:UpdateFunctionCode",
"lambda:UpdateFunctionConfiguration"
],
"Effect": "Allow",
"Resource": [
"arn:aws:lambda:*:*:function:*-mobilehub-*"
]
},
{
"Action": [
"iam:CreateRole",
"iam:DeleteRole",
"iam:DeleteRolePolicy",
"iam:GetRole",
"iam:GetRolePolicy",
"iam:ListRolePolicies",
"iam:PassRole",
"iam:PutRolePolicy",
"iam:UpdateAssumeRolePolicy",
"iam:AttachRolePolicy",
"iam:DetachRolePolicy"
],
"Effect": "Allow",
"Resource": [
"arn:aws:iam::*:role/*_unauth_MOBILEHUB_*",
"arn:aws:iam::*:role/*_auth_MOBILEHUB_*",
"arn:aws:iam::*:role/*_consolepush_MOBILEHUB_*",
"arn:aws:iam::*:role/*_lambdaexecutionrole_MOBILEHUB_*",
"arn:aws:iam::*:role/*_smsverification_MOBILEHUB_*",
"arn:aws:iam::*:role/*_botexecutionrole_MOBILEHUB_*",
"arn:aws:iam::*:role/pinpoint-events",
"arn:aws:iam::*:role/MOBILEHUB-*-lambdaexecution*",
"arn:aws:iam::*:role/MobileHub_Service_Role"
]
},
{
"Action": [
"iam:CreateServiceLinkedRole",
"iam:GetRole"
],
"Effect": "Allow",
"Resource": [
"arn:aws:iam::*:role/aws-service-role/lex.amazonaws.com/AWSServiceRoleForLexBots"
]
},
{
"Action": [
"logs:CreateLogGroup",
"logs:CreateLogStream",
"logs:PutLogEvents"
],
"Effect": "Allow",
"Resource": [
"arn:aws:logs:*:*:log-group:/aws/mobilehub/*:log-stream:*"
]
},
{
"Action": [
"iam:ListAttachedRolePolicies"
],
"Effect": "Allow",
"Resource": [
"arn:aws:iam::*:role/MobileHub_Service_Role"
]
},
{
"Action": [
"cloudformation:CreateStack",
"cloudformation:DeleteStack",
"cloudformation:DescribeStacks",
"cloudformation:DescribeStackEvents",
"cloudformation:DescribeStackResource",
"cloudformation:GetTemplate",
"cloudformation:ListStackResources",
"cloudformation:ListStacks",
"cloudformation:UpdateStack"
],
"Effect": "Allow",
"Resource": [
"arn:aws:cloudformation:*:*:stack/MOBILEHUB-*"
]
},
{
"Action": [
"apigateway:DELETE",
"apigateway:GET",
"apigateway:HEAD",
"apigateway:OPTIONS",
"apigateway:PATCH",
"apigateway:POST",
"apigateway:PUT"
],
"Effect": "Allow",
"Resource": [
"arn:aws:apigateway:*::/restapis*"
]
},
{
"Action": [
"cognito-idp:DeleteUserPool",
"cognito-idp:DescribeUserPool",
"cognito-idp:CreateUserPoolClient",
"cognito-idp:DescribeUserPoolClient",
"cognito-idp:DeleteUserPoolClient"
],
"Effect": "Allow",
"Resource": [
"arn:aws:cognito-idp:*:*:userpool/*"
]
},
{
"Action": [
"mobiletargeting:UpdateApnsChannel",
"mobiletargeting:UpdateApnsSandboxChannel",
"mobiletargeting:UpdateEmailChannel",
"mobiletargeting:UpdateGcmChannel",
"mobiletargeting:UpdateSmsChannel",
"mobiletargeting:DeleteApnsChannel",
"mobiletargeting:DeleteApnsSandboxChannel",
"mobiletargeting:DeleteEmailChannel",
"mobiletargeting:DeleteGcmChannel",
"mobiletargeting:DeleteSmsChannel"
],
"Effect": "Allow",
"Resource": [
"arn:aws:mobiletargeting:*:*:apps/*/channels/*"
]
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/service-role/",
"PolicyId": "ANPAIUHPQXBDZUWOP3PSK",
"PolicyName": "AWSMobileHub_ServiceUseOnly",
"UpdateDate": "2017-06-02T23:35:49+00:00",
"VersionId": "v23"
},
"AWSOpsWorksCMInstanceProfileRole": {
"Arn": "arn:aws:iam::aws:policy/AWSOpsWorksCMInstanceProfileRole",
"AttachmentCount": 0,
"CreateDate": "2016-11-24T09:48:22+00:00",
"DefaultVersionId": "v1",
"Document": {
"Statement": [
{
"Action": [
"s3:AbortMultipartUpload",
"s3:DeleteObject",
"s3:GetObject",
"s3:ListAllMyBuckets",
"s3:ListBucket",
"s3:ListMultipartUploadParts",
"s3:PutObject"
],
"Effect": "Allow",
"Resource": "arn:aws:s3:::aws-opsworks-cm-*"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/",
"PolicyId": "ANPAICSU3OSHCURP2WIZW",
"PolicyName": "AWSOpsWorksCMInstanceProfileRole",
"UpdateDate": "2016-11-24T09:48:22+00:00",
"VersionId": "v1"
},
"AWSOpsWorksCMServiceRole": {
"Arn": "arn:aws:iam::aws:policy/service-role/AWSOpsWorksCMServiceRole",
"AttachmentCount": 0,
"CreateDate": "2017-04-03T12:00:07+00:00",
"DefaultVersionId": "v6",
"Document": {
"Statement": [
{
"Action": [
"s3:CreateBucket",
"s3:DeleteObject",
"s3:DeleteBucket",
"s3:GetObject",
"s3:HeadBucket",
"s3:ListBucket",
"s3:ListObjects",
"s3:PutBucketPolicy"
],
"Effect": "Allow",
"Resource": [
"arn:aws:s3:::aws-opsworks-cm-*"
]
},
{
"Action": [
"ssm:DescribeInstanceInformation",
"ssm:GetCommandInvocation",
"ssm:ListCommandInvocations",
"ssm:ListCommands"
],
"Effect": "Allow",
"Resource": [
"*"
]
},
{
"Action": [
"ssm:SendCommand"
],
"Condition": {
"StringLike": {
"ssm:resourceTag/aws:cloudformation:stack-name": "aws-opsworks-cm-*"
}
},
"Effect": "Allow",
"Resource": [
"*"
]
},
{
"Action": [
"ssm:SendCommand"
],
"Effect": "Allow",
"Resource": [
"arn:aws:ssm:*::document/*",
"arn:aws:s3:::aws-opsworks-cm-*"
]
},
{
"Action": [
"ec2:AllocateAddress",
"ec2:AssociateAddress",
"ec2:AuthorizeSecurityGroupIngress",
"ec2:CreateImage",
"ec2:CreateSecurityGroup",
"ec2:CreateSnapshot",
"ec2:CreateTags",
"ec2:DeleteSecurityGroup",
"ec2:DeleteSnapshot",
"ec2:DeregisterImage",
"ec2:DescribeAccountAttributes",
"ec2:DescribeAddresses",
"ec2:DescribeImages",
"ec2:DescribeInstanceStatus",
"ec2:DescribeInstances",
"ec2:DescribeSecurityGroups",
"ec2:DescribeSnapshots",
"ec2:DescribeSubnets",
"ec2:DisassociateAddress",
"ec2:ReleaseAddress",
"ec2:RunInstances",
"ec2:StopInstances"
],
"Effect": "Allow",
"Resource": [
"*"
]
},
{
"Action": [
"ec2:TerminateInstances"
],
"Condition": {
"StringLike": {
"ec2:ResourceTag/aws:cloudformation:stack-name": "aws-opsworks-cm-*"
}
},
"Effect": "Allow",
"Resource": [
"*"
]
},
{
"Action": [
"cloudformation:CreateStack",
"cloudformation:DeleteStack",
"cloudformation:DescribeStackEvents",
"cloudformation:DescribeStackResources",
"cloudformation:DescribeStacks",
"cloudformation:UpdateStack"
],
"Effect": "Allow",
"Resource": [
"arn:aws:cloudformation:*:*:stack/aws-opsworks-cm-*"
]
},
{
"Action": [
"iam:PassRole"
],
"Effect": "Allow",
"Resource": [
"arn:aws:iam::*:role/aws-opsworks-cm-*",
"arn:aws:iam::*:role/service-role/aws-opsworks-cm-*"
]
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/service-role/",
"PolicyId": "ANPAJ6I6MPGJE62URSHCO",
"PolicyName": "AWSOpsWorksCMServiceRole",
"UpdateDate": "2017-04-03T12:00:07+00:00",
"VersionId": "v6"
},
"AWSOpsWorksCloudWatchLogs": {
"Arn": "arn:aws:iam::aws:policy/AWSOpsWorksCloudWatchLogs",
"AttachmentCount": 0,
"CreateDate": "2017-03-30T17:47:19+00:00",
"DefaultVersionId": "v1",
"Document": {
"Statement": [
{
"Action": [
"logs:CreateLogGroup",
"logs:CreateLogStream",
"logs:PutLogEvents",
"logs:DescribeLogStreams"
],
"Effect": "Allow",
"Resource": [
"arn:aws:logs:*:*:*"
]
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/",
"PolicyId": "ANPAJXFIK7WABAY5CPXM4",
"PolicyName": "AWSOpsWorksCloudWatchLogs",
"UpdateDate": "2017-03-30T17:47:19+00:00",
"VersionId": "v1"
},
"AWSOpsWorksFullAccess": {
"Arn": "arn:aws:iam::aws:policy/AWSOpsWorksFullAccess",
"AttachmentCount": 0,
"CreateDate": "2015-02-06T18:40:48+00:00",
"DefaultVersionId": "v1",
"Document": {
"Statement": [
{
"Action": [
"opsworks:*",
"ec2:DescribeAvailabilityZones",
"ec2:DescribeKeyPairs",
"ec2:DescribeSecurityGroups",
"ec2:DescribeAccountAttributes",
"ec2:DescribeAvailabilityZones",
"ec2:DescribeSecurityGroups",
"ec2:DescribeSubnets",
"ec2:DescribeVpcs",
"elasticloadbalancing:DescribeInstanceHealth",
"elasticloadbalancing:DescribeLoadBalancers",
"iam:GetRolePolicy",
"iam:ListInstanceProfiles",
"iam:ListRoles",
"iam:ListUsers",
"iam:PassRole"
],
"Effect": "Allow",
"Resource": "*"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/",
"PolicyId": "ANPAICN26VXMXASXKOQCG",
"PolicyName": "AWSOpsWorksFullAccess",
"UpdateDate": "2015-02-06T18:40:48+00:00",
"VersionId": "v1"
},
"AWSOpsWorksInstanceRegistration": {
"Arn": "arn:aws:iam::aws:policy/AWSOpsWorksInstanceRegistration",
"AttachmentCount": 0,
"CreateDate": "2016-06-03T14:23:15+00:00",
"DefaultVersionId": "v1",
"Document": {
"Statement": [
{
"Action": [
"opsworks:DescribeStackProvisioningParameters",
"opsworks:DescribeStacks",
"opsworks:RegisterInstance"
],
"Effect": "Allow",
"Resource": [
"*"
]
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/",
"PolicyId": "ANPAJG3LCPVNI4WDZCIMU",
"PolicyName": "AWSOpsWorksInstanceRegistration",
"UpdateDate": "2016-06-03T14:23:15+00:00",
"VersionId": "v1"
},
"AWSOpsWorksRegisterCLI": {
"Arn": "arn:aws:iam::aws:policy/AWSOpsWorksRegisterCLI",
"AttachmentCount": 0,
"CreateDate": "2015-02-06T18:40:49+00:00",
"DefaultVersionId": "v1",
"Document": {
"Statement": [
{
"Action": [
"opsworks:AssignInstance",
"opsworks:CreateStack",
"opsworks:CreateLayer",
"opsworks:DeregisterInstance",
"opsworks:DescribeInstances",
"opsworks:DescribeStackProvisioningParameters",
"opsworks:DescribeStacks",
"opsworks:UnassignInstance"
],
"Effect": "Allow",
"Resource": [
"*"
]
},
{
"Action": [
"ec2:DescribeInstances"
],
"Effect": "Allow",
"Resource": [
"*"
]
},
{
"Action": [
"iam:AddUserToGroup",
"iam:CreateAccessKey",
"iam:CreateGroup",
"iam:CreateUser",
"iam:ListInstanceProfiles",
"iam:PassRole",
"iam:PutUserPolicy"
],
"Effect": "Allow",
"Resource": [
"*"
]
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/",
"PolicyId": "ANPAJ3AB5ZBFPCQGTVDU4",
"PolicyName": "AWSOpsWorksRegisterCLI",
"UpdateDate": "2015-02-06T18:40:49+00:00",
"VersionId": "v1"
},
"AWSOpsWorksRole": {
"Arn": "arn:aws:iam::aws:policy/service-role/AWSOpsWorksRole",
"AttachmentCount": 0,
"CreateDate": "2015-02-06T18:41:27+00:00",
"DefaultVersionId": "v1",
"Document": {
"Statement": [
{
"Action": [
"cloudwatch:GetMetricStatistics",
"ec2:DescribeAccountAttributes",
"ec2:DescribeAvailabilityZones",
"ec2:DescribeInstances",
"ec2:DescribeKeyPairs",
"ec2:DescribeSecurityGroups",
"ec2:DescribeSubnets",
"ec2:DescribeVpcs",
"elasticloadbalancing:DescribeInstanceHealth",
"elasticloadbalancing:DescribeLoadBalancers",
"iam:GetRolePolicy",
"iam:ListInstanceProfiles",
"iam:ListRoles",
"iam:ListUsers",
"iam:PassRole",
"opsworks:*",
"rds:*"
],
"Effect": "Allow",
"Resource": [
"*"
]
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/service-role/",
"PolicyId": "ANPAIDUTMOKHJFAPJV45W",
"PolicyName": "AWSOpsWorksRole",
"UpdateDate": "2015-02-06T18:41:27+00:00",
"VersionId": "v1"
},
"AWSQuickSightDescribeRDS": {
"Arn": "arn:aws:iam::aws:policy/service-role/AWSQuickSightDescribeRDS",
"AttachmentCount": 0,
"CreateDate": "2015-11-10T23:24:50+00:00",
"DefaultVersionId": "v1",
"Document": {
"Statement": [
{
"Action": [
"rds:Describe*"
],
"Effect": "Allow",
"Resource": "*"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/service-role/",
"PolicyId": "ANPAJU5J6OAMCJD3OO76O",
"PolicyName": "AWSQuickSightDescribeRDS",
"UpdateDate": "2015-11-10T23:24:50+00:00",
"VersionId": "v1"
},
"AWSQuickSightDescribeRedshift": {
"Arn": "arn:aws:iam::aws:policy/service-role/AWSQuickSightDescribeRedshift",
"AttachmentCount": 0,
"CreateDate": "2015-11-10T23:25:01+00:00",
"DefaultVersionId": "v1",
"Document": {
"Statement": [
{
"Action": [
"redshift:Describe*"
],
"Effect": "Allow",
"Resource": "*"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/service-role/",
"PolicyId": "ANPAJFEM6MLSLTW4ZNBW2",
"PolicyName": "AWSQuickSightDescribeRedshift",
"UpdateDate": "2015-11-10T23:25:01+00:00",
"VersionId": "v1"
},
"AWSQuickSightListIAM": {
"Arn": "arn:aws:iam::aws:policy/service-role/AWSQuickSightListIAM",
"AttachmentCount": 0,
"CreateDate": "2015-11-10T23:25:07+00:00",
"DefaultVersionId": "v1",
"Document": {
"Statement": [
{
"Action": [
"iam:List*"
],
"Effect": "Allow",
"Resource": "*"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/service-role/",
"PolicyId": "ANPAI3CH5UUWZN4EKGILO",
"PolicyName": "AWSQuickSightListIAM",
"UpdateDate": "2015-11-10T23:25:07+00:00",
"VersionId": "v1"
},
"AWSQuicksightAthenaAccess": {
"Arn": "arn:aws:iam::aws:policy/service-role/AWSQuicksightAthenaAccess",
"AttachmentCount": 0,
"CreateDate": "2017-08-11T23:37:32+00:00",
"DefaultVersionId": "v3",
"Document": {
"Statement": [
{
"Action": [
"athena:BatchGetQueryExecution",
"athena:CancelQueryExecution",
"athena:GetCatalogs",
"athena:GetExecutionEngine",
"athena:GetExecutionEngines",
"athena:GetNamespace",
"athena:GetNamespaces",
"athena:GetQueryExecution",
"athena:GetQueryExecutions",
"athena:GetQueryResults",
"athena:GetTable",
"athena:GetTables",
"athena:ListQueryExecutions",
"athena:RunQuery",
"athena:StartQueryExecution",
"athena:StopQueryExecution"
],
"Effect": "Allow",
"Resource": [
"*"
]
},
{
"Action": [
"glue:CreateDatabase",
"glue:DeleteDatabase",
"glue:GetDatabase",
"glue:GetDatabases",
"glue:UpdateDatabase",
"glue:CreateTable",
"glue:DeleteTable",
"glue:BatchDeleteTable",
"glue:UpdateTable",
"glue:GetTable",
"glue:GetTables",
"glue:BatchCreatePartition",
"glue:CreatePartition",
"glue:DeletePartition",
"glue:BatchDeletePartition",
"glue:UpdatePartition",
"glue:GetPartition",
"glue:GetPartitions",
"glue:BatchGetPartition"
],
"Effect": "Allow",
"Resource": [
"*"
]
},
{
"Action": [
"s3:GetBucketLocation",
"s3:GetObject",
"s3:ListBucket",
"s3:ListBucketMultipartUploads",
"s3:ListMultipartUploadParts",
"s3:AbortMultipartUpload",
"s3:CreateBucket",
"s3:PutObject"
],
"Effect": "Allow",
"Resource": [
"arn:aws:s3:::aws-athena-query-results-*"
]
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/service-role/",
"PolicyId": "ANPAI4JB77JXFQXDWNRPM",
"PolicyName": "AWSQuicksightAthenaAccess",
"UpdateDate": "2017-08-11T23:37:32+00:00",
"VersionId": "v3"
},
"AWSStepFunctionsConsoleFullAccess": {
"Arn": "arn:aws:iam::aws:policy/AWSStepFunctionsConsoleFullAccess",
"AttachmentCount": 0,
"CreateDate": "2017-01-12T00:19:34+00:00",
"DefaultVersionId": "v2",
"Document": {
"Statement": [
{
"Action": "states:*",
"Effect": "Allow",
"Resource": "*"
},
{
"Action": "iam:ListRoles",
"Effect": "Allow",
"Resource": "*"
},
{
"Action": "iam:PassRole",
"Effect": "Allow",
"Resource": "arn:aws:iam::*:role/service-role/StatesExecutionRole*"
},
{
"Action": "lambda:ListFunctions",
"Effect": "Allow",
"Resource": "*"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/",
"PolicyId": "ANPAJIYC52YWRX6OSMJWK",
"PolicyName": "AWSStepFunctionsConsoleFullAccess",
"UpdateDate": "2017-01-12T00:19:34+00:00",
"VersionId": "v2"
},
"AWSStepFunctionsFullAccess": {
"Arn": "arn:aws:iam::aws:policy/AWSStepFunctionsFullAccess",
"AttachmentCount": 0,
"CreateDate": "2017-01-11T21:51:32+00:00",
"DefaultVersionId": "v1",
"Document": {
"Statement": [
{
"Action": "states:*",
"Effect": "Allow",
"Resource": "*"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/",
"PolicyId": "ANPAJXKA6VP3UFBVHDPPA",
"PolicyName": "AWSStepFunctionsFullAccess",
"UpdateDate": "2017-01-11T21:51:32+00:00",
"VersionId": "v1"
},
"AWSStepFunctionsReadOnlyAccess": {
"Arn": "arn:aws:iam::aws:policy/AWSStepFunctionsReadOnlyAccess",
"AttachmentCount": 0,
"CreateDate": "2017-01-11T21:46:19+00:00",
"DefaultVersionId": "v1",
"Document": {
"Statement": [
{
"Action": [
"states:ListStateMachines",
"states:ListActivities",
"states:DescribeStateMachine",
"states:ListExecutions",
"states:DescribeExecution",
"states:GetExecutionHistory",
"states:DescribeActivity"
],
"Effect": "Allow",
"Resource": "*"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/",
"PolicyId": "ANPAJONHB2TJQDJPFW5TM",
"PolicyName": "AWSStepFunctionsReadOnlyAccess",
"UpdateDate": "2017-01-11T21:46:19+00:00",
"VersionId": "v1"
},
"AWSStorageGatewayFullAccess": {
"Arn": "arn:aws:iam::aws:policy/AWSStorageGatewayFullAccess",
"AttachmentCount": 0,
"CreateDate": "2015-02-06T18:41:09+00:00",
"DefaultVersionId": "v1",
"Document": {
"Statement": [
{
"Action": [
"storagegateway:*"
],
"Effect": "Allow",
"Resource": "*"
},
{
"Action": [
"ec2:DescribeSnapshots",
"ec2:DeleteSnapshot"
],
"Effect": "Allow",
"Resource": "*"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/",
"PolicyId": "ANPAJG5SSPAVOGK3SIDGU",
"PolicyName": "AWSStorageGatewayFullAccess",
"UpdateDate": "2015-02-06T18:41:09+00:00",
"VersionId": "v1"
},
"AWSStorageGatewayReadOnlyAccess": {
"Arn": "arn:aws:iam::aws:policy/AWSStorageGatewayReadOnlyAccess",
"AttachmentCount": 0,
"CreateDate": "2015-02-06T18:41:10+00:00",
"DefaultVersionId": "v1",
"Document": {
"Statement": [
{
"Action": [
"storagegateway:List*",
"storagegateway:Describe*"
],
"Effect": "Allow",
"Resource": "*"
},
{
"Action": [
"ec2:DescribeSnapshots"
],
"Effect": "Allow",
"Resource": "*"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/",
"PolicyId": "ANPAIFKCTUVOPD5NICXJK",
"PolicyName": "AWSStorageGatewayReadOnlyAccess",
"UpdateDate": "2015-02-06T18:41:10+00:00",
"VersionId": "v1"
},
"AWSSupportAccess": {
"Arn": "arn:aws:iam::aws:policy/AWSSupportAccess",
"AttachmentCount": 0,
"CreateDate": "2015-02-06T18:41:11+00:00",
"DefaultVersionId": "v1",
"Document": {
"Statement": [
{
"Action": [
"support:*"
],
"Effect": "Allow",
"Resource": "*"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/",
"PolicyId": "ANPAJSNKQX2OW67GF4S7E",
"PolicyName": "AWSSupportAccess",
"UpdateDate": "2015-02-06T18:41:11+00:00",
"VersionId": "v1"
},
"AWSWAFFullAccess": {
"Arn": "arn:aws:iam::aws:policy/AWSWAFFullAccess",
"AttachmentCount": 0,
"CreateDate": "2016-12-07T21:33:25+00:00",
"DefaultVersionId": "v2",
"Document": {
"Statement": [
{
"Action": [
"waf:*",
"waf-regional:*",
"elasticloadbalancing:SetWebACL"
],
"Effect": "Allow",
"Resource": "*"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/",
"PolicyId": "ANPAJMIKIAFXZEGOLRH7C",
"PolicyName": "AWSWAFFullAccess",
"UpdateDate": "2016-12-07T21:33:25+00:00",
"VersionId": "v2"
},
"AWSWAFReadOnlyAccess": {
"Arn": "arn:aws:iam::aws:policy/AWSWAFReadOnlyAccess",
"AttachmentCount": 0,
"CreateDate": "2016-12-07T21:30:54+00:00",
"DefaultVersionId": "v2",
"Document": {
"Statement": [
{
"Action": [
"waf:Get*",
"waf:List*",
"waf-regional:Get*",
"waf-regional:List*"
],
"Effect": "Allow",
"Resource": "*"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/",
"PolicyId": "ANPAINZVDMX2SBF7EU2OC",
"PolicyName": "AWSWAFReadOnlyAccess",
"UpdateDate": "2016-12-07T21:30:54+00:00",
"VersionId": "v2"
},
"AWSXrayFullAccess": {
"Arn": "arn:aws:iam::aws:policy/AWSXrayFullAccess",
"AttachmentCount": 0,
"CreateDate": "2016-12-01T18:30:55+00:00",
"DefaultVersionId": "v1",
"Document": {
"Statement": [
{
"Action": [
"xray:*"
],
"Effect": "Allow",
"Resource": [
"*"
]
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/",
"PolicyId": "ANPAJQBYG45NSJMVQDB2K",
"PolicyName": "AWSXrayFullAccess",
"UpdateDate": "2016-12-01T18:30:55+00:00",
"VersionId": "v1"
},
"AWSXrayReadOnlyAccess": {
"Arn": "arn:aws:iam::aws:policy/AWSXrayReadOnlyAccess",
"AttachmentCount": 0,
"CreateDate": "2016-12-01T18:27:02+00:00",
"DefaultVersionId": "v1",
"Document": {
"Statement": [
{
"Action": [
"xray:BatchGetTraces",
"xray:GetServiceGraph",
"xray:GetTraceGraph",
"xray:GetTraceSummaries"
],
"Effect": "Allow",
"Resource": [
"*"
]
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/",
"PolicyId": "ANPAIH4OFXWPS6ZX6OPGQ",
"PolicyName": "AWSXrayReadOnlyAccess",
"UpdateDate": "2016-12-01T18:27:02+00:00",
"VersionId": "v1"
},
"AWSXrayWriteOnlyAccess": {
"Arn": "arn:aws:iam::aws:policy/AWSXrayWriteOnlyAccess",
"AttachmentCount": 0,
"CreateDate": "2016-12-01T18:19:53+00:00",
"DefaultVersionId": "v1",
"Document": {
"Statement": [
{
"Action": [
"xray:PutTraceSegments",
"xray:PutTelemetryRecords"
],
"Effect": "Allow",
"Resource": [
"*"
]
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/",
"PolicyId": "ANPAIAACM4LMYSRGBCTM6",
"PolicyName": "AWSXrayWriteOnlyAccess",
"UpdateDate": "2016-12-01T18:19:53+00:00",
"VersionId": "v1"
},
"AdministratorAccess": {
"Arn": "arn:aws:iam::aws:policy/AdministratorAccess",
"AttachmentCount": 3,
"CreateDate": "2015-02-06T18:39:46+00:00",
"DefaultVersionId": "v1",
"Document": {
"Statement": [
{
"Action": "*",
"Effect": "Allow",
"Resource": "*"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/",
"PolicyId": "ANPAIWMBCKSKIEE64ZLYK",
"PolicyName": "AdministratorAccess",
"UpdateDate": "2015-02-06T18:39:46+00:00",
"VersionId": "v1"
},
"AmazonAPIGatewayAdministrator": {
"Arn": "arn:aws:iam::aws:policy/AmazonAPIGatewayAdministrator",
"AttachmentCount": 0,
"CreateDate": "2015-07-09T17:34:45+00:00",
"DefaultVersionId": "v1",
"Document": {
"Statement": [
{
"Action": [
"apigateway:*"
],
"Effect": "Allow",
"Resource": "arn:aws:apigateway:*::/*"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/",
"PolicyId": "ANPAJ4PT6VY5NLKTNUYSI",
"PolicyName": "AmazonAPIGatewayAdministrator",
"UpdateDate": "2015-07-09T17:34:45+00:00",
"VersionId": "v1"
},
"AmazonAPIGatewayInvokeFullAccess": {
"Arn": "arn:aws:iam::aws:policy/AmazonAPIGatewayInvokeFullAccess",
"AttachmentCount": 0,
"CreateDate": "2015-07-09T17:36:12+00:00",
"DefaultVersionId": "v1",
"Document": {
"Statement": [
{
"Action": [
"execute-api:Invoke"
],
"Effect": "Allow",
"Resource": "arn:aws:execute-api:*:*:*"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/",
"PolicyId": "ANPAIIWAX2NOOQJ4AIEQ6",
"PolicyName": "AmazonAPIGatewayInvokeFullAccess",
"UpdateDate": "2015-07-09T17:36:12+00:00",
"VersionId": "v1"
},
"AmazonAPIGatewayPushToCloudWatchLogs": {
"Arn": "arn:aws:iam::aws:policy/service-role/AmazonAPIGatewayPushToCloudWatchLogs",
"AttachmentCount": 0,
"CreateDate": "2015-11-11T23:41:46+00:00",
"DefaultVersionId": "v1",
"Document": {
"Statement": [
{
"Action": [
"logs:CreateLogGroup",
"logs:CreateLogStream",
"logs:DescribeLogGroups",
"logs:DescribeLogStreams",
"logs:PutLogEvents",
"logs:GetLogEvents",
"logs:FilterLogEvents"
],
"Effect": "Allow",
"Resource": "*"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/service-role/",
"PolicyId": "ANPAIK4GFO7HLKYN64ASK",
"PolicyName": "AmazonAPIGatewayPushToCloudWatchLogs",
"UpdateDate": "2015-11-11T23:41:46+00:00",
"VersionId": "v1"
},
"AmazonAppStreamFullAccess": {
"Arn": "arn:aws:iam::aws:policy/AmazonAppStreamFullAccess",
"AttachmentCount": 0,
"CreateDate": "2017-09-07T23:56:23+00:00",
"DefaultVersionId": "v2",
"Document": {
"Statement": [
{
"Action": [
"appstream:*"
],
"Effect": "Allow",
"Resource": "*"
},
{
"Action": [
"application-autoscaling:DeleteScalingPolicy",
"application-autoscaling:DescribeScalableTargets",
"application-autoscaling:DescribeScalingPolicies",
"application-autoscaling:PutScalingPolicy",
"application-autoscaling:RegisterScalableTarget"
],
"Effect": "Allow",
"Resource": "*"
},
{
"Action": [
"cloudwatch:DeleteAlarms",
"cloudwatch:DescribeAlarms",
"cloudwatch:GetMetricStatistics",
"cloudwatch:PutMetricAlarm"
],
"Effect": "Allow",
"Resource": "*"
},
{
"Action": [
"ec2:DescribeRouteTables",
"ec2:DescribeSecurityGroups",
"ec2:DescribeSubnets",
"ec2:DescribeVpcs"
],
"Effect": "Allow",
"Resource": "*"
},
{
"Action": "iam:ListRoles",
"Effect": "Allow",
"Resource": "*"
},
{
"Action": "iam:PassRole",
"Condition": {
"StringLike": {
"iam:PassedToService": "application-autoscaling.amazonaws.com"
}
},
"Effect": "Allow",
"Resource": "arn:aws:iam::*:role/service-role/ApplicationAutoScalingForAmazonAppStreamAccess"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/",
"PolicyId": "ANPAJLZZXU2YQVGL4QDNC",
"PolicyName": "AmazonAppStreamFullAccess",
"UpdateDate": "2017-09-07T23:56:23+00:00",
"VersionId": "v2"
},
"AmazonAppStreamReadOnlyAccess": {
"Arn": "arn:aws:iam::aws:policy/AmazonAppStreamReadOnlyAccess",
"AttachmentCount": 0,
"CreateDate": "2016-12-07T21:00:06+00:00",
"DefaultVersionId": "v2",
"Document": {
"Statement": [
{
"Action": [
"appstream:Get*",
"appstream:List*",
"appstream:Describe*"
],
"Effect": "Allow",
"Resource": "*"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/",
"PolicyId": "ANPAJXIFDGB4VBX23DX7K",
"PolicyName": "AmazonAppStreamReadOnlyAccess",
"UpdateDate": "2016-12-07T21:00:06+00:00",
"VersionId": "v2"
},
"AmazonAppStreamServiceAccess": {
"Arn": "arn:aws:iam::aws:policy/service-role/AmazonAppStreamServiceAccess",
"AttachmentCount": 0,
"CreateDate": "2017-05-23T23:00:47+00:00",
"DefaultVersionId": "v3",
"Document": {
"Statement": [
{
"Action": [
"ec2:DescribeVpcs",
"ec2:DescribeSubnets",
"ec2:DescribeAvailabilityZones",
"ec2:CreateNetworkInterface",
"ec2:DescribeNetworkInterfaces",
"ec2:DeleteNetworkInterface",
"ec2:DescribeSubnets",
"ec2:AssociateAddress",
"ec2:DisassociateAddress",
"ec2:DescribeRouteTables",
"ec2:DescribeSecurityGroups"
],
"Effect": "Allow",
"Resource": "*"
},
{
"Action": [
"s3:CreateBucket",
"s3:ListBucket",
"s3:GetObject",
"s3:PutObject",
"s3:DeleteObject",
"s3:GetObjectVersion",
"s3:DeleteObjectVersion",
"s3:PutBucketPolicy"
],
"Effect": "Allow",
"Resource": "arn:aws:s3:::appstream2-36fb080bb8-*"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/service-role/",
"PolicyId": "ANPAISBRZ7LMMCBYEF3SE",
"PolicyName": "AmazonAppStreamServiceAccess",
"UpdateDate": "2017-05-23T23:00:47+00:00",
"VersionId": "v3"
},
"AmazonAthenaFullAccess": {
"Arn": "arn:aws:iam::aws:policy/AmazonAthenaFullAccess",
"AttachmentCount": 0,
"CreateDate": "2017-09-13T00:13:48+00:00",
"DefaultVersionId": "v3",
"Document": {
"Statement": [
{
"Action": [
"athena:*"
],
"Effect": "Allow",
"Resource": [
"*"
]
},
{
"Action": [
"glue:CreateDatabase",
"glue:DeleteDatabase",
"glue:GetDatabase",
"glue:GetDatabases",
"glue:UpdateDatabase",
"glue:CreateTable",
"glue:DeleteTable",
"glue:BatchDeleteTable",
"glue:UpdateTable",
"glue:GetTable",
"glue:GetTables",
"glue:BatchCreatePartition",
"glue:CreatePartition",
"glue:DeletePartition",
"glue:BatchDeletePartition",
"glue:UpdatePartition",
"glue:GetPartition",
"glue:GetPartitions",
"glue:BatchGetPartition"
],
"Effect": "Allow",
"Resource": [
"*"
]
},
{
"Action": [
"s3:GetBucketLocation",
"s3:GetObject",
"s3:ListBucket",
"s3:ListBucketMultipartUploads",
"s3:ListMultipartUploadParts",
"s3:AbortMultipartUpload",
"s3:CreateBucket",
"s3:PutObject"
],
"Effect": "Allow",
"Resource": [
"arn:aws:s3:::aws-athena-query-results-*"
]
},
{
"Action": [
"s3:GetObject"
],
"Effect": "Allow",
"Resource": [
"arn:aws:s3:::athena-examples*"
]
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/",
"PolicyId": "ANPAIPJMLMD4C7RYZ6XCK",
"PolicyName": "AmazonAthenaFullAccess",
"UpdateDate": "2017-09-13T00:13:48+00:00",
"VersionId": "v3"
},
"AmazonCloudDirectoryFullAccess": {
"Arn": "arn:aws:iam::aws:policy/AmazonCloudDirectoryFullAccess",
"AttachmentCount": 0,
"CreateDate": "2017-02-25T00:41:39+00:00",
"DefaultVersionId": "v1",
"Document": {
"Statement": [
{
"Action": [
"clouddirectory:*"
],
"Effect": "Allow",
"Resource": [
"*"
]
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/",
"PolicyId": "ANPAJG3XQK77ATFLCF2CK",
"PolicyName": "AmazonCloudDirectoryFullAccess",
"UpdateDate": "2017-02-25T00:41:39+00:00",
"VersionId": "v1"
},
"AmazonCloudDirectoryReadOnlyAccess": {
"Arn": "arn:aws:iam::aws:policy/AmazonCloudDirectoryReadOnlyAccess",
"AttachmentCount": 0,
"CreateDate": "2017-02-28T23:42:06+00:00",
"DefaultVersionId": "v1",
"Document": {
"Statement": [
{
"Action": [
"clouddirectory:List*",
"clouddirectory:Get*",
"clouddirectory:LookupPolicy",
"clouddirectory:BatchRead"
],
"Effect": "Allow",
"Resource": [
"*"
]
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/",
"PolicyId": "ANPAICMSZQGR3O62KMD6M",
"PolicyName": "AmazonCloudDirectoryReadOnlyAccess",
"UpdateDate": "2017-02-28T23:42:06+00:00",
"VersionId": "v1"
},
"AmazonCognitoDeveloperAuthenticatedIdentities": {
"Arn": "arn:aws:iam::aws:policy/AmazonCognitoDeveloperAuthenticatedIdentities",
"AttachmentCount": 0,
"CreateDate": "2015-03-24T17:22:23+00:00",
"DefaultVersionId": "v1",
"Document": {
"Statement": [
{
"Action": [
"cognito-identity:GetOpenIdTokenForDeveloperIdentity",
"cognito-identity:LookupDeveloperIdentity",
"cognito-identity:MergeDeveloperIdentities",
"cognito-identity:UnlinkDeveloperIdentity"
],
"Effect": "Allow",
"Resource": "*"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/",
"PolicyId": "ANPAIQOKZ5BGKLCMTXH4W",
"PolicyName": "AmazonCognitoDeveloperAuthenticatedIdentities",
"UpdateDate": "2015-03-24T17:22:23+00:00",
"VersionId": "v1"
},
"AmazonCognitoPowerUser": {
"Arn": "arn:aws:iam::aws:policy/AmazonCognitoPowerUser",
"AttachmentCount": 0,
"CreateDate": "2016-06-02T16:57:56+00:00",
"DefaultVersionId": "v2",
"Document": {
"Statement": [
{
"Action": [
"cognito-identity:*",
"cognito-idp:*",
"cognito-sync:*",
"iam:ListRoles",
"iam:ListOpenIdConnectProviders",
"sns:ListPlatformApplications"
],
"Effect": "Allow",
"Resource": "*"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/",
"PolicyId": "ANPAJKW5H2HNCPGCYGR6Y",
"PolicyName": "AmazonCognitoPowerUser",
"UpdateDate": "2016-06-02T16:57:56+00:00",
"VersionId": "v2"
},
"AmazonCognitoReadOnly": {
"Arn": "arn:aws:iam::aws:policy/AmazonCognitoReadOnly",
"AttachmentCount": 0,
"CreateDate": "2016-06-02T17:30:24+00:00",
"DefaultVersionId": "v2",
"Document": {
"Statement": [
{
"Action": [
"cognito-identity:Describe*",
"cognito-identity:Get*",
"cognito-identity:List*",
"cognito-idp:Describe*",
"cognito-idp:AdminGetUser",
"cognito-idp:List*",
"cognito-sync:Describe*",
"cognito-sync:Get*",
"cognito-sync:List*",
"iam:ListOpenIdConnectProviders",
"iam:ListRoles",
"sns:ListPlatformApplications"
],
"Effect": "Allow",
"Resource": "*"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/",
"PolicyId": "ANPAJBFTRZD2GQGJHSVQK",
"PolicyName": "AmazonCognitoReadOnly",
"UpdateDate": "2016-06-02T17:30:24+00:00",
"VersionId": "v2"
},
"AmazonDMSCloudWatchLogsRole": {
"Arn": "arn:aws:iam::aws:policy/service-role/AmazonDMSCloudWatchLogsRole",
"AttachmentCount": 0,
"CreateDate": "2016-01-07T23:44:53+00:00",
"DefaultVersionId": "v1",
"Document": {
"Statement": [
{
"Action": [
"logs:DescribeLogGroups"
],
"Effect": "Allow",
"Resource": [
"*"
],
"Sid": "AllowDescribeOnAllLogGroups"
},
{
"Action": [
"logs:DescribeLogStreams"
],
"Effect": "Allow",
"Resource": [
"arn:aws:logs:*:*:log-group:dms-tasks-*"
],
"Sid": "AllowDescribeOfAllLogStreamsOnDmsTasksLogGroup"
},
{
"Action": [
"logs:CreateLogGroup"
],
"Effect": "Allow",
"Resource": [
"arn:aws:logs:*:*:log-group:dms-tasks-*"
],
"Sid": "AllowCreationOfDmsTasksLogGroups"
},
{
"Action": [
"logs:CreateLogStream"
],
"Effect": "Allow",
"Resource": [
"arn:aws:logs:*:*:log-group:dms-tasks-*:log-stream:dms-task-*"
],
"Sid": "AllowCreationOfDmsTaskLogStream"
},
{
"Action": [
"logs:PutLogEvents"
],
"Effect": "Allow",
"Resource": [
"arn:aws:logs:*:*:log-group:dms-tasks-*:log-stream:dms-task-*"
],
"Sid": "AllowUploadOfLogEventsToDmsTaskLogStream"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/service-role/",
"PolicyId": "ANPAJBG7UXZZXUJD3TDJE",
"PolicyName": "AmazonDMSCloudWatchLogsRole",
"UpdateDate": "2016-01-07T23:44:53+00:00",
"VersionId": "v1"
},
"AmazonDMSRedshiftS3Role": {
"Arn": "arn:aws:iam::aws:policy/service-role/AmazonDMSRedshiftS3Role",
"AttachmentCount": 0,
"CreateDate": "2016-04-20T17:05:56+00:00",
"DefaultVersionId": "v1",
"Document": {
"Statement": [
{
"Action": [
"s3:CreateBucket",
"s3:ListBucket",
"s3:DeleteBucket",
"s3:GetBucketLocation",
"s3:GetObject",
"s3:PutObject",
"s3:DeleteObject",
"s3:GetObjectVersion",
"s3:GetBucketPolicy",
"s3:PutBucketPolicy",
"s3:DeleteBucketPolicy"
],
"Effect": "Allow",
"Resource": "arn:aws:s3:::dms-*"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/service-role/",
"PolicyId": "ANPAI3CCUQ4U5WNC5F6B6",
"PolicyName": "AmazonDMSRedshiftS3Role",
"UpdateDate": "2016-04-20T17:05:56+00:00",
"VersionId": "v1"
},
"AmazonDMSVPCManagementRole": {
"Arn": "arn:aws:iam::aws:policy/service-role/AmazonDMSVPCManagementRole",
"AttachmentCount": 0,
"CreateDate": "2016-05-23T16:29:57+00:00",
"DefaultVersionId": "v3",
"Document": {
"Statement": [
{
"Action": [
"ec2:CreateNetworkInterface",
"ec2:DescribeAvailabilityZones",
"ec2:DescribeInternetGateways",
"ec2:DescribeSecurityGroups",
"ec2:DescribeSubnets",
"ec2:DescribeVpcs",
"ec2:DeleteNetworkInterface",
"ec2:ModifyNetworkInterfaceAttribute"
],
"Effect": "Allow",
"Resource": "*"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/service-role/",
"PolicyId": "ANPAJHKIGMBQI4AEFFSYO",
"PolicyName": "AmazonDMSVPCManagementRole",
"UpdateDate": "2016-05-23T16:29:57+00:00",
"VersionId": "v3"
},
"AmazonDRSVPCManagement": {
"Arn": "arn:aws:iam::aws:policy/AmazonDRSVPCManagement",
"AttachmentCount": 0,
"CreateDate": "2015-09-02T00:09:20+00:00",
"DefaultVersionId": "v1",
"Document": {
"Statement": [
{
"Action": [
"ec2:AuthorizeSecurityGroupIngress",
"ec2:CreateNetworkInterface",
"ec2:CreateSecurityGroup",
"ec2:DescribeAvailabilityZones",
"ec2:DescribeInternetGateways",
"ec2:DescribeSecurityGroups",
"ec2:DescribeSubnets",
"ec2:DescribeVpcAttribute",
"ec2:DescribeVpcs",
"ec2:DeleteNetworkInterface",
"ec2:DeleteSecurityGroup",
"ec2:ModifyNetworkInterfaceAttribute",
"ec2:RevokeSecurityGroupIngress"
],
"Effect": "Allow",
"Resource": "*"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/",
"PolicyId": "ANPAJPXIBTTZMBEFEX6UA",
"PolicyName": "AmazonDRSVPCManagement",
"UpdateDate": "2015-09-02T00:09:20+00:00",
"VersionId": "v1"
},
"AmazonDynamoDBFullAccess": {
"Arn": "arn:aws:iam::aws:policy/AmazonDynamoDBFullAccess",
"AttachmentCount": 0,
"CreateDate": "2017-06-28T23:23:34+00:00",
"DefaultVersionId": "v5",
"Document": {
"Statement": [
{
"Action": [
"dynamodb:*",
"dax:*",
"application-autoscaling:DeleteScalingPolicy",
"application-autoscaling:DeregisterScalableTarget",
"application-autoscaling:DescribeScalableTargets",
"application-autoscaling:DescribeScalingActivities",
"application-autoscaling:DescribeScalingPolicies",
"application-autoscaling:PutScalingPolicy",
"application-autoscaling:RegisterScalableTarget",
"cloudwatch:DeleteAlarms",
"cloudwatch:DescribeAlarmHistory",
"cloudwatch:DescribeAlarms",
"cloudwatch:DescribeAlarmsForMetric",
"cloudwatch:GetMetricStatistics",
"cloudwatch:ListMetrics",
"cloudwatch:PutMetricAlarm",
"datapipeline:ActivatePipeline",
"datapipeline:CreatePipeline",
"datapipeline:DeletePipeline",
"datapipeline:DescribeObjects",
"datapipeline:DescribePipelines",
"datapipeline:GetPipelineDefinition",
"datapipeline:ListPipelines",
"datapipeline:PutPipelineDefinition",
"datapipeline:QueryObjects",
"ec2:DescribeVpcs",
"ec2:DescribeSubnets",
"ec2:DescribeSecurityGroups",
"iam:GetRole",
"iam:ListRoles",
"sns:CreateTopic",
"sns:DeleteTopic",
"sns:ListSubscriptions",
"sns:ListSubscriptionsByTopic",
"sns:ListTopics",
"sns:Subscribe",
"sns:Unsubscribe",
"sns:SetTopicAttributes",
"lambda:CreateFunction",
"lambda:ListFunctions",
"lambda:ListEventSourceMappings",
"lambda:CreateEventSourceMapping",
"lambda:DeleteEventSourceMapping",
"lambda:GetFunctionConfiguration",
"lambda:DeleteFunction"
],
"Effect": "Allow",
"Resource": "*"
},
{
"Action": [
"iam:PassRole"
],
"Condition": {
"StringLike": {
"iam:PassedToService": [
"application-autoscaling.amazonaws.com",
"dax.amazonaws.com"
]
}
},
"Effect": "Allow",
"Resource": "*"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/",
"PolicyId": "ANPAINUGF2JSOSUY76KYA",
"PolicyName": "AmazonDynamoDBFullAccess",
"UpdateDate": "2017-06-28T23:23:34+00:00",
"VersionId": "v5"
},
"AmazonDynamoDBFullAccesswithDataPipeline": {
"Arn": "arn:aws:iam::aws:policy/AmazonDynamoDBFullAccesswithDataPipeline",
"AttachmentCount": 0,
"CreateDate": "2015-11-12T02:17:42+00:00",
"DefaultVersionId": "v2",
"Document": {
"Statement": [
{
"Action": [
"cloudwatch:DeleteAlarms",
"cloudwatch:DescribeAlarmHistory",
"cloudwatch:DescribeAlarms",
"cloudwatch:DescribeAlarmsForMetric",
"cloudwatch:GetMetricStatistics",
"cloudwatch:ListMetrics",
"cloudwatch:PutMetricAlarm",
"dynamodb:*",
"sns:CreateTopic",
"sns:DeleteTopic",
"sns:ListSubscriptions",
"sns:ListSubscriptionsByTopic",
"sns:ListTopics",
"sns:Subscribe",
"sns:Unsubscribe",
"sns:SetTopicAttributes"
],
"Effect": "Allow",
"Resource": "*",
"Sid": "DDBConsole"
},
{
"Action": [
"lambda:*",
"iam:ListRoles"
],
"Effect": "Allow",
"Resource": "*",
"Sid": "DDBConsoleTriggers"
},
{
"Action": [
"datapipeline:*",
"iam:ListRoles"
],
"Effect": "Allow",
"Resource": "*",
"Sid": "DDBConsoleImportExport"
},
{
"Action": [
"iam:GetRolePolicy",
"iam:PassRole"
],
"Effect": "Allow",
"Resource": [
"*"
],
"Sid": "IAMEDPRoles"
},
{
"Action": [
"ec2:CreateTags",
"ec2:DescribeInstances",
"ec2:RunInstances",
"ec2:StartInstances",
"ec2:StopInstances",
"ec2:TerminateInstances",
"elasticmapreduce:*",
"datapipeline:*"
],
"Effect": "Allow",
"Resource": "*",
"Sid": "EMR"
},
{
"Action": [
"s3:DeleteObject",
"s3:Get*",
"s3:List*",
"s3:Put*"
],
"Effect": "Allow",
"Resource": [
"*"
],
"Sid": "S3"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/",
"PolicyId": "ANPAJ3ORT7KDISSXGHJXA",
"PolicyName": "AmazonDynamoDBFullAccesswithDataPipeline",
"UpdateDate": "2015-11-12T02:17:42+00:00",
"VersionId": "v2"
},
"AmazonDynamoDBReadOnlyAccess": {
"Arn": "arn:aws:iam::aws:policy/AmazonDynamoDBReadOnlyAccess",
"AttachmentCount": 0,
"CreateDate": "2017-06-12T21:11:40+00:00",
"DefaultVersionId": "v5",
"Document": {
"Statement": [
{
"Action": [
"application-autoscaling:DescribeScalableTargets",
"application-autoscaling:DescribeScalingActivities",
"application-autoscaling:DescribeScalingPolicies",
"cloudwatch:DescribeAlarmHistory",
"cloudwatch:DescribeAlarms",
"cloudwatch:DescribeAlarmsForMetric",
"cloudwatch:GetMetricStatistics",
"cloudwatch:ListMetrics",
"datapipeline:DescribeObjects",
"datapipeline:DescribePipelines",
"datapipeline:GetPipelineDefinition",
"datapipeline:ListPipelines",
"datapipeline:QueryObjects",
"dynamodb:BatchGetItem",
"dynamodb:DescribeTable",
"dynamodb:GetItem",
"dynamodb:ListTables",
"dynamodb:Query",
"dynamodb:Scan",
"dynamodb:DescribeReservedCapacity",
"dynamodb:DescribeReservedCapacityOfferings",
"dynamodb:ListTagsOfResource",
"dynamodb:DescribeTimeToLive",
"dynamodb:DescribeLimits",
"iam:GetRole",
"iam:ListRoles",
"sns:ListSubscriptionsByTopic",
"sns:ListTopics",
"lambda:ListFunctions",
"lambda:ListEventSourceMappings",
"lambda:GetFunctionConfiguration"
],
"Effect": "Allow",
"Resource": "*"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/",
"PolicyId": "ANPAIY2XFNA232XJ6J7X2",
"PolicyName": "AmazonDynamoDBReadOnlyAccess",
"UpdateDate": "2017-06-12T21:11:40+00:00",
"VersionId": "v5"
},
"AmazonEC2ContainerRegistryFullAccess": {
"Arn": "arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryFullAccess",
"AttachmentCount": 0,
"CreateDate": "2015-12-21T17:06:48+00:00",
"DefaultVersionId": "v1",
"Document": {
"Statement": [
{
"Action": [
"ecr:*"
],
"Effect": "Allow",
"Resource": "*"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/",
"PolicyId": "ANPAIESRL7KD7IIVF6V4W",
"PolicyName": "AmazonEC2ContainerRegistryFullAccess",
"UpdateDate": "2015-12-21T17:06:48+00:00",
"VersionId": "v1"
},
"AmazonEC2ContainerRegistryPowerUser": {
"Arn": "arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryPowerUser",
"AttachmentCount": 0,
"CreateDate": "2016-10-11T22:28:07+00:00",
"DefaultVersionId": "v2",
"Document": {
"Statement": [
{
"Action": [
"ecr:GetAuthorizationToken",
"ecr:BatchCheckLayerAvailability",
"ecr:GetDownloadUrlForLayer",
"ecr:GetRepositoryPolicy",
"ecr:DescribeRepositories",
"ecr:ListImages",
"ecr:DescribeImages",
"ecr:BatchGetImage",
"ecr:InitiateLayerUpload",
"ecr:UploadLayerPart",
"ecr:CompleteLayerUpload",
"ecr:PutImage"
],
"Effect": "Allow",
"Resource": "*"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/",
"PolicyId": "ANPAJDNE5PIHROIBGGDDW",
"PolicyName": "AmazonEC2ContainerRegistryPowerUser",
"UpdateDate": "2016-10-11T22:28:07+00:00",
"VersionId": "v2"
},
"AmazonEC2ContainerRegistryReadOnly": {
"Arn": "arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly",
"AttachmentCount": 0,
"CreateDate": "2016-10-11T22:08:43+00:00",
"DefaultVersionId": "v2",
"Document": {
"Statement": [
{
"Action": [
"ecr:GetAuthorizationToken",
"ecr:BatchCheckLayerAvailability",
"ecr:GetDownloadUrlForLayer",
"ecr:GetRepositoryPolicy",
"ecr:DescribeRepositories",
"ecr:ListImages",
"ecr:DescribeImages",
"ecr:BatchGetImage"
],
"Effect": "Allow",
"Resource": "*"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/",
"PolicyId": "ANPAIFYZPA37OOHVIH7KQ",
"PolicyName": "AmazonEC2ContainerRegistryReadOnly",
"UpdateDate": "2016-10-11T22:08:43+00:00",
"VersionId": "v2"
},
"AmazonEC2ContainerServiceAutoscaleRole": {
"Arn": "arn:aws:iam::aws:policy/service-role/AmazonEC2ContainerServiceAutoscaleRole",
"AttachmentCount": 1,
"CreateDate": "2016-05-12T23:25:44+00:00",
"DefaultVersionId": "v1",
"Document": {
"Statement": [
{
"Action": [
"ecs:DescribeServices",
"ecs:UpdateService"
],
"Effect": "Allow",
"Resource": [
"*"
]
},
{
"Action": [
"cloudwatch:DescribeAlarms"
],
"Effect": "Allow",
"Resource": [
"*"
]
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/service-role/",
"PolicyId": "ANPAIUAP3EGGGXXCPDQKK",
"PolicyName": "AmazonEC2ContainerServiceAutoscaleRole",
"UpdateDate": "2016-05-12T23:25:44+00:00",
"VersionId": "v1"
},
"AmazonEC2ContainerServiceEventsRole": {
"Arn": "arn:aws:iam::aws:policy/service-role/AmazonEC2ContainerServiceEventsRole",
"AttachmentCount": 0,
"CreateDate": "2017-05-30T16:51:35+00:00",
"DefaultVersionId": "v1",
"Document": {
"Statement": [
{
"Action": [
"ecs:RunTask"
],
"Effect": "Allow",
"Resource": [
"*"
]
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/service-role/",
"PolicyId": "ANPAITKFNIUAG27VSYNZ4",
"PolicyName": "AmazonEC2ContainerServiceEventsRole",
"UpdateDate": "2017-05-30T16:51:35+00:00",
"VersionId": "v1"
},
"AmazonEC2ContainerServiceFullAccess": {
"Arn": "arn:aws:iam::aws:policy/AmazonEC2ContainerServiceFullAccess",
"AttachmentCount": 0,
"CreateDate": "2017-06-08T00:18:56+00:00",
"DefaultVersionId": "v4",
"Document": {
"Statement": [
{
"Action": [
"autoscaling:Describe*",
"autoscaling:UpdateAutoScalingGroup",
"cloudformation:CreateStack",
"cloudformation:DeleteStack",
"cloudformation:DescribeStack*",
"cloudformation:UpdateStack",
"cloudwatch:GetMetricStatistics",
"ec2:Describe*",
"elasticloadbalancing:*",
"ecs:*",
"events:DescribeRule",
"events:DeleteRule",
"events:ListRuleNamesByTarget",
"events:ListTargetsByRule",
"events:PutRule",
"events:PutTargets",
"events:RemoveTargets",
"iam:ListInstanceProfiles",
"iam:ListRoles",
"iam:PassRole"
],
"Effect": "Allow",
"Resource": "*"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/",
"PolicyId": "ANPAJALOYVTPDZEMIACSM",
"PolicyName": "AmazonEC2ContainerServiceFullAccess",
"UpdateDate": "2017-06-08T00:18:56+00:00",
"VersionId": "v4"
},
"AmazonEC2ContainerServiceRole": {
"Arn": "arn:aws:iam::aws:policy/service-role/AmazonEC2ContainerServiceRole",
"AttachmentCount": 1,
"CreateDate": "2016-08-11T13:08:01+00:00",
"DefaultVersionId": "v2",
"Document": {
"Statement": [
{
"Action": [
"ec2:AuthorizeSecurityGroupIngress",
"ec2:Describe*",
"elasticloadbalancing:DeregisterInstancesFromLoadBalancer",
"elasticloadbalancing:DeregisterTargets",
"elasticloadbalancing:Describe*",
"elasticloadbalancing:RegisterInstancesWithLoadBalancer",
"elasticloadbalancing:RegisterTargets"
],
"Effect": "Allow",
"Resource": "*"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/service-role/",
"PolicyId": "ANPAJO53W2XHNACG7V77Q",
"PolicyName": "AmazonEC2ContainerServiceRole",
"UpdateDate": "2016-08-11T13:08:01+00:00",
"VersionId": "v2"
},
"AmazonEC2ContainerServiceforEC2Role": {
"Arn": "arn:aws:iam::aws:policy/service-role/AmazonEC2ContainerServiceforEC2Role",
"AttachmentCount": 1,
"CreateDate": "2017-05-17T23:09:13+00:00",
"DefaultVersionId": "v5",
"Document": {
"Statement": [
{
"Action": [
"ecs:CreateCluster",
"ecs:DeregisterContainerInstance",
"ecs:DiscoverPollEndpoint",
"ecs:Poll",
"ecs:RegisterContainerInstance",
"ecs:StartTelemetrySession",
"ecs:UpdateContainerInstancesState",
"ecs:Submit*",
"ecr:GetAuthorizationToken",
"ecr:BatchCheckLayerAvailability",
"ecr:GetDownloadUrlForLayer",
"ecr:BatchGetImage",
"logs:CreateLogStream",
"logs:PutLogEvents"
],
"Effect": "Allow",
"Resource": "*"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/service-role/",
"PolicyId": "ANPAJLYJCVHC7TQHCSQDS",
"PolicyName": "AmazonEC2ContainerServiceforEC2Role",
"UpdateDate": "2017-05-17T23:09:13+00:00",
"VersionId": "v5"
},
"AmazonEC2FullAccess": {
"Arn": "arn:aws:iam::aws:policy/AmazonEC2FullAccess",
"AttachmentCount": 1,
"CreateDate": "2015-02-06T18:40:15+00:00",
"DefaultVersionId": "v1",
"Document": {
"Statement": [
{
"Action": "ec2:*",
"Effect": "Allow",
"Resource": "*"
},
{
"Action": "elasticloadbalancing:*",
"Effect": "Allow",
"Resource": "*"
},
{
"Action": "cloudwatch:*",
"Effect": "Allow",
"Resource": "*"
},
{
"Action": "autoscaling:*",
"Effect": "Allow",
"Resource": "*"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/",
"PolicyId": "ANPAI3VAJF5ZCRZ7MCQE6",
"PolicyName": "AmazonEC2FullAccess",
"UpdateDate": "2015-02-06T18:40:15+00:00",
"VersionId": "v1"
},
"AmazonEC2ReadOnlyAccess": {
"Arn": "arn:aws:iam::aws:policy/AmazonEC2ReadOnlyAccess",
"AttachmentCount": 0,
"CreateDate": "2015-02-06T18:40:17+00:00",
"DefaultVersionId": "v1",
"Document": {
"Statement": [
{
"Action": "ec2:Describe*",
"Effect": "Allow",
"Resource": "*"
},
{
"Action": "elasticloadbalancing:Describe*",
"Effect": "Allow",
"Resource": "*"
},
{
"Action": [
"cloudwatch:ListMetrics",
"cloudwatch:GetMetricStatistics",
"cloudwatch:Describe*"
],
"Effect": "Allow",
"Resource": "*"
},
{
"Action": "autoscaling:Describe*",
"Effect": "Allow",
"Resource": "*"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/",
"PolicyId": "ANPAIGDT4SV4GSETWTBZK",
"PolicyName": "AmazonEC2ReadOnlyAccess",
"UpdateDate": "2015-02-06T18:40:17+00:00",
"VersionId": "v1"
},
"AmazonEC2ReportsAccess": {
"Arn": "arn:aws:iam::aws:policy/AmazonEC2ReportsAccess",
"AttachmentCount": 0,
"CreateDate": "2015-02-06T18:40:16+00:00",
"DefaultVersionId": "v1",
"Document": {
"Statement": [
{
"Action": "ec2-reports:*",
"Effect": "Allow",
"Resource": "*"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/",
"PolicyId": "ANPAIU6NBZVF2PCRW36ZW",
"PolicyName": "AmazonEC2ReportsAccess",
"UpdateDate": "2015-02-06T18:40:16+00:00",
"VersionId": "v1"
},
"AmazonEC2RoleforAWSCodeDeploy": {
"Arn": "arn:aws:iam::aws:policy/service-role/AmazonEC2RoleforAWSCodeDeploy",
"AttachmentCount": 0,
"CreateDate": "2017-03-20T17:14:10+00:00",
"DefaultVersionId": "v2",
"Document": {
"Statement": [
{
"Action": [
"s3:GetObject",
"s3:GetObjectVersion",
"s3:ListBucket"
],
"Effect": "Allow",
"Resource": "*"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/service-role/",
"PolicyId": "ANPAIAZKXZ27TAJ4PVWGK",
"PolicyName": "AmazonEC2RoleforAWSCodeDeploy",
"UpdateDate": "2017-03-20T17:14:10+00:00",
"VersionId": "v2"
},
"AmazonEC2RoleforDataPipelineRole": {
"Arn": "arn:aws:iam::aws:policy/service-role/AmazonEC2RoleforDataPipelineRole",
"AttachmentCount": 0,
"CreateDate": "2016-02-22T17:24:05+00:00",
"DefaultVersionId": "v3",
"Document": {
"Statement": [
{
"Action": [
"cloudwatch:*",
"datapipeline:*",
"dynamodb:*",
"ec2:Describe*",
"elasticmapreduce:AddJobFlowSteps",
"elasticmapreduce:Describe*",
"elasticmapreduce:ListInstance*",
"elasticmapreduce:ModifyInstanceGroups",
"rds:Describe*",
"redshift:DescribeClusters",
"redshift:DescribeClusterSecurityGroups",
"s3:*",
"sdb:*",
"sns:*",
"sqs:*"
],
"Effect": "Allow",
"Resource": [
"*"
]
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/service-role/",
"PolicyId": "ANPAJ3Z5I2WAJE5DN2J36",
"PolicyName": "AmazonEC2RoleforDataPipelineRole",
"UpdateDate": "2016-02-22T17:24:05+00:00",
"VersionId": "v3"
},
"AmazonEC2RoleforSSM": {
"Arn": "arn:aws:iam::aws:policy/service-role/AmazonEC2RoleforSSM",
"AttachmentCount": 0,
"CreateDate": "2017-08-10T20:49:08+00:00",
"DefaultVersionId": "v4",
"Document": {
"Statement": [
{
"Action": [
"ssm:DescribeAssociation",
"ssm:GetDeployablePatchSnapshotForInstance",
"ssm:GetDocument",
"ssm:GetParameters",
"ssm:ListAssociations",
"ssm:ListInstanceAssociations",
"ssm:PutInventory",
"ssm:PutComplianceItems",
"ssm:UpdateAssociationStatus",
"ssm:UpdateInstanceAssociationStatus",
"ssm:UpdateInstanceInformation"
],
"Effect": "Allow",
"Resource": "*"
},
{
"Action": [
"ec2messages:AcknowledgeMessage",
"ec2messages:DeleteMessage",
"ec2messages:FailMessage",
"ec2messages:GetEndpoint",
"ec2messages:GetMessages",
"ec2messages:SendReply"
],
"Effect": "Allow",
"Resource": "*"
},
{
"Action": [
"cloudwatch:PutMetricData"
],
"Effect": "Allow",
"Resource": "*"
},
{
"Action": [
"ec2:DescribeInstanceStatus"
],
"Effect": "Allow",
"Resource": "*"
},
{
"Action": [
"ds:CreateComputer",
"ds:DescribeDirectories"
],
"Effect": "Allow",
"Resource": "*"
},
{
"Action": [
"logs:CreateLogGroup",
"logs:CreateLogStream",
"logs:DescribeLogGroups",
"logs:DescribeLogStreams",
"logs:PutLogEvents"
],
"Effect": "Allow",
"Resource": "*"
},
{
"Action": [
"s3:PutObject",
"s3:GetObject",
"s3:AbortMultipartUpload",
"s3:ListMultipartUploadParts",
"s3:ListBucketMultipartUploads"
],
"Effect": "Allow",
"Resource": "*"
},
{
"Action": [
"s3:ListBucket"
],
"Effect": "Allow",
"Resource": "arn:aws:s3:::amazon-ssm-packages-*"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/service-role/",
"PolicyId": "ANPAI6TL3SMY22S4KMMX6",
"PolicyName": "AmazonEC2RoleforSSM",
"UpdateDate": "2017-08-10T20:49:08+00:00",
"VersionId": "v4"
},
"AmazonEC2SpotFleetAutoscaleRole": {
"Arn": "arn:aws:iam::aws:policy/service-role/AmazonEC2SpotFleetAutoscaleRole",
"AttachmentCount": 0,
"CreateDate": "2016-08-19T18:27:22+00:00",
"DefaultVersionId": "v1",
"Document": {
"Statement": [
{
"Action": [
"ec2:DescribeSpotFleetRequests",
"ec2:ModifySpotFleetRequest"
],
"Effect": "Allow",
"Resource": [
"*"
]
},
{
"Action": [
"cloudwatch:DescribeAlarms"
],
"Effect": "Allow",
"Resource": [
"*"
]
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/service-role/",
"PolicyId": "ANPAIMFFRMIOBGDP2TAVE",
"PolicyName": "AmazonEC2SpotFleetAutoscaleRole",
"UpdateDate": "2016-08-19T18:27:22+00:00",
"VersionId": "v1"
},
"AmazonEC2SpotFleetRole": {
"Arn": "arn:aws:iam::aws:policy/service-role/AmazonEC2SpotFleetRole",
"AttachmentCount": 0,
"CreateDate": "2016-11-10T21:19:35+00:00",
"DefaultVersionId": "v3",
"Document": {
"Statement": [
{
"Action": [
"ec2:DescribeImages",
"ec2:DescribeSubnets",
"ec2:RequestSpotInstances",
"ec2:TerminateInstances",
"ec2:DescribeInstanceStatus",
"iam:PassRole"
],
"Effect": "Allow",
"Resource": [
"*"
]
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/service-role/",
"PolicyId": "ANPAIMRTKHWK7ESSNETSW",
"PolicyName": "AmazonEC2SpotFleetRole",
"UpdateDate": "2016-11-10T21:19:35+00:00",
"VersionId": "v3"
},
"AmazonEC2SpotFleetTaggingRole": {
"Arn": "arn:aws:iam::aws:policy/service-role/AmazonEC2SpotFleetTaggingRole",
"AttachmentCount": 0,
"CreateDate": "2017-07-26T19:10:35+00:00",
"DefaultVersionId": "v2",
"Document": {
"Statement": [
{
"Action": [
"ec2:DescribeImages",
"ec2:DescribeSubnets",
"ec2:RequestSpotInstances",
"ec2:TerminateInstances",
"ec2:DescribeInstanceStatus",
"ec2:CreateTags"
],
"Effect": "Allow",
"Resource": [
"*"
]
},
{
"Action": "iam:PassRole",
"Condition": {
"StringEquals": {
"iam:PassedToService": "ec2.amazonaws.com"
}
},
"Effect": "Allow",
"Resource": [
"*"
]
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/service-role/",
"PolicyId": "ANPAJ5U6UMLCEYLX5OLC4",
"PolicyName": "AmazonEC2SpotFleetTaggingRole",
"UpdateDate": "2017-07-26T19:10:35+00:00",
"VersionId": "v2"
},
"AmazonESFullAccess": {
"Arn": "arn:aws:iam::aws:policy/AmazonESFullAccess",
"AttachmentCount": 0,
"CreateDate": "2015-10-01T19:14:00+00:00",
"DefaultVersionId": "v1",
"Document": {
"Statement": [
{
"Action": [
"es:*"
],
"Effect": "Allow",
"Resource": "*"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/",
"PolicyId": "ANPAJM6ZTCU24QL5PZCGC",
"PolicyName": "AmazonESFullAccess",
"UpdateDate": "2015-10-01T19:14:00+00:00",
"VersionId": "v1"
},
"AmazonESReadOnlyAccess": {
"Arn": "arn:aws:iam::aws:policy/AmazonESReadOnlyAccess",
"AttachmentCount": 0,
"CreateDate": "2015-10-01T19:18:24+00:00",
"DefaultVersionId": "v1",
"Document": {
"Statement": [
{
"Action": [
"es:Describe*",
"es:List*"
],
"Effect": "Allow",
"Resource": "*"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/",
"PolicyId": "ANPAJUDMRLOQ7FPAR46FQ",
"PolicyName": "AmazonESReadOnlyAccess",
"UpdateDate": "2015-10-01T19:18:24+00:00",
"VersionId": "v1"
},
"AmazonElastiCacheFullAccess": {
"Arn": "arn:aws:iam::aws:policy/AmazonElastiCacheFullAccess",
"AttachmentCount": 0,
"CreateDate": "2015-02-06T18:40:20+00:00",
"DefaultVersionId": "v1",
"Document": {
"Statement": [
{
"Action": "elasticache:*",
"Effect": "Allow",
"Resource": "*"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/",
"PolicyId": "ANPAIA2V44CPHAUAAECKG",
"PolicyName": "AmazonElastiCacheFullAccess",
"UpdateDate": "2015-02-06T18:40:20+00:00",
"VersionId": "v1"
},
"AmazonElastiCacheReadOnlyAccess": {
"Arn": "arn:aws:iam::aws:policy/AmazonElastiCacheReadOnlyAccess",
"AttachmentCount": 0,
"CreateDate": "2015-02-06T18:40:21+00:00",
"DefaultVersionId": "v1",
"Document": {
"Statement": [
{
"Action": [
"elasticache:Describe*"
],
"Effect": "Allow",
"Resource": "*"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/",
"PolicyId": "ANPAIPDACSNQHSENWAKM2",
"PolicyName": "AmazonElastiCacheReadOnlyAccess",
"UpdateDate": "2015-02-06T18:40:21+00:00",
"VersionId": "v1"
},
"AmazonElasticFileSystemFullAccess": {
"Arn": "arn:aws:iam::aws:policy/AmazonElasticFileSystemFullAccess",
"AttachmentCount": 0,
"CreateDate": "2017-08-14T10:18:34+00:00",
"DefaultVersionId": "v3",
"Document": {
"Statement": [
{
"Action": [
"ec2:CreateNetworkInterface",
"ec2:DeleteNetworkInterface",
"ec2:DescribeAvailabilityZones",
"ec2:DescribeNetworkInterfaceAttribute",
"ec2:DescribeNetworkInterfaces",
"ec2:DescribeSecurityGroups",
"ec2:DescribeSubnets",
"ec2:DescribeVpcAttribute",
"ec2:DescribeVpcs",
"ec2:ModifyNetworkInterfaceAttribute",
"elasticfilesystem:*",
"kms:DescribeKey",
"kms:ListAliases"
],
"Effect": "Allow",
"Resource": "*"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/",
"PolicyId": "ANPAJKXTMNVQGIDNCKPBC",
"PolicyName": "AmazonElasticFileSystemFullAccess",
"UpdateDate": "2017-08-14T10:18:34+00:00",
"VersionId": "v3"
},
"AmazonElasticFileSystemReadOnlyAccess": {
"Arn": "arn:aws:iam::aws:policy/AmazonElasticFileSystemReadOnlyAccess",
"AttachmentCount": 0,
"CreateDate": "2017-08-14T10:09:49+00:00",
"DefaultVersionId": "v3",
"Document": {
"Statement": [
{
"Action": [
"ec2:DescribeAvailabilityZones",
"ec2:DescribeNetworkInterfaceAttribute",
"ec2:DescribeNetworkInterfaces",
"ec2:DescribeSecurityGroups",
"ec2:DescribeSubnets",
"ec2:DescribeVpcAttribute",
"ec2:DescribeVpcs",
"elasticfilesystem:Describe*",
"kms:ListAliases"
],
"Effect": "Allow",
"Resource": "*"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/",
"PolicyId": "ANPAIPN5S4NE5JJOKVC4Y",
"PolicyName": "AmazonElasticFileSystemReadOnlyAccess",
"UpdateDate": "2017-08-14T10:09:49+00:00",
"VersionId": "v3"
},
"AmazonElasticMapReduceFullAccess": {
"Arn": "arn:aws:iam::aws:policy/AmazonElasticMapReduceFullAccess",
"AttachmentCount": 0,
"CreateDate": "2017-09-20T19:27:37+00:00",
"DefaultVersionId": "v5",
"Document": {
"Statement": [
{
"Action": [
"cloudwatch:*",
"cloudformation:CreateStack",
"cloudformation:DescribeStackEvents",
"ec2:AuthorizeSecurityGroupIngress",
"ec2:AuthorizeSecurityGroupEgress",
"ec2:CancelSpotInstanceRequests",
"ec2:CreateRoute",
"ec2:CreateSecurityGroup",
"ec2:CreateTags",
"ec2:DeleteRoute",
"ec2:DeleteTags",
"ec2:DeleteSecurityGroup",
"ec2:DescribeAvailabilityZones",
"ec2:DescribeAccountAttributes",
"ec2:DescribeInstances",
"ec2:DescribeKeyPairs",
"ec2:DescribeRouteTables",
"ec2:DescribeSecurityGroups",
"ec2:DescribeSpotInstanceRequests",
"ec2:DescribeSpotPriceHistory",
"ec2:DescribeSubnets",
"ec2:DescribeVpcAttribute",
"ec2:DescribeVpcs",
"ec2:DescribeRouteTables",
"ec2:DescribeNetworkAcls",
"ec2:CreateVpcEndpoint",
"ec2:ModifyImageAttribute",
"ec2:ModifyInstanceAttribute",
"ec2:RequestSpotInstances",
"ec2:RevokeSecurityGroupEgress",
"ec2:RunInstances",
"ec2:TerminateInstances",
"elasticmapreduce:*",
"iam:GetPolicy",
"iam:GetPolicyVersion",
"iam:ListRoles",
"iam:PassRole",
"kms:List*",
"s3:*",
"sdb:*",
"support:CreateCase",
"support:DescribeServices",
"support:DescribeSeverityLevels"
],
"Effect": "Allow",
"Resource": "*"
},
{
"Action": "iam:CreateServiceLinkedRole",
"Condition": {
"StringLike": {
"iam:AWSServiceName": "elasticmapreduce.amazonaws.com"
}
},
"Effect": "Allow",
"Resource": "arn:aws:iam::*:role/aws-service-role/elasticmapreduce.amazonaws.com/AWSServiceRoleForEMRCleanup"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/",
"PolicyId": "ANPAIZP5JFP3AMSGINBB2",
"PolicyName": "AmazonElasticMapReduceFullAccess",
"UpdateDate": "2017-09-20T19:27:37+00:00",
"VersionId": "v5"
},
"AmazonElasticMapReduceReadOnlyAccess": {
"Arn": "arn:aws:iam::aws:policy/AmazonElasticMapReduceReadOnlyAccess",
"AttachmentCount": 0,
"CreateDate": "2017-05-22T23:00:19+00:00",
"DefaultVersionId": "v2",
"Document": {
"Statement": [
{
"Action": [
"elasticmapreduce:Describe*",
"elasticmapreduce:List*",
"elasticmapreduce:ViewEventsFromAllClustersInConsole",
"s3:GetObject",
"s3:ListAllMyBuckets",
"s3:ListBucket",
"sdb:Select",
"cloudwatch:GetMetricStatistics"
],
"Effect": "Allow",
"Resource": "*"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/",
"PolicyId": "ANPAIHP6NH2S6GYFCOINC",
"PolicyName": "AmazonElasticMapReduceReadOnlyAccess",
"UpdateDate": "2017-05-22T23:00:19+00:00",
"VersionId": "v2"
},
"AmazonElasticMapReduceRole": {
"Arn": "arn:aws:iam::aws:policy/service-role/AmazonElasticMapReduceRole",
"AttachmentCount": 0,
"CreateDate": "2017-07-17T21:29:50+00:00",
"DefaultVersionId": "v8",
"Document": {
"Statement": [
{
"Action": [
"ec2:AuthorizeSecurityGroupEgress",
"ec2:AuthorizeSecurityGroupIngress",
"ec2:CancelSpotInstanceRequests",
"ec2:CreateNetworkInterface",
"ec2:CreateSecurityGroup",
"ec2:CreateTags",
"ec2:DeleteNetworkInterface",
"ec2:DeleteSecurityGroup",
"ec2:DeleteTags",
"ec2:DescribeAvailabilityZones",
"ec2:DescribeAccountAttributes",
"ec2:DescribeDhcpOptions",
"ec2:DescribeImages",
"ec2:DescribeInstanceStatus",
"ec2:DescribeInstances",
"ec2:DescribeKeyPairs",
"ec2:DescribeNetworkAcls",
"ec2:DescribeNetworkInterfaces",
"ec2:DescribePrefixLists",
"ec2:DescribeRouteTables",
"ec2:DescribeSecurityGroups",
"ec2:DescribeSpotInstanceRequests",
"ec2:DescribeSpotPriceHistory",
"ec2:DescribeSubnets",
"ec2:DescribeTags",
"ec2:DescribeVpcAttribute",
"ec2:DescribeVpcEndpoints",
"ec2:DescribeVpcEndpointServices",
"ec2:DescribeVpcs",
"ec2:DetachNetworkInterface",
"ec2:ModifyImageAttribute",
"ec2:ModifyInstanceAttribute",
"ec2:RequestSpotInstances",
"ec2:RevokeSecurityGroupEgress",
"ec2:RunInstances",
"ec2:TerminateInstances",
"ec2:DeleteVolume",
"ec2:DescribeVolumeStatus",
"ec2:DescribeVolumes",
"ec2:DetachVolume",
"iam:GetRole",
"iam:GetRolePolicy",
"iam:ListInstanceProfiles",
"iam:ListRolePolicies",
"iam:PassRole",
"s3:CreateBucket",
"s3:Get*",
"s3:List*",
"sdb:BatchPutAttributes",
"sdb:Select",
"sqs:CreateQueue",
"sqs:Delete*",
"sqs:GetQueue*",
"sqs:PurgeQueue",
"sqs:ReceiveMessage",
"cloudwatch:PutMetricAlarm",
"cloudwatch:DescribeAlarms",
"cloudwatch:DeleteAlarms",
"application-autoscaling:RegisterScalableTarget",
"application-autoscaling:DeregisterScalableTarget",
"application-autoscaling:PutScalingPolicy",
"application-autoscaling:DeleteScalingPolicy",
"application-autoscaling:Describe*"
],
"Effect": "Allow",
"Resource": "*"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/service-role/",
"PolicyId": "ANPAIDI2BQT2LKXZG36TW",
"PolicyName": "AmazonElasticMapReduceRole",
"UpdateDate": "2017-07-17T21:29:50+00:00",
"VersionId": "v8"
},
"AmazonElasticMapReduceforAutoScalingRole": {
"Arn": "arn:aws:iam::aws:policy/service-role/AmazonElasticMapReduceforAutoScalingRole",
"AttachmentCount": 0,
"CreateDate": "2016-11-18T01:09:10+00:00",
"DefaultVersionId": "v1",
"Document": {
"Statement": [
{
"Action": [
"cloudwatch:DescribeAlarms",
"elasticmapreduce:ListInstanceGroups",
"elasticmapreduce:ModifyInstanceGroups"
],
"Effect": "Allow",
"Resource": "*"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/service-role/",
"PolicyId": "ANPAJSVXG6QHPE6VHDZ4Q",
"PolicyName": "AmazonElasticMapReduceforAutoScalingRole",
"UpdateDate": "2016-11-18T01:09:10+00:00",
"VersionId": "v1"
},
"AmazonElasticMapReduceforEC2Role": {
"Arn": "arn:aws:iam::aws:policy/service-role/AmazonElasticMapReduceforEC2Role",
"AttachmentCount": 0,
"CreateDate": "2017-08-11T23:57:30+00:00",
"DefaultVersionId": "v3",
"Document": {
"Statement": [
{
"Action": [
"cloudwatch:*",
"dynamodb:*",
"ec2:Describe*",
"elasticmapreduce:Describe*",
"elasticmapreduce:ListBootstrapActions",
"elasticmapreduce:ListClusters",
"elasticmapreduce:ListInstanceGroups",
"elasticmapreduce:ListInstances",
"elasticmapreduce:ListSteps",
"kinesis:CreateStream",
"kinesis:DeleteStream",
"kinesis:DescribeStream",
"kinesis:GetRecords",
"kinesis:GetShardIterator",
"kinesis:MergeShards",
"kinesis:PutRecord",
"kinesis:SplitShard",
"rds:Describe*",
"s3:*",
"sdb:*",
"sns:*",
"sqs:*",
"glue:CreateDatabase",
"glue:UpdateDatabase",
"glue:DeleteDatabase",
"glue:GetDatabase",
"glue:GetDatabases",
"glue:CreateTable",
"glue:UpdateTable",
"glue:DeleteTable",
"glue:GetTable",
"glue:GetTables",
"glue:GetTableVersions",
"glue:CreatePartition",
"glue:BatchCreatePartition",
"glue:UpdatePartition",
"glue:DeletePartition",
"glue:BatchDeletePartition",
"glue:GetPartition",
"glue:GetPartitions",
"glue:BatchGetPartition",
"glue:CreateUserDefinedFunction",
"glue:UpdateUserDefinedFunction",
"glue:DeleteUserDefinedFunction",
"glue:GetUserDefinedFunction",
"glue:GetUserDefinedFunctions"
],
"Effect": "Allow",
"Resource": "*"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/service-role/",
"PolicyId": "ANPAIGALS5RCDLZLB3PGS",
"PolicyName": "AmazonElasticMapReduceforEC2Role",
"UpdateDate": "2017-08-11T23:57:30+00:00",
"VersionId": "v3"
},
"AmazonElasticTranscoderFullAccess": {
"Arn": "arn:aws:iam::aws:policy/AmazonElasticTranscoderFullAccess",
"AttachmentCount": 0,
"CreateDate": "2015-02-06T18:40:24+00:00",
"DefaultVersionId": "v1",
"Document": {
"Statement": [
{
"Action": [
"elastictranscoder:*",
"cloudfront:*",
"s3:List*",
"s3:Put*",
"s3:Get*",
"s3:*MultipartUpload*",
"iam:CreateRole",
"iam:GetRolePolicy",
"iam:PassRole",
"iam:PutRolePolicy",
"iam:List*",
"sns:CreateTopic",
"sns:List*"
],
"Effect": "Allow",
"Resource": "*"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/",
"PolicyId": "ANPAJ4D5OJU75P5ZJZVNY",
"PolicyName": "AmazonElasticTranscoderFullAccess",
"UpdateDate": "2015-02-06T18:40:24+00:00",
"VersionId": "v1"
},
"AmazonElasticTranscoderJobsSubmitter": {
"Arn": "arn:aws:iam::aws:policy/AmazonElasticTranscoderJobsSubmitter",
"AttachmentCount": 0,
"CreateDate": "2015-02-06T18:40:25+00:00",
"DefaultVersionId": "v1",
"Document": {
"Statement": [
{
"Action": [
"elastictranscoder:Read*",
"elastictranscoder:List*",
"elastictranscoder:*Job",
"elastictranscoder:*Preset",
"s3:List*",
"iam:List*",
"sns:List*"
],
"Effect": "Allow",
"Resource": "*"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/",
"PolicyId": "ANPAIN5WGARIKZ3E2UQOU",
"PolicyName": "AmazonElasticTranscoderJobsSubmitter",
"UpdateDate": "2015-02-06T18:40:25+00:00",
"VersionId": "v1"
},
"AmazonElasticTranscoderReadOnlyAccess": {
"Arn": "arn:aws:iam::aws:policy/AmazonElasticTranscoderReadOnlyAccess",
"AttachmentCount": 0,
"CreateDate": "2015-02-06T18:40:26+00:00",
"DefaultVersionId": "v1",
"Document": {
"Statement": [
{
"Action": [
"elastictranscoder:Read*",
"elastictranscoder:List*",
"s3:List*",
"iam:List*",
"sns:List*"
],
"Effect": "Allow",
"Resource": "*"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/",
"PolicyId": "ANPAJGPP7GPMJRRJMEP3Q",
"PolicyName": "AmazonElasticTranscoderReadOnlyAccess",
"UpdateDate": "2015-02-06T18:40:26+00:00",
"VersionId": "v1"
},
"AmazonElasticTranscoderRole": {
"Arn": "arn:aws:iam::aws:policy/service-role/AmazonElasticTranscoderRole",
"AttachmentCount": 0,
"CreateDate": "2015-02-06T18:41:26+00:00",
"DefaultVersionId": "v1",
"Document": {
"Statement": [
{
"Action": [
"s3:ListBucket",
"s3:Put*",
"s3:Get*",
"s3:*MultipartUpload*"
],
"Effect": "Allow",
"Resource": [
"*"
],
"Sid": "1"
},
{
"Action": [
"sns:Publish"
],
"Effect": "Allow",
"Resource": [
"*"
],
"Sid": "2"
},
{
"Action": [
"s3:*Policy*",
"sns:*Permission*",
"sns:*Delete*",
"s3:*Delete*",
"sns:*Remove*"
],
"Effect": "Deny",
"Resource": [
"*"
],
"Sid": "3"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/service-role/",
"PolicyId": "ANPAJNW3WMKVXFJ2KPIQ2",
"PolicyName": "AmazonElasticTranscoderRole",
"UpdateDate": "2015-02-06T18:41:26+00:00",
"VersionId": "v1"
},
"AmazonElasticsearchServiceRolePolicy": {
"Arn": "arn:aws:iam::aws:policy/aws-service-role/AmazonElasticsearchServiceRolePolicy",
"AttachmentCount": 0,
"CreateDate": "2017-07-07T00:15:31+00:00",
"DefaultVersionId": "v1",
"Document": {
"Statement": [
{
"Action": [
"ec2:CreateNetworkInterface",
"ec2:DeleteNetworkInterface",
"ec2:DescribeNetworkInterfaces",
"ec2:ModifyNetworkInterfaceAttribute",
"ec2:DescribeSecurityGroups",
"ec2:DescribeSubnets"
],
"Effect": "Allow",
"Resource": "*",
"Sid": "Stmt1480452973134"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/aws-service-role/",
"PolicyId": "ANPAJFEWZPHXKLCVHEUIC",
"PolicyName": "AmazonElasticsearchServiceRolePolicy",
"UpdateDate": "2017-07-07T00:15:31+00:00",
"VersionId": "v1"
},
"AmazonGlacierFullAccess": {
"Arn": "arn:aws:iam::aws:policy/AmazonGlacierFullAccess",
"AttachmentCount": 0,
"CreateDate": "2015-02-06T18:40:28+00:00",
"DefaultVersionId": "v1",
"Document": {
"Statement": [
{
"Action": "glacier:*",
"Effect": "Allow",
"Resource": "*"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/",
"PolicyId": "ANPAJQSTZJWB2AXXAKHVQ",
"PolicyName": "AmazonGlacierFullAccess",
"UpdateDate": "2015-02-06T18:40:28+00:00",
"VersionId": "v1"
},
"AmazonGlacierReadOnlyAccess": {
"Arn": "arn:aws:iam::aws:policy/AmazonGlacierReadOnlyAccess",
"AttachmentCount": 0,
"CreateDate": "2016-05-05T18:46:10+00:00",
"DefaultVersionId": "v2",
"Document": {
"Statement": [
{
"Action": [
"glacier:DescribeJob",
"glacier:DescribeVault",
"glacier:GetDataRetrievalPolicy",
"glacier:GetJobOutput",
"glacier:GetVaultAccessPolicy",
"glacier:GetVaultLock",
"glacier:GetVaultNotifications",
"glacier:ListJobs",
"glacier:ListMultipartUploads",
"glacier:ListParts",
"glacier:ListTagsForVault",
"glacier:ListVaults"
],
"Effect": "Allow",
"Resource": "*"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/",
"PolicyId": "ANPAI2D5NJKMU274MET4E",
"PolicyName": "AmazonGlacierReadOnlyAccess",
"UpdateDate": "2016-05-05T18:46:10+00:00",
"VersionId": "v2"
},
"AmazonInspectorFullAccess": {
"Arn": "arn:aws:iam::aws:policy/AmazonInspectorFullAccess",
"AttachmentCount": 0,
"CreateDate": "2017-09-12T17:42:57+00:00",
"DefaultVersionId": "v3",
"Document": {
"Statement": [
{
"Action": [
"inspector:*",
"ec2:DescribeInstances",
"ec2:DescribeTags",
"sns:ListTopics",
"events:DescribeRule",
"events:ListRuleNamesByTarget"
],
"Effect": "Allow",
"Resource": "*"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/",
"PolicyId": "ANPAI7Y6NTA27NWNA5U5E",
"PolicyName": "AmazonInspectorFullAccess",
"UpdateDate": "2017-09-12T17:42:57+00:00",
"VersionId": "v3"
},
"AmazonInspectorReadOnlyAccess": {
"Arn": "arn:aws:iam::aws:policy/AmazonInspectorReadOnlyAccess",
"AttachmentCount": 0,
"CreateDate": "2017-09-12T16:53:06+00:00",
"DefaultVersionId": "v3",
"Document": {
"Statement": [
{
"Action": [
"inspector:Describe*",
"inspector:Get*",
"inspector:List*",
"inspector:LocalizeText",
"inspector:Preview*",
"ec2:DescribeInstances",
"ec2:DescribeTags",
"sns:ListTopics",
"events:DescribeRule",
"events:ListRuleNamesByTarget"
],
"Effect": "Allow",
"Resource": "*"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/",
"PolicyId": "ANPAJXQNTHTEJ2JFRN2SE",
"PolicyName": "AmazonInspectorReadOnlyAccess",
"UpdateDate": "2017-09-12T16:53:06+00:00",
"VersionId": "v3"
},
"AmazonKinesisAnalyticsFullAccess": {
"Arn": "arn:aws:iam::aws:policy/AmazonKinesisAnalyticsFullAccess",
"AttachmentCount": 0,
"CreateDate": "2016-09-21T19:01:14+00:00",
"DefaultVersionId": "v1",
"Document": {
"Statement": [
{
"Action": "kinesisanalytics:*",
"Effect": "Allow",
"Resource": "*"
},
{
"Action": [
"kinesis:CreateStream",
"kinesis:DeleteStream",
"kinesis:DescribeStream",
"kinesis:ListStreams",
"kinesis:PutRecord",
"kinesis:PutRecords"
],
"Effect": "Allow",
"Resource": "*"
},
{
"Action": [
"firehose:DescribeDeliveryStream",
"firehose:ListDeliveryStreams"
],
"Effect": "Allow",
"Resource": "*"
},
{
"Action": [
"cloudwatch:GetMetricStatistics",
"cloudwatch:ListMetrics"
],
"Effect": "Allow",
"Resource": "*"
},
{
"Action": "logs:GetLogEvents",
"Effect": "Allow",
"Resource": "*"
},
{
"Action": [
"iam:ListPolicyVersions",
"iam:ListRoles"
],
"Effect": "Allow",
"Resource": "*"
},
{
"Action": "iam:PassRole",
"Effect": "Allow",
"Resource": "arn:aws:iam::*:role/service-role/kinesis-analytics*"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/",
"PolicyId": "ANPAJQOSKHTXP43R7P5AC",
"PolicyName": "AmazonKinesisAnalyticsFullAccess",
"UpdateDate": "2016-09-21T19:01:14+00:00",
"VersionId": "v1"
},
"AmazonKinesisAnalyticsReadOnly": {
"Arn": "arn:aws:iam::aws:policy/AmazonKinesisAnalyticsReadOnly",
"AttachmentCount": 0,
"CreateDate": "2016-09-21T18:16:43+00:00",
"DefaultVersionId": "v1",
"Document": {
"Statement": [
{
"Action": [
"kinesisanalytics:Describe*",
"kinesisanalytics:Get*",
"kinesisanalytics:List*"
],
"Effect": "Allow",
"Resource": "*"
},
{
"Action": [
"kinesis:DescribeStream",
"kinesis:ListStreams"
],
"Effect": "Allow",
"Resource": "*"
},
{
"Action": [
"firehose:DescribeDeliveryStream",
"firehose:ListDeliveryStreams"
],
"Effect": "Allow",
"Resource": "*"
},
{
"Action": [
"cloudwatch:GetMetricStatistics",
"cloudwatch:ListMetrics"
],
"Effect": "Allow",
"Resource": "*"
},
{
"Action": "logs:GetLogEvents",
"Effect": "Allow",
"Resource": "*"
},
{
"Action": [
"iam:ListPolicyVersions",
"iam:ListRoles"
],
"Effect": "Allow",
"Resource": "*"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/",
"PolicyId": "ANPAIJIEXZAFUK43U7ARK",
"PolicyName": "AmazonKinesisAnalyticsReadOnly",
"UpdateDate": "2016-09-21T18:16:43+00:00",
"VersionId": "v1"
},
"AmazonKinesisFirehoseFullAccess": {
"Arn": "arn:aws:iam::aws:policy/AmazonKinesisFirehoseFullAccess",
"AttachmentCount": 0,
"CreateDate": "2015-10-07T18:45:26+00:00",
"DefaultVersionId": "v1",
"Document": {
"Statement": [
{
"Action": [
"firehose:*"
],
"Effect": "Allow",
"Resource": "*"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/",
"PolicyId": "ANPAJMZQMTZ7FRBFHHAHI",
"PolicyName": "AmazonKinesisFirehoseFullAccess",
"UpdateDate": "2015-10-07T18:45:26+00:00",
"VersionId": "v1"
},
"AmazonKinesisFirehoseReadOnlyAccess": {
"Arn": "arn:aws:iam::aws:policy/AmazonKinesisFirehoseReadOnlyAccess",
"AttachmentCount": 0,
"CreateDate": "2015-10-07T18:43:39+00:00",
"DefaultVersionId": "v1",
"Document": {
"Statement": [
{
"Action": [
"firehose:Describe*",
"firehose:List*"
],
"Effect": "Allow",
"Resource": "*"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/",
"PolicyId": "ANPAJ36NT645INW4K24W6",
"PolicyName": "AmazonKinesisFirehoseReadOnlyAccess",
"UpdateDate": "2015-10-07T18:43:39+00:00",
"VersionId": "v1"
},
"AmazonKinesisFullAccess": {
"Arn": "arn:aws:iam::aws:policy/AmazonKinesisFullAccess",
"AttachmentCount": 0,
"CreateDate": "2015-02-06T18:40:29+00:00",
"DefaultVersionId": "v1",
"Document": {
"Statement": [
{
"Action": "kinesis:*",
"Effect": "Allow",
"Resource": "*"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/",
"PolicyId": "ANPAIVF32HAMOXCUYRAYE",
"PolicyName": "AmazonKinesisFullAccess",
"UpdateDate": "2015-02-06T18:40:29+00:00",
"VersionId": "v1"
},
"AmazonKinesisReadOnlyAccess": {
"Arn": "arn:aws:iam::aws:policy/AmazonKinesisReadOnlyAccess",
"AttachmentCount": 0,
"CreateDate": "2015-02-06T18:40:30+00:00",
"DefaultVersionId": "v1",
"Document": {
"Statement": [
{
"Action": [
"kinesis:Get*",
"kinesis:List*",
"kinesis:Describe*"
],
"Effect": "Allow",
"Resource": "*"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/",
"PolicyId": "ANPAIOCMTDT5RLKZ2CAJO",
"PolicyName": "AmazonKinesisReadOnlyAccess",
"UpdateDate": "2015-02-06T18:40:30+00:00",
"VersionId": "v1"
},
"AmazonLexFullAccess": {
"Arn": "arn:aws:iam::aws:policy/AmazonLexFullAccess",
"AttachmentCount": 0,
"CreateDate": "2017-04-14T19:45:37+00:00",
"DefaultVersionId": "v3",
"Document": {
"Statement": [
{
"Action": [
"cloudwatch:GetMetricStatistics",
"cloudwatch:DescribeAlarms",
"cloudwatch:DescribeAlarmsForMetric",
"kms:DescribeKey",
"kms:ListAliases",
"lambda:GetPolicy",
"lambda:ListFunctions",
"lex:*",
"polly:DescribeVoices",
"polly:SynthesizeSpeech"
],
"Effect": "Allow",
"Resource": [
"*"
]
},
{
"Action": [
"lambda:AddPermission",
"lambda:RemovePermission"
],
"Condition": {
"StringLike": {
"lambda:Principal": "lex.amazonaws.com"
}
},
"Effect": "Allow",
"Resource": "arn:aws:lambda:*:*:function:AmazonLex*"
},
{
"Action": [
"iam:GetRole",
"iam:DeleteRole"
],
"Effect": "Allow",
"Resource": [
"arn:aws:iam::*:role/aws-service-role/lex.amazonaws.com/AWSServiceRoleForLexBots",
"arn:aws:iam::*:role/aws-service-role/channels.lex.amazonaws.com/AWSServiceRoleForLexChannels"
]
},
{
"Action": [
"iam:CreateServiceLinkedRole"
],
"Condition": {
"StringLike": {
"iam:AWSServiceName": "lex.amazonaws.com"
}
},
"Effect": "Allow",
"Resource": [
"arn:aws:iam::*:role/aws-service-role/lex.amazonaws.com/AWSServiceRoleForLexBots"
]
},
{
"Action": [
"iam:DetachRolePolicy"
],
"Condition": {
"StringLike": {
"iam:PolicyArn": "arn:aws:iam::aws:policy/aws-service-role/AmazonLexBotPolicy"
}
},
"Effect": "Allow",
"Resource": [
"arn:aws:iam::*:role/aws-service-role/lex.amazonaws.com/AWSServiceRoleForLexBots"
]
},
{
"Action": [
"iam:CreateServiceLinkedRole"
],
"Condition": {
"StringLike": {
"iam:AWSServiceName": "channels.lex.amazonaws.com"
}
},
"Effect": "Allow",
"Resource": [
"arn:aws:iam::*:role/aws-service-role/channels.lex.amazonaws.com/AWSServiceRoleForLexChannels"
]
},
{
"Action": [
"iam:DetachRolePolicy"
],
"Condition": {
"StringLike": {
"iam:PolicyArn": "arn:aws:iam::aws:policy/aws-service-role/LexChannelPolicy"
}
},
"Effect": "Allow",
"Resource": [
"arn:aws:iam::*:role/aws-service-role/channels.lex.amazonaws.com/AWSServiceRoleForLexChannels"
]
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/",
"PolicyId": "ANPAJVLXDHKVC23HRTKSI",
"PolicyName": "AmazonLexFullAccess",
"UpdateDate": "2017-04-14T19:45:37+00:00",
"VersionId": "v3"
},
"AmazonLexReadOnly": {
"Arn": "arn:aws:iam::aws:policy/AmazonLexReadOnly",
"AttachmentCount": 0,
"CreateDate": "2017-04-11T23:13:33+00:00",
"DefaultVersionId": "v1",
"Document": {
"Statement": [
{
"Action": [
"lex:GetBot",
"lex:GetBotAlias",
"lex:GetBotAliases",
"lex:GetBots",
"lex:GetBotChannelAssociation",
"lex:GetBotChannelAssociations",
"lex:GetBotVersions",
"lex:GetBuiltinIntent",
"lex:GetBuiltinIntents",
"lex:GetBuiltinSlotTypes",
"lex:GetIntent",
"lex:GetIntents",
"lex:GetIntentVersions",
"lex:GetSlotType",
"lex:GetSlotTypes",
"lex:GetSlotTypeVersions",
"lex:GetUtterancesView"
],
"Effect": "Allow",
"Resource": "*"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/",
"PolicyId": "ANPAJGBI5LSMAJNDGBNAM",
"PolicyName": "AmazonLexReadOnly",
"UpdateDate": "2017-04-11T23:13:33+00:00",
"VersionId": "v1"
},
"AmazonLexRunBotsOnly": {
"Arn": "arn:aws:iam::aws:policy/AmazonLexRunBotsOnly",
"AttachmentCount": 0,
"CreateDate": "2017-04-11T23:06:24+00:00",
"DefaultVersionId": "v1",
"Document": {
"Statement": [
{
"Action": [
"lex:PostContent",
"lex:PostText"
],
"Effect": "Allow",
"Resource": "*"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/",
"PolicyId": "ANPAJVZGB5CM3N6YWJHBE",
"PolicyName": "AmazonLexRunBotsOnly",
"UpdateDate": "2017-04-11T23:06:24+00:00",
"VersionId": "v1"
},
"AmazonMachineLearningBatchPredictionsAccess": {
"Arn": "arn:aws:iam::aws:policy/AmazonMachineLearningBatchPredictionsAccess",
"AttachmentCount": 0,
"CreateDate": "2015-04-09T17:12:19+00:00",
"DefaultVersionId": "v1",
"Document": {
"Statement": [
{
"Action": [
"machinelearning:CreateBatchPrediction",
"machinelearning:DeleteBatchPrediction",
"machinelearning:DescribeBatchPredictions",
"machinelearning:GetBatchPrediction",
"machinelearning:UpdateBatchPrediction"
],
"Effect": "Allow",
"Resource": "*"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/",
"PolicyId": "ANPAILOI4HTQSFTF3GQSC",
"PolicyName": "AmazonMachineLearningBatchPredictionsAccess",
"UpdateDate": "2015-04-09T17:12:19+00:00",
"VersionId": "v1"
},
"AmazonMachineLearningCreateOnlyAccess": {
"Arn": "arn:aws:iam::aws:policy/AmazonMachineLearningCreateOnlyAccess",
"AttachmentCount": 0,
"CreateDate": "2016-06-29T20:55:03+00:00",
"DefaultVersionId": "v2",
"Document": {
"Statement": [
{
"Action": [
"machinelearning:Add*",
"machinelearning:Create*",
"machinelearning:Delete*",
"machinelearning:Describe*",
"machinelearning:Get*"
],
"Effect": "Allow",
"Resource": "*"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/",
"PolicyId": "ANPAJDRUNIC2RYAMAT3CK",
"PolicyName": "AmazonMachineLearningCreateOnlyAccess",
"UpdateDate": "2016-06-29T20:55:03+00:00",
"VersionId": "v2"
},
"AmazonMachineLearningFullAccess": {
"Arn": "arn:aws:iam::aws:policy/AmazonMachineLearningFullAccess",
"AttachmentCount": 0,
"CreateDate": "2015-04-09T17:25:41+00:00",
"DefaultVersionId": "v1",
"Document": {
"Statement": [
{
"Action": [
"machinelearning:*"
],
"Effect": "Allow",
"Resource": "*"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/",
"PolicyId": "ANPAIWKW6AGSGYOQ5ERHC",
"PolicyName": "AmazonMachineLearningFullAccess",
"UpdateDate": "2015-04-09T17:25:41+00:00",
"VersionId": "v1"
},
"AmazonMachineLearningManageRealTimeEndpointOnlyAccess": {
"Arn": "arn:aws:iam::aws:policy/AmazonMachineLearningManageRealTimeEndpointOnlyAccess",
"AttachmentCount": 0,
"CreateDate": "2015-04-09T17:32:41+00:00",
"DefaultVersionId": "v1",
"Document": {
"Statement": [
{
"Action": [
"machinelearning:CreateRealtimeEndpoint",
"machinelearning:DeleteRealtimeEndpoint"
],
"Effect": "Allow",
"Resource": "*"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/",
"PolicyId": "ANPAJJL3PC3VCSVZP6OCI",
"PolicyName": "AmazonMachineLearningManageRealTimeEndpointOnlyAccess",
"UpdateDate": "2015-04-09T17:32:41+00:00",
"VersionId": "v1"
},
"AmazonMachineLearningReadOnlyAccess": {
"Arn": "arn:aws:iam::aws:policy/AmazonMachineLearningReadOnlyAccess",
"AttachmentCount": 0,
"CreateDate": "2015-04-09T17:40:02+00:00",
"DefaultVersionId": "v1",
"Document": {
"Statement": [
{
"Action": [
"machinelearning:Describe*",
"machinelearning:Get*"
],
"Effect": "Allow",
"Resource": "*"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/",
"PolicyId": "ANPAIW5VYBCGEX56JCINC",
"PolicyName": "AmazonMachineLearningReadOnlyAccess",
"UpdateDate": "2015-04-09T17:40:02+00:00",
"VersionId": "v1"
},
"AmazonMachineLearningRealTimePredictionOnlyAccess": {
"Arn": "arn:aws:iam::aws:policy/AmazonMachineLearningRealTimePredictionOnlyAccess",
"AttachmentCount": 0,
"CreateDate": "2015-04-09T17:44:06+00:00",
"DefaultVersionId": "v1",
"Document": {
"Statement": [
{
"Action": [
"machinelearning:Predict"
],
"Effect": "Allow",
"Resource": "*"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/",
"PolicyId": "ANPAIWMCNQPRWMWT36GVQ",
"PolicyName": "AmazonMachineLearningRealTimePredictionOnlyAccess",
"UpdateDate": "2015-04-09T17:44:06+00:00",
"VersionId": "v1"
},
"AmazonMachineLearningRoleforRedshiftDataSource": {
"Arn": "arn:aws:iam::aws:policy/service-role/AmazonMachineLearningRoleforRedshiftDataSource",
"AttachmentCount": 0,
"CreateDate": "2015-04-09T17:05:26+00:00",
"DefaultVersionId": "v1",
"Document": {
"Statement": [
{
"Action": [
"ec2:AuthorizeSecurityGroupIngress",
"ec2:CreateSecurityGroup",
"ec2:DescribeInternetGateways",
"ec2:DescribeSecurityGroups",
"ec2:RevokeSecurityGroupIngress",
"redshift:AuthorizeClusterSecurityGroupIngress",
"redshift:CreateClusterSecurityGroup",
"redshift:DescribeClusters",
"redshift:DescribeClusterSecurityGroups",
"redshift:ModifyCluster",
"redshift:RevokeClusterSecurityGroupIngress",
"s3:GetBucketLocation",
"s3:GetBucketPolicy",
"s3:GetObject",
"s3:PutBucketPolicy",
"s3:PutObject"
],
"Effect": "Allow",
"Resource": "*"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/service-role/",
"PolicyId": "ANPAIQ5UDYYMNN42BM4AK",
"PolicyName": "AmazonMachineLearningRoleforRedshiftDataSource",
"UpdateDate": "2015-04-09T17:05:26+00:00",
"VersionId": "v1"
},
"AmazonMacieFullAccess": {
"Arn": "arn:aws:iam::aws:policy/AmazonMacieFullAccess",
"AttachmentCount": 0,
"CreateDate": "2017-08-14T14:54:30+00:00",
"DefaultVersionId": "v1",
"Document": {
"Statement": [
{
"Action": [
"macie:*"
],
"Effect": "Allow",
"Resource": "*"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/",
"PolicyId": "ANPAJJF2N5FR6S5TZN5OA",
"PolicyName": "AmazonMacieFullAccess",
"UpdateDate": "2017-08-14T14:54:30+00:00",
"VersionId": "v1"
},
"AmazonMacieServiceRole": {
"Arn": "arn:aws:iam::aws:policy/service-role/AmazonMacieServiceRole",
"AttachmentCount": 0,
"CreateDate": "2017-08-14T14:53:26+00:00",
"DefaultVersionId": "v1",
"Document": {
"Statement": [
{
"Action": [
"s3:Get*",
"s3:List*"
],
"Effect": "Allow",
"Resource": "*"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/service-role/",
"PolicyId": "ANPAJVV7PON3FPBL2PSGC",
"PolicyName": "AmazonMacieServiceRole",
"UpdateDate": "2017-08-14T14:53:26+00:00",
"VersionId": "v1"
},
"AmazonMacieSetupRole": {
"Arn": "arn:aws:iam::aws:policy/service-role/AmazonMacieSetupRole",
"AttachmentCount": 0,
"CreateDate": "2017-08-14T14:53:34+00:00",
"DefaultVersionId": "v1",
"Document": {
"Statement": [
{
"Action": [
"cloudtrail:DescribeTrails",
"cloudtrail:GetEventSelectors",
"cloudtrail:GetTrailStatus",
"cloudtrail:ListTags",
"cloudtrail:LookupEvents",
"iam:ListAccountAliases",
"s3:GetBucket*",
"s3:ListBucket",
"s3:ListAllMyBuckets"
],
"Effect": "Allow",
"Resource": "*"
},
{
"Action": [
"cloudtrail:CreateTrail",
"cloudtrail:StartLogging",
"cloudtrail:StopLogging",
"cloudtrail:UpdateTrail",
"cloudtrail:DeleteTrail",
"cloudtrail:PutEventSelectors"
],
"Effect": "Allow",
"Resource": "arn:aws:cloudtrail:*:*:trail/AWSMacieTrail-DO-NOT-EDIT"
},
{
"Action": [
"s3:CreateBucket",
"s3:DeleteBucket",
"s3:DeleteBucketPolicy",
"s3:DeleteBucketWebsite",
"s3:DeleteObject",
"s3:DeleteObjectTagging",
"s3:DeleteObjectVersion",
"s3:DeleteObjectVersionTagging",
"s3:DeleteReplicationConfiguration",
"s3:PutBucketPolicy"
],
"Effect": "Allow",
"Resource": [
"arn:aws:s3:::awsmacie-*",
"arn:aws:s3:::awsmacietrail-*",
"arn:aws:s3:::*-awsmacietrail-*"
]
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/service-role/",
"PolicyId": "ANPAJ5DC6UBVKND7ADSKA",
"PolicyName": "AmazonMacieSetupRole",
"UpdateDate": "2017-08-14T14:53:34+00:00",
"VersionId": "v1"
},
"AmazonMechanicalTurkFullAccess": {
"Arn": "arn:aws:iam::aws:policy/AmazonMechanicalTurkFullAccess",
"AttachmentCount": 0,
"CreateDate": "2015-12-11T19:08:19+00:00",
"DefaultVersionId": "v1",
"Document": {
"Statement": [
{
"Action": [
"mechanicalturk:*"
],
"Effect": "Allow",
"Resource": [
"*"
]
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/",
"PolicyId": "ANPAJDGCL5BET73H5QIQC",
"PolicyName": "AmazonMechanicalTurkFullAccess",
"UpdateDate": "2015-12-11T19:08:19+00:00",
"VersionId": "v1"
},
"AmazonMechanicalTurkReadOnly": {
"Arn": "arn:aws:iam::aws:policy/AmazonMechanicalTurkReadOnly",
"AttachmentCount": 0,
"CreateDate": "2017-02-27T21:45:50+00:00",
"DefaultVersionId": "v2",
"Document": {
"Statement": [
{
"Action": [
"mechanicalturk:Get*",
"mechanicalturk:Search*",
"mechanicalturk:List*"
],
"Effect": "Allow",
"Resource": [
"*"
]
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/",
"PolicyId": "ANPAIO5IY3G3WXSX5PPRM",
"PolicyName": "AmazonMechanicalTurkReadOnly",
"UpdateDate": "2017-02-27T21:45:50+00:00",
"VersionId": "v2"
},
"AmazonMobileAnalyticsFinancialReportAccess": {
"Arn": "arn:aws:iam::aws:policy/AmazonMobileAnalyticsFinancialReportAccess",
"AttachmentCount": 0,
"CreateDate": "2015-02-06T18:40:35+00:00",
"DefaultVersionId": "v1",
"Document": {
"Statement": [
{
"Action": [
"mobileanalytics:GetReports",
"mobileanalytics:GetFinancialReports"
],
"Effect": "Allow",
"Resource": "*"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/",
"PolicyId": "ANPAJKJHO2R27TXKCWBU4",
"PolicyName": "AmazonMobileAnalyticsFinancialReportAccess",
"UpdateDate": "2015-02-06T18:40:35+00:00",
"VersionId": "v1"
},
"AmazonMobileAnalyticsFullAccess": {
"Arn": "arn:aws:iam::aws:policy/AmazonMobileAnalyticsFullAccess",
"AttachmentCount": 0,
"CreateDate": "2015-02-06T18:40:34+00:00",
"DefaultVersionId": "v1",
"Document": {
"Statement": [
{
"Action": "mobileanalytics:*",
"Effect": "Allow",
"Resource": "*"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/",
"PolicyId": "ANPAIJIKLU2IJ7WJ6DZFG",
"PolicyName": "AmazonMobileAnalyticsFullAccess",
"UpdateDate": "2015-02-06T18:40:34+00:00",
"VersionId": "v1"
},
"AmazonMobileAnalyticsNon-financialReportAccess": {
"Arn": "arn:aws:iam::aws:policy/AmazonMobileAnalyticsNon-financialReportAccess",
"AttachmentCount": 0,
"CreateDate": "2015-02-06T18:40:36+00:00",
"DefaultVersionId": "v1",
"Document": {
"Statement": [
{
"Action": "mobileanalytics:GetReports",
"Effect": "Allow",
"Resource": "*"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/",
"PolicyId": "ANPAIQLKQ4RXPUBBVVRDE",
"PolicyName": "AmazonMobileAnalyticsNon-financialReportAccess",
"UpdateDate": "2015-02-06T18:40:36+00:00",
"VersionId": "v1"
},
"AmazonMobileAnalyticsWriteOnlyAccess": {
"Arn": "arn:aws:iam::aws:policy/AmazonMobileAnalyticsWriteOnlyAccess",
"AttachmentCount": 0,
"CreateDate": "2015-02-06T18:40:37+00:00",
"DefaultVersionId": "v1",
"Document": {
"Statement": [
{
"Action": "mobileanalytics:PutEvents",
"Effect": "Allow",
"Resource": "*"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/",
"PolicyId": "ANPAJ5TAWBBQC2FAL3G6G",
"PolicyName": "AmazonMobileAnalyticsWriteOnlyAccess",
"UpdateDate": "2015-02-06T18:40:37+00:00",
"VersionId": "v1"
},
"AmazonPollyFullAccess": {
"Arn": "arn:aws:iam::aws:policy/AmazonPollyFullAccess",
"AttachmentCount": 0,
"CreateDate": "2016-11-30T18:59:06+00:00",
"DefaultVersionId": "v1",
"Document": {
"Statement": [
{
"Action": [
"polly:*"
],
"Effect": "Allow",
"Resource": [
"*"
]
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/",
"PolicyId": "ANPAJUZOYQU6XQYPR7EWS",
"PolicyName": "AmazonPollyFullAccess",
"UpdateDate": "2016-11-30T18:59:06+00:00",
"VersionId": "v1"
},
"AmazonPollyReadOnlyAccess": {
"Arn": "arn:aws:iam::aws:policy/AmazonPollyReadOnlyAccess",
"AttachmentCount": 0,
"CreateDate": "2016-11-30T18:59:24+00:00",
"DefaultVersionId": "v1",
"Document": {
"Statement": [
{
"Action": [
"polly:DescribeVoices",
"polly:GetLexicon",
"polly:ListLexicons",
"polly:SynthesizeSpeech"
],
"Effect": "Allow",
"Resource": [
"*"
]
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/",
"PolicyId": "ANPAJ5FENL3CVPL2FPDLA",
"PolicyName": "AmazonPollyReadOnlyAccess",
"UpdateDate": "2016-11-30T18:59:24+00:00",
"VersionId": "v1"
},
"AmazonRDSDirectoryServiceAccess": {
"Arn": "arn:aws:iam::aws:policy/service-role/AmazonRDSDirectoryServiceAccess",
"AttachmentCount": 0,
"CreateDate": "2016-02-26T02:02:05+00:00",
"DefaultVersionId": "v1",
"Document": {
"Statement": [
{
"Action": [
"ds:DescribeDirectories",
"ds:AuthorizeApplication",
"ds:UnauthorizeApplication"
],
"Effect": "Allow",
"Resource": "*"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/service-role/",
"PolicyId": "ANPAIL4KBY57XWMYUHKUU",
"PolicyName": "AmazonRDSDirectoryServiceAccess",
"UpdateDate": "2016-02-26T02:02:05+00:00",
"VersionId": "v1"
},
"AmazonRDSEnhancedMonitoringRole": {
"Arn": "arn:aws:iam::aws:policy/service-role/AmazonRDSEnhancedMonitoringRole",
"AttachmentCount": 1,
"CreateDate": "2015-11-11T19:58:29+00:00",
"DefaultVersionId": "v1",
"Document": {
"Statement": [
{
"Action": [
"logs:CreateLogGroup",
"logs:PutRetentionPolicy"
],
"Effect": "Allow",
"Resource": [
"arn:aws:logs:*:*:log-group:RDS*"
],
"Sid": "EnableCreationAndManagementOfRDSCloudwatchLogGroups"
},
{
"Action": [
"logs:CreateLogStream",
"logs:PutLogEvents",
"logs:DescribeLogStreams",
"logs:GetLogEvents"
],
"Effect": "Allow",
"Resource": [
"arn:aws:logs:*:*:log-group:RDS*:log-stream:*"
],
"Sid": "EnableCreationAndManagementOfRDSCloudwatchLogStreams"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/service-role/",
"PolicyId": "ANPAJV7BS425S4PTSSVGK",
"PolicyName": "AmazonRDSEnhancedMonitoringRole",
"UpdateDate": "2015-11-11T19:58:29+00:00",
"VersionId": "v1"
},
"AmazonRDSFullAccess": {
"Arn": "arn:aws:iam::aws:policy/AmazonRDSFullAccess",
"AttachmentCount": 0,
"CreateDate": "2017-09-14T23:40:45+00:00",
"DefaultVersionId": "v4",
"Document": {
"Statement": [
{
"Action": [
"rds:*",
"cloudwatch:DescribeAlarms",
"cloudwatch:GetMetricStatistics",
"ec2:DescribeAccountAttributes",
"ec2:DescribeAvailabilityZones",
"ec2:DescribeInternetGateways",
"ec2:DescribeSecurityGroups",
"ec2:DescribeSubnets",
"ec2:DescribeVpcAttribute",
"ec2:DescribeVpcs",
"sns:ListSubscriptions",
"sns:ListTopics",
"logs:DescribeLogStreams",
"logs:GetLogEvents"
],
"Effect": "Allow",
"Resource": "*"
},
{
"Action": "pi:*",
"Effect": "Allow",
"Resource": "arn:aws:pi:*:*:metrics/rds/*"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/",
"PolicyId": "ANPAI3R4QMOG6Q5A4VWVG",
"PolicyName": "AmazonRDSFullAccess",
"UpdateDate": "2017-09-14T23:40:45+00:00",
"VersionId": "v4"
},
"AmazonRDSReadOnlyAccess": {
"Arn": "arn:aws:iam::aws:policy/AmazonRDSReadOnlyAccess",
"AttachmentCount": 0,
"CreateDate": "2017-08-28T21:36:32+00:00",
"DefaultVersionId": "v3",
"Document": {
"Statement": [
{
"Action": [
"rds:Describe*",
"rds:ListTagsForResource",
"ec2:DescribeAccountAttributes",
"ec2:DescribeAvailabilityZones",
"ec2:DescribeInternetGateways",
"ec2:DescribeSecurityGroups",
"ec2:DescribeSubnets",
"ec2:DescribeVpcAttribute",
"ec2:DescribeVpcs"
],
"Effect": "Allow",
"Resource": "*"
},
{
"Action": [
"cloudwatch:GetMetricStatistics",
"logs:DescribeLogStreams",
"logs:GetLogEvents"
],
"Effect": "Allow",
"Resource": "*"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/",
"PolicyId": "ANPAJKTTTYV2IIHKLZ346",
"PolicyName": "AmazonRDSReadOnlyAccess",
"UpdateDate": "2017-08-28T21:36:32+00:00",
"VersionId": "v3"
},
"AmazonRedshiftFullAccess": {
"Arn": "arn:aws:iam::aws:policy/AmazonRedshiftFullAccess",
"AttachmentCount": 0,
"CreateDate": "2017-09-19T18:27:44+00:00",
"DefaultVersionId": "v2",
"Document": {
"Statement": [
{
"Action": [
"redshift:*",
"ec2:DescribeAccountAttributes",
"ec2:DescribeAddresses",
"ec2:DescribeAvailabilityZones",
"ec2:DescribeSecurityGroups",
"ec2:DescribeSubnets",
"ec2:DescribeVpcs",
"ec2:DescribeInternetGateways",
"sns:CreateTopic",
"sns:Get*",
"sns:List*",
"cloudwatch:Describe*",
"cloudwatch:Get*",
"cloudwatch:List*",
"cloudwatch:PutMetricAlarm",
"cloudwatch:EnableAlarmActions",
"cloudwatch:DisableAlarmActions"
],
"Effect": "Allow",
"Resource": "*"
},
{
"Action": "iam:CreateServiceLinkedRole",
"Condition": {
"StringLike": {
"iam:AWSServiceName": "redshift.amazonaws.com"
}
},
"Effect": "Allow",
"Resource": "arn:aws:iam::*:role/aws-service-role/redshift.amazonaws.com/AWSServiceRoleForRedshift"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/",
"PolicyId": "ANPAISEKCHH4YDB46B5ZO",
"PolicyName": "AmazonRedshiftFullAccess",
"UpdateDate": "2017-09-19T18:27:44+00:00",
"VersionId": "v2"
},
"AmazonRedshiftReadOnlyAccess": {
"Arn": "arn:aws:iam::aws:policy/AmazonRedshiftReadOnlyAccess",
"AttachmentCount": 0,
"CreateDate": "2015-02-06T18:40:51+00:00",
"DefaultVersionId": "v1",
"Document": {
"Statement": [
{
"Action": [
"redshift:Describe*",
"redshift:ViewQueriesInConsole",
"ec2:DescribeAccountAttributes",
"ec2:DescribeAddresses",
"ec2:DescribeAvailabilityZones",
"ec2:DescribeSecurityGroups",
"ec2:DescribeSubnets",
"ec2:DescribeVpcs",
"ec2:DescribeInternetGateways",
"sns:Get*",
"sns:List*",
"cloudwatch:Describe*",
"cloudwatch:List*",
"cloudwatch:Get*"
],
"Effect": "Allow",
"Resource": "*"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/",
"PolicyId": "ANPAIGD46KSON64QBSEZM",
"PolicyName": "AmazonRedshiftReadOnlyAccess",
"UpdateDate": "2015-02-06T18:40:51+00:00",
"VersionId": "v1"
},
"AmazonRedshiftServiceLinkedRolePolicy": {
"Arn": "arn:aws:iam::aws:policy/aws-service-role/AmazonRedshiftServiceLinkedRolePolicy",
"AttachmentCount": 0,
"CreateDate": "2017-09-18T19:19:45+00:00",
"DefaultVersionId": "v1",
"Document": {
"Statement": [
{
"Action": [
"ec2:DescribeVpcs",
"ec2:DescribeSubnets",
"ec2:DescribeNetworkInterfaces",
"ec2:DescribeAddress",
"ec2:AssociateAddress",
"ec2:DisassociateAddress",
"ec2:CreateNetworkInterface",
"ec2:DeleteNetworkInterface",
"ec2:ModifyNetworkInterfaceAttribute"
],
"Effect": "Allow",
"Resource": "*"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/aws-service-role/",
"PolicyId": "ANPAJPY2VXNRUYOY3SRZS",
"PolicyName": "AmazonRedshiftServiceLinkedRolePolicy",
"UpdateDate": "2017-09-18T19:19:45+00:00",
"VersionId": "v1"
},
"AmazonRekognitionFullAccess": {
"Arn": "arn:aws:iam::aws:policy/AmazonRekognitionFullAccess",
"AttachmentCount": 0,
"CreateDate": "2016-11-30T14:40:44+00:00",
"DefaultVersionId": "v1",
"Document": {
"Statement": [
{
"Action": [
"rekognition:*"
],
"Effect": "Allow",
"Resource": "*"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/",
"PolicyId": "ANPAIWDAOK6AIFDVX6TT6",
"PolicyName": "AmazonRekognitionFullAccess",
"UpdateDate": "2016-11-30T14:40:44+00:00",
"VersionId": "v1"
},
"AmazonRekognitionReadOnlyAccess": {
"Arn": "arn:aws:iam::aws:policy/AmazonRekognitionReadOnlyAccess",
"AttachmentCount": 0,
"CreateDate": "2016-11-30T14:58:06+00:00",
"DefaultVersionId": "v1",
"Document": {
"Statement": [
{
"Action": [
"rekognition:CompareFaces",
"rekognition:DetectFaces",
"rekognition:DetectLabels",
"rekognition:ListCollections",
"rekognition:ListFaces",
"rekognition:SearchFaces",
"rekognition:SearchFacesByImage"
],
"Effect": "Allow",
"Resource": "*"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/",
"PolicyId": "ANPAILWSUHXUY4ES43SA4",
"PolicyName": "AmazonRekognitionReadOnlyAccess",
"UpdateDate": "2016-11-30T14:58:06+00:00",
"VersionId": "v1"
},
"AmazonRoute53DomainsFullAccess": {
"Arn": "arn:aws:iam::aws:policy/AmazonRoute53DomainsFullAccess",
"AttachmentCount": 0,
"CreateDate": "2015-02-06T18:40:56+00:00",
"DefaultVersionId": "v1",
"Document": {
"Statement": [
{
"Action": [
"route53:CreateHostedZone",
"route53domains:*"
],
"Effect": "Allow",
"Resource": [
"*"
]
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/",
"PolicyId": "ANPAIPAFBMIYUILMOKL6G",
"PolicyName": "AmazonRoute53DomainsFullAccess",
"UpdateDate": "2015-02-06T18:40:56+00:00",
"VersionId": "v1"
},
"AmazonRoute53DomainsReadOnlyAccess": {
"Arn": "arn:aws:iam::aws:policy/AmazonRoute53DomainsReadOnlyAccess",
"AttachmentCount": 0,
"CreateDate": "2015-02-06T18:40:57+00:00",
"DefaultVersionId": "v1",
"Document": {
"Statement": [
{
"Action": [
"route53domains:Get*",
"route53domains:List*"
],
"Effect": "Allow",
"Resource": [
"*"
]
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/",
"PolicyId": "ANPAIDRINP6PPTRXYVQCI",
"PolicyName": "AmazonRoute53DomainsReadOnlyAccess",
"UpdateDate": "2015-02-06T18:40:57+00:00",
"VersionId": "v1"
},
"AmazonRoute53FullAccess": {
"Arn": "arn:aws:iam::aws:policy/AmazonRoute53FullAccess",
"AttachmentCount": 0,
"CreateDate": "2017-02-14T21:25:53+00:00",
"DefaultVersionId": "v2",
"Document": {
"Statement": [
{
"Action": [
"route53:*",
"route53domains:*",
"cloudfront:ListDistributions",
"elasticloadbalancing:DescribeLoadBalancers",
"elasticbeanstalk:DescribeEnvironments",
"s3:ListBucket",
"s3:GetBucketLocation",
"s3:GetBucketWebsiteConfiguration",
"ec2:DescribeVpcs",
"ec2:DescribeRegions",
"sns:ListTopics",
"sns:ListSubscriptionsByTopic",
"cloudwatch:DescribeAlarms",
"cloudwatch:GetMetricStatistics"
],
"Effect": "Allow",
"Resource": "*"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/",
"PolicyId": "ANPAJWVDLG5RPST6PHQ3A",
"PolicyName": "AmazonRoute53FullAccess",
"UpdateDate": "2017-02-14T21:25:53+00:00",
"VersionId": "v2"
},
"AmazonRoute53ReadOnlyAccess": {
"Arn": "arn:aws:iam::aws:policy/AmazonRoute53ReadOnlyAccess",
"AttachmentCount": 0,
"CreateDate": "2016-11-15T21:15:16+00:00",
"DefaultVersionId": "v2",
"Document": {
"Statement": [
{
"Action": [
"route53:Get*",
"route53:List*",
"route53:TestDNSAnswer"
],
"Effect": "Allow",
"Resource": [
"*"
]
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/",
"PolicyId": "ANPAITOYK2ZAOQFXV2JNC",
"PolicyName": "AmazonRoute53ReadOnlyAccess",
"UpdateDate": "2016-11-15T21:15:16+00:00",
"VersionId": "v2"
},
"AmazonS3FullAccess": {
"Arn": "arn:aws:iam::aws:policy/AmazonS3FullAccess",
"AttachmentCount": 1,
"CreateDate": "2015-02-06T18:40:58+00:00",
"DefaultVersionId": "v1",
"Document": {
"Statement": [
{
"Action": "s3:*",
"Effect": "Allow",
"Resource": "*"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/",
"PolicyId": "ANPAIFIR6V6BVTRAHWINE",
"PolicyName": "AmazonS3FullAccess",
"UpdateDate": "2015-02-06T18:40:58+00:00",
"VersionId": "v1"
},
"AmazonS3ReadOnlyAccess": {
"Arn": "arn:aws:iam::aws:policy/AmazonS3ReadOnlyAccess",
"AttachmentCount": 0,
"CreateDate": "2015-02-06T18:40:59+00:00",
"DefaultVersionId": "v1",
"Document": {
"Statement": [
{
"Action": [
"s3:Get*",
"s3:List*"
],
"Effect": "Allow",
"Resource": "*"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/",
"PolicyId": "ANPAIZTJ4DXE7G6AGAE6M",
"PolicyName": "AmazonS3ReadOnlyAccess",
"UpdateDate": "2015-02-06T18:40:59+00:00",
"VersionId": "v1"
},
"AmazonSESFullAccess": {
"Arn": "arn:aws:iam::aws:policy/AmazonSESFullAccess",
"AttachmentCount": 0,
"CreateDate": "2015-02-06T18:41:02+00:00",
"DefaultVersionId": "v1",
"Document": {
"Statement": [
{
"Action": [
"ses:*"
],
"Effect": "Allow",
"Resource": "*"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/",
"PolicyId": "ANPAJ2P4NXCHAT7NDPNR4",
"PolicyName": "AmazonSESFullAccess",
"UpdateDate": "2015-02-06T18:41:02+00:00",
"VersionId": "v1"
},
"AmazonSESReadOnlyAccess": {
"Arn": "arn:aws:iam::aws:policy/AmazonSESReadOnlyAccess",
"AttachmentCount": 0,
"CreateDate": "2015-02-06T18:41:03+00:00",
"DefaultVersionId": "v1",
"Document": {
"Statement": [
{
"Action": [
"ses:Get*",
"ses:List*"
],
"Effect": "Allow",
"Resource": "*"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/",
"PolicyId": "ANPAINV2XPFRMWJJNSCGI",
"PolicyName": "AmazonSESReadOnlyAccess",
"UpdateDate": "2015-02-06T18:41:03+00:00",
"VersionId": "v1"
},
"AmazonSNSFullAccess": {
"Arn": "arn:aws:iam::aws:policy/AmazonSNSFullAccess",
"AttachmentCount": 0,
"CreateDate": "2015-02-06T18:41:05+00:00",
"DefaultVersionId": "v1",
"Document": {
"Statement": [
{
"Action": [
"sns:*"
],
"Effect": "Allow",
"Resource": "*"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/",
"PolicyId": "ANPAJWEKLCXXUNT2SOLSG",
"PolicyName": "AmazonSNSFullAccess",
"UpdateDate": "2015-02-06T18:41:05+00:00",
"VersionId": "v1"
},
"AmazonSNSReadOnlyAccess": {
"Arn": "arn:aws:iam::aws:policy/AmazonSNSReadOnlyAccess",
"AttachmentCount": 0,
"CreateDate": "2015-02-06T18:41:06+00:00",
"DefaultVersionId": "v1",
"Document": {
"Statement": [
{
"Action": [
"sns:GetTopicAttributes",
"sns:List*"
],
"Effect": "Allow",
"Resource": "*"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/",
"PolicyId": "ANPAIZGQCQTFOFPMHSB6W",
"PolicyName": "AmazonSNSReadOnlyAccess",
"UpdateDate": "2015-02-06T18:41:06+00:00",
"VersionId": "v1"
},
"AmazonSNSRole": {
"Arn": "arn:aws:iam::aws:policy/service-role/AmazonSNSRole",
"AttachmentCount": 0,
"CreateDate": "2015-02-06T18:41:30+00:00",
"DefaultVersionId": "v1",
"Document": {
"Statement": [
{
"Action": [
"logs:CreateLogGroup",
"logs:CreateLogStream",
"logs:PutLogEvents",
"logs:PutMetricFilter",
"logs:PutRetentionPolicy"
],
"Effect": "Allow",
"Resource": [
"*"
]
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/service-role/",
"PolicyId": "ANPAJK5GQB7CIK7KHY2GA",
"PolicyName": "AmazonSNSRole",
"UpdateDate": "2015-02-06T18:41:30+00:00",
"VersionId": "v1"
},
"AmazonSQSFullAccess": {
"Arn": "arn:aws:iam::aws:policy/AmazonSQSFullAccess",
"AttachmentCount": 0,
"CreateDate": "2015-02-06T18:41:07+00:00",
"DefaultVersionId": "v1",
"Document": {
"Statement": [
{
"Action": [
"sqs:*"
],
"Effect": "Allow",
"Resource": "*"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/",
"PolicyId": "ANPAI65L554VRJ33ECQS6",
"PolicyName": "AmazonSQSFullAccess",
"UpdateDate": "2015-02-06T18:41:07+00:00",
"VersionId": "v1"
},
"AmazonSQSReadOnlyAccess": {
"Arn": "arn:aws:iam::aws:policy/AmazonSQSReadOnlyAccess",
"AttachmentCount": 0,
"CreateDate": "2015-02-06T18:41:08+00:00",
"DefaultVersionId": "v1",
"Document": {
"Statement": [
{
"Action": [
"sqs:GetQueueAttributes",
"sqs:ListQueues"
],
"Effect": "Allow",
"Resource": "*"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/",
"PolicyId": "ANPAIUGSSQY362XGCM6KW",
"PolicyName": "AmazonSQSReadOnlyAccess",
"UpdateDate": "2015-02-06T18:41:08+00:00",
"VersionId": "v1"
},
"AmazonSSMAutomationApproverAccess": {
"Arn": "arn:aws:iam::aws:policy/AmazonSSMAutomationApproverAccess",
"AttachmentCount": 0,
"CreateDate": "2017-08-07T23:07:28+00:00",
"DefaultVersionId": "v1",
"Document": {
"Statement": [
{
"Action": [
"ssm:DescribeAutomationExecutions",
"ssm:GetAutomationExecution",
"ssm:SendAutomationSignal"
],
"Effect": "Allow",
"Resource": [
"*"
]
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/",
"PolicyId": "ANPAIDSSXIRWBSLWWIORC",
"PolicyName": "AmazonSSMAutomationApproverAccess",
"UpdateDate": "2017-08-07T23:07:28+00:00",
"VersionId": "v1"
},
"AmazonSSMAutomationRole": {
"Arn": "arn:aws:iam::aws:policy/service-role/AmazonSSMAutomationRole",
"AttachmentCount": 0,
"CreateDate": "2017-07-24T23:29:12+00:00",
"DefaultVersionId": "v5",
"Document": {
"Statement": [
{
"Action": [
"lambda:InvokeFunction"
],
"Effect": "Allow",
"Resource": [
"arn:aws:lambda:*:*:function:Automation*"
]
},
{
"Action": [
"ec2:CreateImage",
"ec2:CopyImage",
"ec2:DeregisterImage",
"ec2:DescribeImages",
"ec2:DeleteSnapshot",
"ec2:StartInstances",
"ec2:RunInstances",
"ec2:StopInstances",
"ec2:TerminateInstances",
"ec2:DescribeInstanceStatus",
"ec2:CreateTags",
"ec2:DeleteTags",
"ec2:DescribeTags",
"cloudformation:CreateStack",
"cloudformation:DescribeStackEvents",
"cloudformation:DescribeStacks",
"cloudformation:UpdateStack",
"cloudformation:DeleteStack"
],
"Effect": "Allow",
"Resource": [
"*"
]
},
{
"Action": [
"ssm:*"
],
"Effect": "Allow",
"Resource": [
"*"
]
},
{
"Action": [
"sns:Publish"
],
"Effect": "Allow",
"Resource": [
"arn:aws:sns:*:*:Automation*"
]
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/service-role/",
"PolicyId": "ANPAJIBQCTBCXD2XRNB6W",
"PolicyName": "AmazonSSMAutomationRole",
"UpdateDate": "2017-07-24T23:29:12+00:00",
"VersionId": "v5"
},
"AmazonSSMFullAccess": {
"Arn": "arn:aws:iam::aws:policy/AmazonSSMFullAccess",
"AttachmentCount": 0,
"CreateDate": "2016-03-07T21:09:12+00:00",
"DefaultVersionId": "v2",
"Document": {
"Statement": [
{
"Action": [
"cloudwatch:PutMetricData",
"ds:CreateComputer",
"ds:DescribeDirectories",
"ec2:DescribeInstanceStatus",
"logs:*",
"ssm:*",
"ec2messages:*"
],
"Effect": "Allow",
"Resource": "*"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/",
"PolicyId": "ANPAJA7V6HI4ISQFMDYAG",
"PolicyName": "AmazonSSMFullAccess",
"UpdateDate": "2016-03-07T21:09:12+00:00",
"VersionId": "v2"
},
"AmazonSSMMaintenanceWindowRole": {
"Arn": "arn:aws:iam::aws:policy/service-role/AmazonSSMMaintenanceWindowRole",
"AttachmentCount": 0,
"CreateDate": "2017-08-09T20:49:14+00:00",
"DefaultVersionId": "v2",
"Document": {
"Statement": [
{
"Action": [
"ssm:GetAutomationExecution",
"ssm:GetParameters",
"ssm:ListCommands",
"ssm:SendCommand",
"ssm:StartAutomationExecution"
],
"Effect": "Allow",
"Resource": [
"*"
],
"Sid": "Stmt1477803259000"
},
{
"Action": [
"lambda:InvokeFunction"
],
"Effect": "Allow",
"Resource": [
"arn:aws:lambda:*:*:function:SSM*",
"arn:aws:lambda:*:*:function:*:SSM*"
],
"Sid": "Stmt1477803259001"
},
{
"Action": [
"states:DescribeExecution",
"states:StartExecution"
],
"Effect": "Allow",
"Resource": [
"arn:aws:states:*:*:stateMachine:SSM*",
"arn:aws:states:*:*:execution:SSM*"
],
"Sid": "Stmt1477803259002"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/service-role/",
"PolicyId": "ANPAJV3JNYSTZ47VOXYME",
"PolicyName": "AmazonSSMMaintenanceWindowRole",
"UpdateDate": "2017-08-09T20:49:14+00:00",
"VersionId": "v2"
},
"AmazonSSMReadOnlyAccess": {
"Arn": "arn:aws:iam::aws:policy/AmazonSSMReadOnlyAccess",
"AttachmentCount": 0,
"CreateDate": "2015-05-29T17:44:19+00:00",
"DefaultVersionId": "v1",
"Document": {
"Statement": [
{
"Action": [
"ssm:Describe*",
"ssm:Get*",
"ssm:List*"
],
"Effect": "Allow",
"Resource": "*"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/",
"PolicyId": "ANPAJODSKQGGJTHRYZ5FC",
"PolicyName": "AmazonSSMReadOnlyAccess",
"UpdateDate": "2015-05-29T17:44:19+00:00",
"VersionId": "v1"
},
"AmazonVPCCrossAccountNetworkInterfaceOperations": {
"Arn": "arn:aws:iam::aws:policy/AmazonVPCCrossAccountNetworkInterfaceOperations",
"AttachmentCount": 0,
"CreateDate": "2017-07-18T20:47:16+00:00",
"DefaultVersionId": "v1",
"Document": {
"Statement": [
{
"Action": [
"ec2:DescribeRouteTables",
"ec2:CreateRoute",
"ec2:DeleteRoute",
"ec2:ReplaceRoute"
],
"Effect": "Allow",
"Resource": [
"*"
]
},
{
"Action": [
"ec2:DescribeNetworkInterfaces",
"ec2:CreateNetworkInterface",
"ec2:DeleteNetworkInterface",
"ec2:CreateNetworkInterfacePermission",
"ec2:ModifyNetworkInterfaceAttribute",
"ec2:DescribeNetworkInterfaceAttribute",
"ec2:DescribeVpcs",
"ec2:DescribeSubnets"
],
"Effect": "Allow",
"Resource": [
"*"
]
},
{
"Action": [
"ec2:AssignPrivateIpAddresses",
"ec2:UnassignPrivateIpAddresses"
],
"Effect": "Allow",
"Resource": [
"*"
]
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/",
"PolicyId": "ANPAJ53Y4ZY5OHP4CNRJC",
"PolicyName": "AmazonVPCCrossAccountNetworkInterfaceOperations",
"UpdateDate": "2017-07-18T20:47:16+00:00",
"VersionId": "v1"
},
"AmazonVPCFullAccess": {
"Arn": "arn:aws:iam::aws:policy/AmazonVPCFullAccess",
"AttachmentCount": 1,
"CreateDate": "2015-12-17T17:25:44+00:00",
"DefaultVersionId": "v5",
"Document": {
"Statement": [
{
"Action": [
"ec2:AcceptVpcPeeringConnection",
"ec2:AllocateAddress",
"ec2:AssignPrivateIpAddresses",
"ec2:AssociateAddress",
"ec2:AssociateDhcpOptions",
"ec2:AssociateRouteTable",
"ec2:AttachClassicLinkVpc",
"ec2:AttachInternetGateway",
"ec2:AttachNetworkInterface",
"ec2:AttachVpnGateway",
"ec2:AuthorizeSecurityGroupEgress",
"ec2:AuthorizeSecurityGroupIngress",
"ec2:CreateCustomerGateway",
"ec2:CreateDhcpOptions",
"ec2:CreateFlowLogs",
"ec2:CreateInternetGateway",
"ec2:CreateNatGateway",
"ec2:CreateNetworkAcl",
"ec2:CreateNetworkAcl",
"ec2:CreateNetworkAclEntry",
"ec2:CreateNetworkInterface",
"ec2:CreateRoute",
"ec2:CreateRouteTable",
"ec2:CreateSecurityGroup",
"ec2:CreateSubnet",
"ec2:CreateTags",
"ec2:CreateVpc",
"ec2:CreateVpcEndpoint",
"ec2:CreateVpcPeeringConnection",
"ec2:CreateVpnConnection",
"ec2:CreateVpnConnectionRoute",
"ec2:CreateVpnGateway",
"ec2:DeleteCustomerGateway",
"ec2:DeleteDhcpOptions",
"ec2:DeleteFlowLogs",
"ec2:DeleteInternetGateway",
"ec2:DeleteNatGateway",
"ec2:DeleteNetworkAcl",
"ec2:DeleteNetworkAclEntry",
"ec2:DeleteNetworkInterface",
"ec2:DeleteRoute",
"ec2:DeleteRouteTable",
"ec2:DeleteSecurityGroup",
"ec2:DeleteSubnet",
"ec2:DeleteTags",
"ec2:DeleteVpc",
"ec2:DeleteVpcEndpoints",
"ec2:DeleteVpcPeeringConnection",
"ec2:DeleteVpnConnection",
"ec2:DeleteVpnConnectionRoute",
"ec2:DeleteVpnGateway",
"ec2:DescribeAddresses",
"ec2:DescribeAvailabilityZones",
"ec2:DescribeClassicLinkInstances",
"ec2:DescribeCustomerGateways",
"ec2:DescribeDhcpOptions",
"ec2:DescribeFlowLogs",
"ec2:DescribeInstances",
"ec2:DescribeInternetGateways",
"ec2:DescribeKeyPairs",
"ec2:DescribeMovingAddresses",
"ec2:DescribeNatGateways",
"ec2:DescribeNetworkAcls",
"ec2:DescribeNetworkInterfaceAttribute",
"ec2:DescribeNetworkInterfaces",
"ec2:DescribePrefixLists",
"ec2:DescribeRouteTables",
"ec2:DescribeSecurityGroups",
"ec2:DescribeSubnets",
"ec2:DescribeTags",
"ec2:DescribeVpcAttribute",
"ec2:DescribeVpcClassicLink",
"ec2:DescribeVpcEndpoints",
"ec2:DescribeVpcEndpointServices",
"ec2:DescribeVpcPeeringConnections",
"ec2:DescribeVpcs",
"ec2:DescribeVpnConnections",
"ec2:DescribeVpnGateways",
"ec2:DetachClassicLinkVpc",
"ec2:DetachInternetGateway",
"ec2:DetachNetworkInterface",
"ec2:DetachVpnGateway",
"ec2:DisableVgwRoutePropagation",
"ec2:DisableVpcClassicLink",
"ec2:DisassociateAddress",
"ec2:DisassociateRouteTable",
"ec2:EnableVgwRoutePropagation",
"ec2:EnableVpcClassicLink",
"ec2:ModifyNetworkInterfaceAttribute",
"ec2:ModifySubnetAttribute",
"ec2:ModifyVpcAttribute",
"ec2:ModifyVpcEndpoint",
"ec2:MoveAddressToVpc",
"ec2:RejectVpcPeeringConnection",
"ec2:ReleaseAddress",
"ec2:ReplaceNetworkAclAssociation",
"ec2:ReplaceNetworkAclEntry",
"ec2:ReplaceRoute",
"ec2:ReplaceRouteTableAssociation",
"ec2:ResetNetworkInterfaceAttribute",
"ec2:RestoreAddressToClassic",
"ec2:RevokeSecurityGroupEgress",
"ec2:RevokeSecurityGroupIngress",
"ec2:UnassignPrivateIpAddresses"
],
"Effect": "Allow",
"Resource": "*"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/",
"PolicyId": "ANPAJBWPGNOVKZD3JI2P2",
"PolicyName": "AmazonVPCFullAccess",
"UpdateDate": "2015-12-17T17:25:44+00:00",
"VersionId": "v5"
},
"AmazonVPCReadOnlyAccess": {
"Arn": "arn:aws:iam::aws:policy/AmazonVPCReadOnlyAccess",
"AttachmentCount": 0,
"CreateDate": "2015-12-17T17:25:56+00:00",
"DefaultVersionId": "v4",
"Document": {
"Statement": [
{
"Action": [
"ec2:DescribeAddresses",
"ec2:DescribeClassicLinkInstances",
"ec2:DescribeCustomerGateways",
"ec2:DescribeDhcpOptions",
"ec2:DescribeFlowLogs",
"ec2:DescribeInternetGateways",
"ec2:DescribeMovingAddresses",
"ec2:DescribeNatGateways",
"ec2:DescribeNetworkAcls",
"ec2:DescribeNetworkInterfaceAttribute",
"ec2:DescribeNetworkInterfaces",
"ec2:DescribePrefixLists",
"ec2:DescribeRouteTables",
"ec2:DescribeSecurityGroups",
"ec2:DescribeSubnets",
"ec2:DescribeTags",
"ec2:DescribeVpcAttribute",
"ec2:DescribeVpcClassicLink",
"ec2:DescribeVpcEndpoints",
"ec2:DescribeVpcEndpointServices",
"ec2:DescribeVpcPeeringConnections",
"ec2:DescribeVpcs",
"ec2:DescribeVpnConnections",
"ec2:DescribeVpnGateways"
],
"Effect": "Allow",
"Resource": "*"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/",
"PolicyId": "ANPAIICZJNOJN36GTG6CM",
"PolicyName": "AmazonVPCReadOnlyAccess",
"UpdateDate": "2015-12-17T17:25:56+00:00",
"VersionId": "v4"
},
"AmazonWorkMailFullAccess": {
"Arn": "arn:aws:iam::aws:policy/AmazonWorkMailFullAccess",
"AttachmentCount": 0,
"CreateDate": "2017-04-20T08:35:49+00:00",
"DefaultVersionId": "v3",
"Document": {
"Statement": [
{
"Action": [
"ds:AuthorizeApplication",
"ds:CheckAlias",
"ds:CreateAlias",
"ds:CreateDirectory",
"ds:CreateIdentityPoolDirectory",
"ds:CreateDomain",
"ds:DeleteAlias",
"ds:DeleteDirectory",
"ds:DescribeDirectories",
"ds:ExtendDirectory",
"ds:GetDirectoryLimits",
"ds:ListAuthorizedApplications",
"ds:UnauthorizeApplication",
"ec2:AuthorizeSecurityGroupEgress",
"ec2:AuthorizeSecurityGroupIngress",
"ec2:CreateNetworkInterface",
"ec2:CreateSecurityGroup",
"ec2:CreateSubnet",
"ec2:CreateTags",
"ec2:CreateVpc",
"ec2:DeleteSecurityGroup",
"ec2:DeleteSubnet",
"ec2:DeleteVpc",
"ec2:DescribeAvailabilityZones",
"ec2:DescribeDomains",
"ec2:DescribeRouteTables",
"ec2:DescribeSubnets",
"ec2:DescribeVpcs",
"ec2:RevokeSecurityGroupEgress",
"ec2:RevokeSecurityGroupIngress",
"kms:DescribeKey",
"kms:ListAliases",
"ses:*",
"workmail:*"
],
"Effect": "Allow",
"Resource": "*"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/",
"PolicyId": "ANPAJQVKNMT7SVATQ4AUY",
"PolicyName": "AmazonWorkMailFullAccess",
"UpdateDate": "2017-04-20T08:35:49+00:00",
"VersionId": "v3"
},
"AmazonWorkMailReadOnlyAccess": {
"Arn": "arn:aws:iam::aws:policy/AmazonWorkMailReadOnlyAccess",
"AttachmentCount": 0,
"CreateDate": "2015-02-06T18:40:42+00:00",
"DefaultVersionId": "v1",
"Document": {
"Statement": [
{
"Action": [
"ses:Describe*",
"ses:Get*",
"workmail:Describe*",
"workmail:Get*",
"workmail:List*",
"workmail:Search*"
],
"Effect": "Allow",
"Resource": "*"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/",
"PolicyId": "ANPAJHF7J65E2QFKCWAJM",
"PolicyName": "AmazonWorkMailReadOnlyAccess",
"UpdateDate": "2015-02-06T18:40:42+00:00",
"VersionId": "v1"
},
"AmazonWorkSpacesAdmin": {
"Arn": "arn:aws:iam::aws:policy/AmazonWorkSpacesAdmin",
"AttachmentCount": 0,
"CreateDate": "2016-08-18T23:08:42+00:00",
"DefaultVersionId": "v2",
"Document": {
"Statement": [
{
"Action": [
"workspaces:CreateWorkspaces",
"workspaces:DescribeWorkspaces",
"workspaces:RebootWorkspaces",
"workspaces:RebuildWorkspaces",
"workspaces:TerminateWorkspaces",
"workspaces:DescribeWorkspaceDirectories",
"workspaces:DescribeWorkspaceBundles",
"workspaces:ModifyWorkspaceProperties",
"workspaces:StopWorkspaces",
"workspaces:StartWorkspaces",
"workspaces:DescribeWorkspacesConnectionStatus",
"workspaces:CreateTags",
"workspaces:DeleteTags",
"workspaces:DescribeTags",
"kms:ListKeys",
"kms:ListAliases",
"kms:DescribeKey"
],
"Effect": "Allow",
"Resource": "*"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/",
"PolicyId": "ANPAJ26AU6ATUQCT5KVJU",
"PolicyName": "AmazonWorkSpacesAdmin",
"UpdateDate": "2016-08-18T23:08:42+00:00",
"VersionId": "v2"
},
"AmazonWorkSpacesApplicationManagerAdminAccess": {
"Arn": "arn:aws:iam::aws:policy/AmazonWorkSpacesApplicationManagerAdminAccess",
"AttachmentCount": 0,
"CreateDate": "2015-04-09T14:03:18+00:00",
"DefaultVersionId": "v1",
"Document": {
"Statement": [
{
"Action": "wam:AuthenticatePackager",
"Effect": "Allow",
"Resource": "*"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/",
"PolicyId": "ANPAJPRL4KYETIH7XGTSS",
"PolicyName": "AmazonWorkSpacesApplicationManagerAdminAccess",
"UpdateDate": "2015-04-09T14:03:18+00:00",
"VersionId": "v1"
},
"AmazonZocaloFullAccess": {
"Arn": "arn:aws:iam::aws:policy/AmazonZocaloFullAccess",
"AttachmentCount": 0,
"CreateDate": "2015-02-06T18:41:13+00:00",
"DefaultVersionId": "v1",
"Document": {
"Statement": [
{
"Action": [
"zocalo:*",
"ds:*",
"ec2:AuthorizeSecurityGroupEgress",
"ec2:AuthorizeSecurityGroupIngress",
"ec2:CreateNetworkInterface",
"ec2:CreateSecurityGroup",
"ec2:CreateSubnet",
"ec2:CreateTags",
"ec2:CreateVpc",
"ec2:DescribeAvailabilityZones",
"ec2:DescribeNetworkInterfaces",
"ec2:DescribeSubnets",
"ec2:DescribeVpcs",
"ec2:DeleteNetworkInterface",
"ec2:DeleteSecurityGroup",
"ec2:RevokeSecurityGroupEgress",
"ec2:RevokeSecurityGroupIngress"
],
"Effect": "Allow",
"Resource": "*"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/",
"PolicyId": "ANPAJLCDXYRINDMUXEVL6",
"PolicyName": "AmazonZocaloFullAccess",
"UpdateDate": "2015-02-06T18:41:13+00:00",
"VersionId": "v1"
},
"AmazonZocaloReadOnlyAccess": {
"Arn": "arn:aws:iam::aws:policy/AmazonZocaloReadOnlyAccess",
"AttachmentCount": 0,
"CreateDate": "2015-02-06T18:41:14+00:00",
"DefaultVersionId": "v1",
"Document": {
"Statement": [
{
"Action": [
"zocalo:Describe*",
"ds:DescribeDirectories",
"ec2:DescribeVpcs",
"ec2:DescribeSubnets"
],
"Effect": "Allow",
"Resource": "*"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/",
"PolicyId": "ANPAISRCSSJNS3QPKZJPM",
"PolicyName": "AmazonZocaloReadOnlyAccess",
"UpdateDate": "2015-02-06T18:41:14+00:00",
"VersionId": "v1"
},
"ApplicationAutoScalingForAmazonAppStreamAccess": {
"Arn": "arn:aws:iam::aws:policy/service-role/ApplicationAutoScalingForAmazonAppStreamAccess",
"AttachmentCount": 0,
"CreateDate": "2017-02-06T21:39:56+00:00",
"DefaultVersionId": "v1",
"Document": {
"Statement": [
{
"Action": [
"appstream:UpdateFleet",
"appstream:DescribeFleets"
],
"Effect": "Allow",
"Resource": [
"*"
]
},
{
"Action": [
"cloudwatch:DescribeAlarms"
],
"Effect": "Allow",
"Resource": [
"*"
]
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/service-role/",
"PolicyId": "ANPAIEL3HJCCWFVHA6KPG",
"PolicyName": "ApplicationAutoScalingForAmazonAppStreamAccess",
"UpdateDate": "2017-02-06T21:39:56+00:00",
"VersionId": "v1"
},
"AutoScalingConsoleFullAccess": {
"Arn": "arn:aws:iam::aws:policy/AutoScalingConsoleFullAccess",
"AttachmentCount": 0,
"CreateDate": "2017-01-12T19:43:16+00:00",
"DefaultVersionId": "v1",
"Document": {
"Statement": [
{
"Action": [
"ec2:AuthorizeSecurityGroupIngress",
"ec2:CreateKeyPair",
"ec2:CreateSecurityGroup",
"ec2:DescribeAvailabilityZones",
"ec2:DescribeImages",
"ec2:DescribeKeyPairs",
"ec2:DescribeSecurityGroups",
"ec2:DescribeSubnets",
"ec2:DescribeVpcs",
"ec2:DescribeVpcClassicLink",
"ec2:ImportKeyPair"
],
"Effect": "Allow",
"Resource": "*"
},
{
"Action": "elasticloadbalancing:Describe*",
"Effect": "Allow",
"Resource": "*"
},
{
"Action": [
"cloudwatch:ListMetrics",
"cloudwatch:GetMetricStatistics",
"cloudwatch:PutMetricAlarm",
"cloudwatch:Describe*"
],
"Effect": "Allow",
"Resource": "*"
},
{
"Action": "autoscaling:*",
"Effect": "Allow",
"Resource": "*"
},
{
"Action": [
"sns:ListSubscriptions",
"sns:ListTopics"
],
"Effect": "Allow",
"Resource": "*"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/",
"PolicyId": "ANPAIYEN6FJGYYWJFFCZW",
"PolicyName": "AutoScalingConsoleFullAccess",
"UpdateDate": "2017-01-12T19:43:16+00:00",
"VersionId": "v1"
},
"AutoScalingConsoleReadOnlyAccess": {
"Arn": "arn:aws:iam::aws:policy/AutoScalingConsoleReadOnlyAccess",
"AttachmentCount": 0,
"CreateDate": "2017-01-12T19:48:53+00:00",
"DefaultVersionId": "v1",
"Document": {
"Statement": [
{
"Action": [
"ec2:DescribeVpcs",
"ec2:DescribeVpcClassicLink",
"ec2:DescribeAvailabilityZones",
"ec2:DescribeSubnets"
],
"Effect": "Allow",
"Resource": "*"
},
{
"Action": "elasticloadbalancing:Describe*",
"Effect": "Allow",
"Resource": "*"
},
{
"Action": [
"cloudwatch:ListMetrics",
"cloudwatch:GetMetricStatistics",
"cloudwatch:Describe*"
],
"Effect": "Allow",
"Resource": "*"
},
{
"Action": "autoscaling:Describe*",
"Effect": "Allow",
"Resource": "*"
},
{
"Action": [
"sns:ListSubscriptions",
"sns:ListTopics"
],
"Effect": "Allow",
"Resource": "*"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/",
"PolicyId": "ANPAI3A7GDXOYQV3VUQMK",
"PolicyName": "AutoScalingConsoleReadOnlyAccess",
"UpdateDate": "2017-01-12T19:48:53+00:00",
"VersionId": "v1"
},
"AutoScalingFullAccess": {
"Arn": "arn:aws:iam::aws:policy/AutoScalingFullAccess",
"AttachmentCount": 0,
"CreateDate": "2017-01-12T19:31:58+00:00",
"DefaultVersionId": "v1",
"Document": {
"Statement": [
{
"Action": "autoscaling:*",
"Effect": "Allow",
"Resource": "*"
},
{
"Action": "cloudwatch:PutMetricAlarm",
"Effect": "Allow",
"Resource": "*"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/",
"PolicyId": "ANPAIAWRCSJDDXDXGPCFU",
"PolicyName": "AutoScalingFullAccess",
"UpdateDate": "2017-01-12T19:31:58+00:00",
"VersionId": "v1"
},
"AutoScalingNotificationAccessRole": {
"Arn": "arn:aws:iam::aws:policy/service-role/AutoScalingNotificationAccessRole",
"AttachmentCount": 0,
"CreateDate": "2015-02-06T18:41:22+00:00",
"DefaultVersionId": "v1",
"Document": {
"Statement": [
{
"Action": [
"sqs:SendMessage",
"sqs:GetQueueUrl",
"sns:Publish"
],
"Effect": "Allow",
"Resource": "*"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/service-role/",
"PolicyId": "ANPAIO2VMUPGDC5PZVXVA",
"PolicyName": "AutoScalingNotificationAccessRole",
"UpdateDate": "2015-02-06T18:41:22+00:00",
"VersionId": "v1"
},
"AutoScalingReadOnlyAccess": {
"Arn": "arn:aws:iam::aws:policy/AutoScalingReadOnlyAccess",
"AttachmentCount": 0,
"CreateDate": "2017-01-12T19:39:35+00:00",
"DefaultVersionId": "v1",
"Document": {
"Statement": [
{
"Action": "autoscaling:Describe*",
"Effect": "Allow",
"Resource": "*"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/",
"PolicyId": "ANPAIAFWUVLC2LPLSFTFG",
"PolicyName": "AutoScalingReadOnlyAccess",
"UpdateDate": "2017-01-12T19:39:35+00:00",
"VersionId": "v1"
},
"Billing": {
"Arn": "arn:aws:iam::aws:policy/job-function/Billing",
"AttachmentCount": 0,
"CreateDate": "2016-11-10T17:33:18+00:00",
"DefaultVersionId": "v1",
"Document": {
"Statement": [
{
"Action": [
"aws-portal:*Billing",
"aws-portal:*Usage",
"aws-portal:*PaymentMethods",
"budgets:ViewBudget",
"budgets:ModifyBudget"
],
"Effect": "Allow",
"Resource": "*"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/job-function/",
"PolicyId": "ANPAIFTHXT6FFMIRT7ZEA",
"PolicyName": "Billing",
"UpdateDate": "2016-11-10T17:33:18+00:00",
"VersionId": "v1"
},
"CloudFrontFullAccess": {
"Arn": "arn:aws:iam::aws:policy/CloudFrontFullAccess",
"AttachmentCount": 0,
"CreateDate": "2016-01-21T17:03:57+00:00",
"DefaultVersionId": "v3",
"Document": {
"Statement": [
{
"Action": [
"s3:ListAllMyBuckets"
],
"Effect": "Allow",
"Resource": "arn:aws:s3:::*"
},
{
"Action": [
"acm:ListCertificates",
"cloudfront:*",
"iam:ListServerCertificates",
"waf:ListWebACLs",
"waf:GetWebACL"
],
"Effect": "Allow",
"Resource": "*"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/",
"PolicyId": "ANPAIPRV52SH6HDCCFY6U",
"PolicyName": "CloudFrontFullAccess",
"UpdateDate": "2016-01-21T17:03:57+00:00",
"VersionId": "v3"
},
"CloudFrontReadOnlyAccess": {
"Arn": "arn:aws:iam::aws:policy/CloudFrontReadOnlyAccess",
"AttachmentCount": 0,
"CreateDate": "2016-01-21T17:03:28+00:00",
"DefaultVersionId": "v3",
"Document": {
"Statement": [
{
"Action": [
"acm:ListCertificates",
"cloudfront:Get*",
"cloudfront:List*",
"iam:ListServerCertificates",
"route53:List*",
"waf:ListWebACLs",
"waf:GetWebACL"
],
"Effect": "Allow",
"Resource": "*"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/",
"PolicyId": "ANPAJJZMNYOTZCNQP36LG",
"PolicyName": "CloudFrontReadOnlyAccess",
"UpdateDate": "2016-01-21T17:03:28+00:00",
"VersionId": "v3"
},
"CloudSearchFullAccess": {
"Arn": "arn:aws:iam::aws:policy/CloudSearchFullAccess",
"AttachmentCount": 0,
"CreateDate": "2015-02-06T18:39:56+00:00",
"DefaultVersionId": "v1",
"Document": {
"Statement": [
{
"Action": [
"cloudsearch:*"
],
"Effect": "Allow",
"Resource": "*"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/",
"PolicyId": "ANPAIM6OOWKQ7L7VBOZOC",
"PolicyName": "CloudSearchFullAccess",
"UpdateDate": "2015-02-06T18:39:56+00:00",
"VersionId": "v1"
},
"CloudSearchReadOnlyAccess": {
"Arn": "arn:aws:iam::aws:policy/CloudSearchReadOnlyAccess",
"AttachmentCount": 0,
"CreateDate": "2015-02-06T18:39:57+00:00",
"DefaultVersionId": "v1",
"Document": {
"Statement": [
{
"Action": [
"cloudsearch:Describe*",
"cloudsearch:List*"
],
"Effect": "Allow",
"Resource": "*"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/",
"PolicyId": "ANPAJWPLX7N7BCC3RZLHW",
"PolicyName": "CloudSearchReadOnlyAccess",
"UpdateDate": "2015-02-06T18:39:57+00:00",
"VersionId": "v1"
},
"CloudWatchActionsEC2Access": {
"Arn": "arn:aws:iam::aws:policy/CloudWatchActionsEC2Access",
"AttachmentCount": 0,
"CreateDate": "2015-07-07T00:00:33+00:00",
"DefaultVersionId": "v1",
"Document": {
"Statement": [
{
"Action": [
"cloudwatch:Describe*",
"ec2:Describe*",
"ec2:RebootInstances",
"ec2:StopInstances",
"ec2:TerminateInstances"
],
"Effect": "Allow",
"Resource": "*"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/",
"PolicyId": "ANPAIOWD4E3FVSORSZTGU",
"PolicyName": "CloudWatchActionsEC2Access",
"UpdateDate": "2015-07-07T00:00:33+00:00",
"VersionId": "v1"
},
"CloudWatchEventsBuiltInTargetExecutionAccess": {
"Arn": "arn:aws:iam::aws:policy/service-role/CloudWatchEventsBuiltInTargetExecutionAccess",
"AttachmentCount": 0,
"CreateDate": "2016-01-14T18:35:49+00:00",
"DefaultVersionId": "v1",
"Document": {
"Statement": [
{
"Action": [
"ec2:Describe*",
"ec2:RebootInstances",
"ec2:StopInstances",
"ec2:TerminateInstances",
"ec2:CreateSnapshot"
],
"Effect": "Allow",
"Resource": "*",
"Sid": "CloudWatchEventsBuiltInTargetExecutionAccess"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/service-role/",
"PolicyId": "ANPAIC5AQ5DATYSNF4AUM",
"PolicyName": "CloudWatchEventsBuiltInTargetExecutionAccess",
"UpdateDate": "2016-01-14T18:35:49+00:00",
"VersionId": "v1"
},
"CloudWatchEventsFullAccess": {
"Arn": "arn:aws:iam::aws:policy/CloudWatchEventsFullAccess",
"AttachmentCount": 0,
"CreateDate": "2016-01-14T18:37:08+00:00",
"DefaultVersionId": "v1",
"Document": {
"Statement": [
{
"Action": "events:*",
"Effect": "Allow",
"Resource": "*",
"Sid": "CloudWatchEventsFullAccess"
},
{
"Action": "iam:PassRole",
"Effect": "Allow",
"Resource": "arn:aws:iam::*:role/AWS_Events_Invoke_Targets",
"Sid": "IAMPassRoleForCloudWatchEvents"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/",
"PolicyId": "ANPAJZLOYLNHESMYOJAFU",
"PolicyName": "CloudWatchEventsFullAccess",
"UpdateDate": "2016-01-14T18:37:08+00:00",
"VersionId": "v1"
},
"CloudWatchEventsInvocationAccess": {
"Arn": "arn:aws:iam::aws:policy/service-role/CloudWatchEventsInvocationAccess",
"AttachmentCount": 0,
"CreateDate": "2016-01-14T18:36:33+00:00",
"DefaultVersionId": "v1",
"Document": {
"Statement": [
{
"Action": [
"kinesis:PutRecord"
],
"Effect": "Allow",
"Resource": "*",
"Sid": "CloudWatchEventsInvocationAccess"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/service-role/",
"PolicyId": "ANPAJJXD6JKJLK2WDLZNO",
"PolicyName": "CloudWatchEventsInvocationAccess",
"UpdateDate": "2016-01-14T18:36:33+00:00",
"VersionId": "v1"
},
"CloudWatchEventsReadOnlyAccess": {
"Arn": "arn:aws:iam::aws:policy/CloudWatchEventsReadOnlyAccess",
"AttachmentCount": 0,
"CreateDate": "2017-08-10T17:25:34+00:00",
"DefaultVersionId": "v2",
"Document": {
"Statement": [
{
"Action": [
"events:DescribeRule",
"events:ListRuleNamesByTarget",
"events:ListRules",
"events:ListTargetsByRule",
"events:TestEventPattern",
"events:DescribeEventBus"
],
"Effect": "Allow",
"Resource": "*",
"Sid": "CloudWatchEventsReadOnlyAccess"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/",
"PolicyId": "ANPAIILJPXXA6F7GYLYBS",
"PolicyName": "CloudWatchEventsReadOnlyAccess",
"UpdateDate": "2017-08-10T17:25:34+00:00",
"VersionId": "v2"
},
"CloudWatchFullAccess": {
"Arn": "arn:aws:iam::aws:policy/CloudWatchFullAccess",
"AttachmentCount": 0,
"CreateDate": "2015-02-06T18:40:00+00:00",
"DefaultVersionId": "v1",
"Document": {
"Statement": [
{
"Action": [
"autoscaling:Describe*",
"cloudwatch:*",
"logs:*",
"sns:*"
],
"Effect": "Allow",
"Resource": "*"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/",
"PolicyId": "ANPAIKEABORKUXN6DEAZU",
"PolicyName": "CloudWatchFullAccess",
"UpdateDate": "2015-02-06T18:40:00+00:00",
"VersionId": "v1"
},
"CloudWatchLogsFullAccess": {
"Arn": "arn:aws:iam::aws:policy/CloudWatchLogsFullAccess",
"AttachmentCount": 0,
"CreateDate": "2015-02-06T18:40:02+00:00",
"DefaultVersionId": "v1",
"Document": {
"Statement": [
{
"Action": [
"logs:*"
],
"Effect": "Allow",
"Resource": "*"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/",
"PolicyId": "ANPAJ3ZGNWK2R5HW5BQFO",
"PolicyName": "CloudWatchLogsFullAccess",
"UpdateDate": "2015-02-06T18:40:02+00:00",
"VersionId": "v1"
},
"CloudWatchLogsReadOnlyAccess": {
"Arn": "arn:aws:iam::aws:policy/CloudWatchLogsReadOnlyAccess",
"AttachmentCount": 0,
"CreateDate": "2017-08-14T22:22:16+00:00",
"DefaultVersionId": "v3",
"Document": {
"Statement": [
{
"Action": [
"logs:Describe*",
"logs:Get*",
"logs:List*",
"logs:TestMetricFilter",
"logs:FilterLogEvents"
],
"Effect": "Allow",
"Resource": "*"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/",
"PolicyId": "ANPAJ2YIYDYSNNEHK3VKW",
"PolicyName": "CloudWatchLogsReadOnlyAccess",
"UpdateDate": "2017-08-14T22:22:16+00:00",
"VersionId": "v3"
},
"CloudWatchReadOnlyAccess": {
"Arn": "arn:aws:iam::aws:policy/CloudWatchReadOnlyAccess",
"AttachmentCount": 0,
"CreateDate": "2015-02-06T18:40:01+00:00",
"DefaultVersionId": "v1",
"Document": {
"Statement": [
{
"Action": [
"autoscaling:Describe*",
"cloudwatch:Describe*",
"cloudwatch:Get*",
"cloudwatch:List*",
"logs:Get*",
"logs:Describe*",
"logs:TestMetricFilter",
"sns:Get*",
"sns:List*"
],
"Effect": "Allow",
"Resource": "*"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/",
"PolicyId": "ANPAJN23PDQP7SZQAE3QE",
"PolicyName": "CloudWatchReadOnlyAccess",
"UpdateDate": "2015-02-06T18:40:01+00:00",
"VersionId": "v1"
},
"DataScientist": {
"Arn": "arn:aws:iam::aws:policy/job-function/DataScientist",
"AttachmentCount": 0,
"CreateDate": "2016-11-10T17:28:48+00:00",
"DefaultVersionId": "v1",
"Document": {
"Statement": [
{
"Action": [
"autoscaling:*",
"cloudwatch:*",
"cloudformation:CreateStack",
"cloudformation:DescribeStackEvents",
"datapipeline:Describe*",
"datapipeline:ListPipelines",
"datapipeline:GetPipelineDefinition",
"datapipeline:QueryObjects",
"dynamodb:*",
"ec2:CancelSpotInstanceRequests",
"ec2:CancelSpotFleetRequests",
"ec2:CreateTags",
"ec2:DeleteTags",
"ec2:Describe*",
"ec2:ModifyImageAttribute",
"ec2:ModifyInstanceAttribute",
"ec2:ModifySpotFleetRequest",
"ec2:RequestSpotInstances",
"ec2:RequestSpotFleet",
"elasticfilesystem:*",
"elasticmapreduce:*",
"es:*",
"firehose:*",
"iam:GetInstanceProfile",
"iam:GetRole",
"iam:GetPolicy",
"iam:GetPolicyVersion",
"iam:ListRoles",
"kinesis:*",
"kms:List*",
"lambda:Create*",
"lambda:Delete*",
"lambda:Get*",
"lambda:InvokeFunction",
"lambda:PublishVersion",
"lambda:Update*",
"lambda:List*",
"machinelearning:*",
"sdb:*",
"rds:*",
"sns:ListSubscriptions",
"sns:ListTopics",
"logs:DescribeLogStreams",
"logs:GetLogEvents",
"redshift:*",
"s3:CreateBucket",
"sns:CreateTopic",
"sns:Get*",
"sns:List*"
],
"Effect": "Allow",
"Resource": "*"
},
{
"Action": [
"s3:Abort*",
"s3:DeleteObject",
"s3:Get*",
"s3:List*",
"s3:PutAccelerateConfiguration",
"s3:PutBucketLogging",
"s3:PutBucketNotification",
"s3:PutBucketTagging",
"s3:PutObject",
"s3:Replicate*",
"s3:RestoreObject"
],
"Effect": "Allow",
"Resource": [
"*"
]
},
{
"Action": [
"ec2:RunInstances",
"ec2:TerminateInstances"
],
"Effect": "Allow",
"Resource": [
"*"
]
},
{
"Action": [
"iam:GetRole",
"iam:PassRole"
],
"Effect": "Allow",
"Resource": [
"arn:aws:iam::*:role/DataPipelineDefaultRole",
"arn:aws:iam::*:role/DataPipelineDefaultResourceRole",
"arn:aws:iam::*:role/EMR_EC2_DefaultRole",
"arn:aws:iam::*:role/EMR_DefaultRole",
"arn:aws:iam::*:role/kinesis-*"
]
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/job-function/",
"PolicyId": "ANPAJ5YHI2BQW7EQFYDXS",
"PolicyName": "DataScientist",
"UpdateDate": "2016-11-10T17:28:48+00:00",
"VersionId": "v1"
},
"DatabaseAdministrator": {
"Arn": "arn:aws:iam::aws:policy/job-function/DatabaseAdministrator",
"AttachmentCount": 0,
"CreateDate": "2016-11-10T17:25:43+00:00",
"DefaultVersionId": "v1",
"Document": {
"Statement": [
{
"Action": [
"cloudwatch:DeleteAlarms",
"cloudwatch:Describe*",
"cloudwatch:DisableAlarmActions",
"cloudwatch:EnableAlarmActions",
"cloudwatch:Get*",
"cloudwatch:List*",
"cloudwatch:PutMetricAlarm",
"datapipeline:ActivatePipeline",
"datapipeline:CreatePipeline",
"datapipeline:DeletePipeline",
"datapipeline:DescribeObjects",
"datapipeline:DescribePipelines",
"datapipeline:GetPipelineDefinition",
"datapipeline:ListPipelines",
"datapipeline:PutPipelineDefinition",
"datapipeline:QueryObjects",
"dynamodb:*",
"ec2:DescribeAccountAttributes",
"ec2:DescribeAddresses",
"ec2:DescribeAvailabilityZones",
"ec2:DescribeInternetGateways",
"ec2:DescribeSecurityGroups",
"ec2:DescribeSubnets",
"ec2:DescribeVpcs",
"elasticache:*",
"iam:ListRoles",
"iam:GetRole",
"kms:ListKeys",
"lambda:CreateEventSourceMapping",
"lambda:CreateFunction",
"lambda:DeleteEventSourceMapping",
"lambda:DeleteFunction",
"lambda:GetFunctionConfiguration",
"lambda:ListEventSourceMappings",
"lambda:ListFunctions",
"logs:DescribeLogGroups",
"logs:DescribeLogStreams",
"logs:FilterLogEvents",
"logs:GetLogEvents",
"logs:Create*",
"logs:PutLogEvents",
"logs:PutMetricFilter",
"rds:*",
"redshift:*",
"s3:CreateBucket",
"sns:CreateTopic",
"sns:DeleteTopic",
"sns:Get*",
"sns:List*",
"sns:SetTopicAttributes",
"sns:Subscribe",
"sns:Unsubscribe"
],
"Effect": "Allow",
"Resource": "*"
},
{
"Action": [
"s3:AbortMultipartUpload",
"s3:DeleteObject*",
"s3:Get*",
"s3:List*",
"s3:PutAccelerateConfiguration",
"s3:PutBucketTagging",
"s3:PutBucketVersioning",
"s3:PutBucketWebsite",
"s3:PutLifecycleConfiguration",
"s3:PutReplicationConfiguration",
"s3:PutObject*",
"s3:Replicate*",
"s3:RestoreObject"
],
"Effect": "Allow",
"Resource": [
"*"
]
},
{
"Action": [
"iam:GetRole",
"iam:PassRole"
],
"Effect": "Allow",
"Resource": [
"arn:aws:iam::*:role/rds-monitoring-role",
"arn:aws:iam::*:role/rdbms-lambda-access",
"arn:aws:iam::*:role/lambda_exec_role",
"arn:aws:iam::*:role/lambda-dynamodb-*",
"arn:aws:iam::*:role/lambda-vpc-execution-role",
"arn:aws:iam::*:role/DataPipelineDefaultRole",
"arn:aws:iam::*:role/DataPipelineDefaultResourceRole"
]
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/job-function/",
"PolicyId": "ANPAIGBMAW4VUQKOQNVT6",
"PolicyName": "DatabaseAdministrator",
"UpdateDate": "2016-11-10T17:25:43+00:00",
"VersionId": "v1"
},
"IAMFullAccess": {
"Arn": "arn:aws:iam::aws:policy/IAMFullAccess",
"AttachmentCount": 2,
"CreateDate": "2015-02-06T18:40:38+00:00",
"DefaultVersionId": "v1",
"Document": {
"Statement": [
{
"Action": "iam:*",
"Effect": "Allow",
"Resource": "*"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/",
"PolicyId": "ANPAI7XKCFMBPM3QQRRVQ",
"PolicyName": "IAMFullAccess",
"UpdateDate": "2015-02-06T18:40:38+00:00",
"VersionId": "v1"
},
"IAMReadOnlyAccess": {
"Arn": "arn:aws:iam::aws:policy/IAMReadOnlyAccess",
"AttachmentCount": 0,
"CreateDate": "2016-09-06T17:06:37+00:00",
"DefaultVersionId": "v3",
"Document": {
"Statement": [
{
"Action": [
"iam:GenerateCredentialReport",
"iam:GenerateServiceLastAccessedDetails",
"iam:Get*",
"iam:List*"
],
"Effect": "Allow",
"Resource": "*"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/",
"PolicyId": "ANPAJKSO7NDY4T57MWDSQ",
"PolicyName": "IAMReadOnlyAccess",
"UpdateDate": "2016-09-06T17:06:37+00:00",
"VersionId": "v3"
},
"IAMSelfManageServiceSpecificCredentials": {
"Arn": "arn:aws:iam::aws:policy/IAMSelfManageServiceSpecificCredentials",
"AttachmentCount": 0,
"CreateDate": "2016-12-22T17:25:18+00:00",
"DefaultVersionId": "v1",
"Document": {
"Statement": [
{
"Action": [
"iam:CreateServiceSpecificCredential",
"iam:ListServiceSpecificCredentials",
"iam:UpdateServiceSpecificCredential",
"iam:DeleteServiceSpecificCredential",
"iam:ResetServiceSpecificCredential"
],
"Effect": "Allow",
"Resource": "arn:aws:iam::*:user/${aws:username}"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/",
"PolicyId": "ANPAI4VT74EMXK2PMQJM2",
"PolicyName": "IAMSelfManageServiceSpecificCredentials",
"UpdateDate": "2016-12-22T17:25:18+00:00",
"VersionId": "v1"
},
"IAMUserChangePassword": {
"Arn": "arn:aws:iam::aws:policy/IAMUserChangePassword",
"AttachmentCount": 1,
"CreateDate": "2016-11-15T23:18:55+00:00",
"DefaultVersionId": "v2",
"Document": {
"Statement": [
{
"Action": [
"iam:ChangePassword"
],
"Effect": "Allow",
"Resource": [
"arn:aws:iam::*:user/${aws:username}"
]
},
{
"Action": [
"iam:GetAccountPasswordPolicy"
],
"Effect": "Allow",
"Resource": "*"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/",
"PolicyId": "ANPAJ4L4MM2A7QIEB56MS",
"PolicyName": "IAMUserChangePassword",
"UpdateDate": "2016-11-15T23:18:55+00:00",
"VersionId": "v2"
},
"IAMUserSSHKeys": {
"Arn": "arn:aws:iam::aws:policy/IAMUserSSHKeys",
"AttachmentCount": 1,
"CreateDate": "2015-07-09T17:08:54+00:00",
"DefaultVersionId": "v1",
"Document": {
"Statement": [
{
"Action": [
"iam:DeleteSSHPublicKey",
"iam:GetSSHPublicKey",
"iam:ListSSHPublicKeys",
"iam:UpdateSSHPublicKey",
"iam:UploadSSHPublicKey"
],
"Effect": "Allow",
"Resource": "arn:aws:iam::*:user/${aws:username}"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/",
"PolicyId": "ANPAJTSHUA4UXGXU7ANUA",
"PolicyName": "IAMUserSSHKeys",
"UpdateDate": "2015-07-09T17:08:54+00:00",
"VersionId": "v1"
},
"NetworkAdministrator": {
"Arn": "arn:aws:iam::aws:policy/job-function/NetworkAdministrator",
"AttachmentCount": 0,
"CreateDate": "2017-03-20T18:44:58+00:00",
"DefaultVersionId": "v2",
"Document": {
"Statement": [
{
"Action": [
"autoscaling:Describe*",
"ec2:AllocateAddress",
"ec2:AssignPrivateIpAddresses",
"ec2:AssociateAddress",
"ec2:AssociateDhcpOptions",
"ec2:AssociateRouteTable",
"ec2:AttachInternetGateway",
"ec2:AttachNetworkInterface",
"ec2:AttachVpnGateway",
"ec2:CreateCustomerGateway",
"ec2:CreateDhcpOptions",
"ec2:CreateFlowLogs",
"ec2:CreateInternetGateway",
"ec2:CreateNatGateway",
"ec2:CreateNetworkAcl",
"ec2:CreateNetworkAcl",
"ec2:CreateNetworkAclEntry",
"ec2:CreateNetworkInterface",
"ec2:CreateRoute",
"ec2:CreateRouteTable",
"ec2:CreateSecurityGroup",
"ec2:CreateSubnet",
"ec2:CreateTags",
"ec2:CreateVpc",
"ec2:CreateVpcEndpoint",
"ec2:CreateVpnConnection",
"ec2:CreateVpnConnectionRoute",
"ec2:CreateVpnGateway",
"ec2:CreatePlacementGroup",
"ec2:DeletePlacementGroup",
"ec2:DescribePlacementGroups",
"ec2:DeleteFlowLogs",
"ec2:DeleteNatGateway",
"ec2:DeleteNetworkInterface",
"ec2:DeleteSubnet",
"ec2:DeleteTags",
"ec2:DeleteVpc",
"ec2:DeleteVpcEndpoints",
"ec2:DeleteVpnConnection",
"ec2:DeleteVpnConnectionRoute",
"ec2:DeleteVpnGateway",
"ec2:DescribeAddresses",
"ec2:DescribeAvailabilityZones",
"ec2:DescribeClassicLinkInstances",
"ec2:DescribeCustomerGateways",
"ec2:DescribeVpcClassicLinkDnsSupport",
"ec2:DescribeDhcpOptions",
"ec2:DescribeFlowLogs",
"ec2:DescribeInstances",
"ec2:DescribeInternetGateways",
"ec2:DescribeKeyPairs",
"ec2:DescribeMovingAddresses",
"ec2:DescribeNatGateways",
"ec2:DescribeNetworkAcls",
"ec2:DescribeNetworkInterfaceAttribute",
"ec2:DescribeNetworkInterfaces",
"ec2:DescribePrefixLists",
"ec2:DescribeRouteTables",
"ec2:DescribeSecurityGroups",
"ec2:DescribeSubnets",
"ec2:DescribeTags",
"ec2:DescribeVpcAttribute",
"ec2:DescribeVpcClassicLink",
"ec2:DescribeVpcEndpoints",
"ec2:DescribeVpcEndpointServices",
"ec2:DescribeVpcPeeringConnections",
"ec2:DescribeVpcs",
"ec2:DescribeVpnConnections",
"ec2:DescribeVpnGateways",
"ec2:DetachInternetGateway",
"ec2:DetachNetworkInterface",
"ec2:DetachVpnGateway",
"ec2:DisableVgwRoutePropagation",
"ec2:DisassociateAddress",
"ec2:DisassociateRouteTable",
"ec2:EnableVgwRoutePropagation",
"ec2:ModifyNetworkInterfaceAttribute",
"ec2:ModifySubnetAttribute",
"ec2:ModifyVpcAttribute",
"ec2:ModifyVpcEndpoint",
"ec2:MoveAddressToVpc",
"ec2:ReleaseAddress",
"ec2:ReplaceNetworkAclAssociation",
"ec2:ReplaceNetworkAclEntry",
"ec2:ReplaceRoute",
"ec2:ReplaceRouteTableAssociation",
"ec2:ResetNetworkInterfaceAttribute",
"ec2:RestoreAddressToClassic",
"ec2:UnassignPrivateIpAddresses",
"directconnect:*",
"route53:*",
"route53domains:*",
"cloudfront:ListDistributions",
"elasticloadbalancing:*",
"elasticbeanstalk:Describe*",
"elasticbeanstalk:List*",
"elasticbeanstalk:RetrieveEnvironmentInfo",
"elasticbeanstalk:RequestEnvironmentInfo",
"sns:ListTopics",
"sns:ListSubscriptionsByTopic",
"sns:CreateTopic",
"cloudwatch:DescribeAlarms",
"cloudwatch:PutMetricAlarm",
"cloudwatch:DeleteAlarms",
"cloudwatch:GetMetricStatistics",
"logs:DescribeLogGroups",
"logs:DescribeLogStreams",
"logs:GetLogEvents"
],
"Effect": "Allow",
"Resource": "*"
},
{
"Action": [
"ec2:AcceptVpcPeeringConnection",
"ec2:AttachClassicLinkVpc",
"ec2:AuthorizeSecurityGroupEgress",
"ec2:AuthorizeSecurityGroupIngress",
"ec2:CreateVpcPeeringConnection",
"ec2:DeleteCustomerGateway",
"ec2:DeleteDhcpOptions",
"ec2:DeleteInternetGateway",
"ec2:DeleteNetworkAcl",
"ec2:DeleteNetworkAclEntry",
"ec2:DeleteRoute",
"ec2:DeleteRouteTable",
"ec2:DeleteSecurityGroup",
"ec2:DeleteVolume",
"ec2:DeleteVpcPeeringConnection",
"ec2:DetachClassicLinkVpc",
"ec2:DisableVpcClassicLink",
"ec2:EnableVpcClassicLink",
"ec2:GetConsoleScreenshot",
"ec2:RejectVpcPeeringConnection",
"ec2:RevokeSecurityGroupEgress",
"ec2:RevokeSecurityGroupIngress"
],
"Effect": "Allow",
"Resource": [
"*"
]
},
{
"Action": [
"s3:ListBucket",
"s3:GetBucketLocation",
"s3:GetBucketWebsiteConfiguration"
],
"Effect": "Allow",
"Resource": [
"*"
]
},
{
"Action": [
"iam:GetRole",
"iam:ListRoles",
"iam:PassRole"
],
"Effect": "Allow",
"Resource": "arn:aws:iam::*:role/flow-logs-*"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/job-function/",
"PolicyId": "ANPAJPNMADZFJCVPJVZA2",
"PolicyName": "NetworkAdministrator",
"UpdateDate": "2017-03-20T18:44:58+00:00",
"VersionId": "v2"
},
"PowerUserAccess": {
"Arn": "arn:aws:iam::aws:policy/PowerUserAccess",
"AttachmentCount": 0,
"CreateDate": "2016-12-06T18:11:16+00:00",
"DefaultVersionId": "v2",
"Document": {
"Statement": [
{
"Effect": "Allow",
"NotAction": [
"iam:*",
"organizations:*"
],
"Resource": "*"
},
{
"Action": "organizations:DescribeOrganization",
"Effect": "Allow",
"Resource": "*"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/",
"PolicyId": "ANPAJYRXTHIB4FOVS3ZXS",
"PolicyName": "PowerUserAccess",
"UpdateDate": "2016-12-06T18:11:16+00:00",
"VersionId": "v2"
},
"QuickSightAccessForS3StorageManagementAnalyticsReadOnly": {
"Arn": "arn:aws:iam::aws:policy/service-role/QuickSightAccessForS3StorageManagementAnalyticsReadOnly",
"AttachmentCount": 0,
"CreateDate": "2017-07-21T00:02:14+00:00",
"DefaultVersionId": "v3",
"Document": {
"Statement": [
{
"Action": [
"s3:GetObject",
"s3:GetObjectMetadata"
],
"Effect": "Allow",
"Resource": [
"arn:aws:s3:::s3-analytics-export-shared-*"
]
},
{
"Action": [
"s3:GetAnalyticsConfiguration",
"s3:ListAllMyBuckets",
"s3:GetBucketLocation"
],
"Effect": "Allow",
"Resource": "*"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/service-role/",
"PolicyId": "ANPAIFWG3L3WDMR4I7ZJW",
"PolicyName": "QuickSightAccessForS3StorageManagementAnalyticsReadOnly",
"UpdateDate": "2017-07-21T00:02:14+00:00",
"VersionId": "v3"
},
"RDSCloudHsmAuthorizationRole": {
"Arn": "arn:aws:iam::aws:policy/service-role/RDSCloudHsmAuthorizationRole",
"AttachmentCount": 0,
"CreateDate": "2015-02-06T18:41:29+00:00",
"DefaultVersionId": "v1",
"Document": {
"Statement": [
{
"Action": [
"cloudhsm:CreateLunaClient",
"cloudhsm:GetClientConfiguration",
"cloudhsm:DeleteLunaClient",
"cloudhsm:DescribeLunaClient",
"cloudhsm:ModifyLunaClient",
"cloudhsm:DescribeHapg",
"cloudhsm:ModifyHapg",
"cloudhsm:GetConfig"
],
"Effect": "Allow",
"Resource": "*"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/service-role/",
"PolicyId": "ANPAIWKFXRLQG2ROKKXLE",
"PolicyName": "RDSCloudHsmAuthorizationRole",
"UpdateDate": "2015-02-06T18:41:29+00:00",
"VersionId": "v1"
},
"ReadOnlyAccess": {
"Arn": "arn:aws:iam::aws:policy/ReadOnlyAccess",
"AttachmentCount": 0,
"CreateDate": "2017-07-20T17:43:06+00:00",
"DefaultVersionId": "v29",
"Document": {
"Statement": [
{
"Action": [
"acm:Describe*",
"acm:Get*",
"acm:List*",
"apigateway:GET",
"application-autoscaling:Describe*",
"appstream:Describe*",
"appstream:Get*",
"appstream:List*",
"athena:List*",
"athena:Batch*",
"athena:Get*",
"autoscaling:Describe*",
"batch:List*",
"batch:Describe*",
"clouddirectory:List*",
"clouddirectory:BatchRead",
"clouddirectory:Get*",
"clouddirectory:LookupPolicy",
"cloudformation:Describe*",
"cloudformation:Get*",
"cloudformation:List*",
"cloudformation:Estimate*",
"cloudformation:Preview*",
"cloudfront:Get*",
"cloudfront:List*",
"cloudhsm:List*",
"cloudhsm:Describe*",
"cloudhsm:Get*",
"cloudsearch:Describe*",
"cloudsearch:List*",
"cloudtrail:Describe*",
"cloudtrail:Get*",
"cloudtrail:List*",
"cloudtrail:LookupEvents",
"cloudwatch:Describe*",
"cloudwatch:Get*",
"cloudwatch:List*",
"codebuild:BatchGet*",
"codebuild:List*",
"codecommit:BatchGet*",
"codecommit:Get*",
"codecommit:GitPull",
"codecommit:List*",
"codedeploy:BatchGet*",
"codedeploy:Get*",
"codedeploy:List*",
"codepipeline:List*",
"codepipeline:Get*",
"codestar:List*",
"codestar:Describe*",
"codestar:Get*",
"codestar:Verify*",
"cognito-identity:List*",
"cognito-identity:Describe*",
"cognito-identity:Lookup*",
"cognito-sync:List*",
"cognito-sync:Describe*",
"cognito-sync:Get*",
"cognito-sync:QueryRecords",
"cognito-idp:AdminList*",
"cognito-idp:List*",
"cognito-idp:Describe*",
"cognito-idp:Get*",
"config:Deliver*",
"config:Describe*",
"config:Get*",
"config:List*",
"connect:List*",
"connect:Describe*",
"connect:Get*",
"datapipeline:Describe*",
"datapipeline:EvaluateExpression",
"datapipeline:Get*",
"datapipeline:List*",
"datapipeline:QueryObjects",
"datapipeline:Validate*",
"directconnect:Describe*",
"directconnect:Confirm*",
"devicefarm:List*",
"devicefarm:Get*",
"discovery:Describe*",
"discovery:List*",
"discovery:Get*",
"dms:Describe*",
"dms:List*",
"dms:Test*",
"ds:Check*",
"ds:Describe*",
"ds:Get*",
"ds:List*",
"ds:Verify*",
"dynamodb:BatchGet*",
"dynamodb:Describe*",
"dynamodb:Get*",
"dynamodb:List*",
"dynamodb:Query",
"dynamodb:Scan",
"ec2:Describe*",
"ec2:Get*",
"ec2messages:Get*",
"ecr:BatchCheck*",
"ecr:BatchGet*",
"ecr:Describe*",
"ecr:Get*",
"ecr:List*",
"ecs:Describe*",
"ecs:List*",
"elasticache:Describe*",
"elasticache:List*",
"elasticbeanstalk:Check*",
"elasticbeanstalk:Describe*",
"elasticbeanstalk:List*",
"elasticbeanstalk:Request*",
"elasticbeanstalk:Retrieve*",
"elasticbeanstalk:Validate*",
"elasticfilesystem:Describe*",
"elasticloadbalancing:Describe*",
"elasticmapreduce:Describe*",
"elasticmapreduce:List*",
"elasticmapreduce:View*",
"elastictranscoder:List*",
"elastictranscoder:Read*",
"es:Describe*",
"es:List*",
"es:ESHttpGet",
"es:ESHttpHead",
"events:Describe*",
"events:List*",
"events:Test*",
"firehose:Describe*",
"firehose:List*",
"gamelift:List*",
"gamelift:Get*",
"gamelift:Describe*",
"gamelift:RequestUploadCredentials",
"gamelift:ResolveAlias",
"gamelift:Search*",
"glacier:List*",
"glacier:Describe*",
"glacier:Get*",
"health:Describe*",
"health:Get*",
"health:List*",
"iam:Generate*",
"iam:Get*",
"iam:List*",
"iam:Simulate*",
"importexport:Get*",
"importexport:List*",
"inspector:Describe*",
"inspector:Get*",
"inspector:List*",
"inspector:Preview*",
"inspector:LocalizeText",
"iot:Describe*",
"iot:Get*",
"iot:List*",
"kinesisanalytics:Describe*",
"kinesisanalytics:Discover*",
"kinesisanalytics:Get*",
"kinesisanalytics:List*",
"kinesis:Describe*",
"kinesis:Get*",
"kinesis:List*",
"kms:Describe*",
"kms:Get*",
"kms:List*",
"lambda:List*",
"lambda:Get*",
"lex:Get*",
"lightsail:Get*",
"lightsail:Is*",
"lightsail:Download*",
"logs:Describe*",
"logs:Get*",
"logs:FilterLogEvents",
"logs:ListTagsLogGroup",
"logs:TestMetricFilter",
"machinelearning:Describe*",
"machinelearning:Get*",
"mobileanalytics:Get*",
"mobilehub:Get*",
"mobilehub:List*",
"mobilehub:Validate*",
"mobilehub:Verify*",
"mobiletargeting:Get*",
"opsworks:Describe*",
"opsworks:Get*",
"opsworks-cm:Describe*",
"organizations:Describe*",
"organizations:List*",
"polly:Describe*",
"polly:Get*",
"polly:List*",
"polly:SynthesizeSpeech",
"rekognition:CompareFaces",
"rekognition:Detect*",
"rekognition:List*",
"rekognition:Search*",
"rds:Describe*",
"rds:List*",
"rds:Download*",
"redshift:Describe*",
"redshift:View*",
"redshift:Get*",
"route53:Get*",
"route53:List*",
"route53:Test*",
"route53domains:Check*",
"route53domains:Get*",
"route53domains:List*",
"route53domains:View*",
"s3:Get*",
"s3:List*",
"s3:Head*",
"sdb:Get*",
"sdb:List*",
"sdb:Select*",
"servicecatalog:List*",
"servicecatalog:Scan*",
"servicecatalog:Search*",
"servicecatalog:Describe*",
"ses:Get*",
"ses:List*",
"ses:Describe*",
"ses:Verify*",
"shield:Describe*",
"shield:List*",
"sns:Get*",
"sns:List*",
"sns:Check*",
"sqs:Get*",
"sqs:List*",
"sqs:Receive*",
"ssm:Describe*",
"ssm:Get*",
"ssm:List*",
"states:List*",
"states:Describe*",
"states:GetExecutionHistory",
"storagegateway:Describe*",
"storagegateway:List*",
"sts:Get*",
"swf:Count*",
"swf:Describe*",
"swf:Get*",
"swf:List*",
"tag:Get*",
"trustedadvisor:Describe*",
"waf:Get*",
"waf:List*",
"waf-regional:List*",
"waf-regional:Get*",
"workdocs:Describe*",
"workdocs:Get*",
"workdocs:CheckAlias",
"workmail:Describe*",
"workmail:Get*",
"workmail:List*",
"workmail:Search*",
"workspaces:Describe*",
"xray:BatchGet*",
"xray:Get*"
],
"Effect": "Allow",
"Resource": "*"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/",
"PolicyId": "ANPAILL3HVNFSB6DCOWYQ",
"PolicyName": "ReadOnlyAccess",
"UpdateDate": "2017-07-20T17:43:06+00:00",
"VersionId": "v29"
},
"ResourceGroupsandTagEditorFullAccess": {
"Arn": "arn:aws:iam::aws:policy/ResourceGroupsandTagEditorFullAccess",
"AttachmentCount": 0,
"CreateDate": "2015-02-06T18:39:53+00:00",
"DefaultVersionId": "v1",
"Document": {
"Statement": [
{
"Action": [
"tag:getResources",
"tag:getTagKeys",
"tag:getTagValues",
"tag:addResourceTags",
"tag:removeResourceTags"
],
"Effect": "Allow",
"Resource": "*"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/",
"PolicyId": "ANPAJNOS54ZFXN4T2Y34A",
"PolicyName": "ResourceGroupsandTagEditorFullAccess",
"UpdateDate": "2015-02-06T18:39:53+00:00",
"VersionId": "v1"
},
"ResourceGroupsandTagEditorReadOnlyAccess": {
"Arn": "arn:aws:iam::aws:policy/ResourceGroupsandTagEditorReadOnlyAccess",
"AttachmentCount": 0,
"CreateDate": "2015-02-06T18:39:54+00:00",
"DefaultVersionId": "v1",
"Document": {
"Statement": [
{
"Action": [
"tag:getResources",
"tag:getTagKeys",
"tag:getTagValues"
],
"Effect": "Allow",
"Resource": "*"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/",
"PolicyId": "ANPAJHXQTPI5I5JKAIU74",
"PolicyName": "ResourceGroupsandTagEditorReadOnlyAccess",
"UpdateDate": "2015-02-06T18:39:54+00:00",
"VersionId": "v1"
},
"SecurityAudit": {
"Arn": "arn:aws:iam::aws:policy/SecurityAudit",
"AttachmentCount": 0,
"CreateDate": "2017-07-12T20:16:44+00:00",
"DefaultVersionId": "v12",
"Document": {
"Statement": [
{
"Action": [
"acm:ListCertificates",
"acm:DescribeCertificate",
"cloudformation:getStackPolicy",
"logs:describeLogGroups",
"logs:describeMetricFilters",
"autoscaling:Describe*",
"cloudformation:DescribeStack*",
"cloudformation:GetTemplate",
"cloudformation:ListStack*",
"cloudfront:Get*",
"cloudfront:List*",
"cloudtrail:DescribeTrails",
"cloudtrail:GetTrailStatus",
"cloudtrail:ListTags",
"cloudwatch:Describe*",
"codecommit:BatchGetRepositories",
"codecommit:GetBranch",
"codecommit:GetObjectIdentifier",
"codecommit:GetRepository",
"codecommit:List*",
"codedeploy:Batch*",
"codedeploy:Get*",
"codedeploy:List*",
"config:Deliver*",
"config:Describe*",
"config:Get*",
"datapipeline:DescribeObjects",
"datapipeline:DescribePipelines",
"datapipeline:EvaluateExpression",
"datapipeline:GetPipelineDefinition",
"datapipeline:ListPipelines",
"datapipeline:QueryObjects",
"datapipeline:ValidatePipelineDefinition",
"directconnect:Describe*",
"dynamodb:ListTables",
"ec2:Describe*",
"ecs:Describe*",
"ecs:List*",
"elasticache:Describe*",
"elasticbeanstalk:Describe*",
"elasticloadbalancing:Describe*",
"elasticmapreduce:DescribeJobFlows",
"elasticmapreduce:ListClusters",
"elasticmapreduce:ListInstances",
"es:ListDomainNames",
"es:Describe*",
"firehose:Describe*",
"firehose:List*",
"glacier:DescribeVault",
"glacier:GetVaultAccessPolicy",
"glacier:ListVaults",
"iam:GenerateCredentialReport",
"iam:Get*",
"iam:List*",
"kms:Describe*",
"kms:Get*",
"kms:List*",
"lambda:GetPolicy",
"lambda:ListFunctions",
"rds:Describe*",
"rds:DownloadDBLogFilePortion",
"rds:ListTagsForResource",
"redshift:Describe*",
"route53:GetChange",
"route53:GetCheckerIpRanges",
"route53:GetGeoLocation",
"route53:GetHealthCheck",
"route53:GetHealthCheckCount",
"route53:GetHealthCheckLastFailureReason",
"route53:GetHostedZone",
"route53:GetHostedZoneCount",
"route53:GetReusableDelegationSet",
"route53:ListGeoLocations",
"route53:ListHealthChecks",
"route53:ListHostedZones",
"route53:ListHostedZonesByName",
"route53:ListResourceRecordSets",
"route53:ListReusableDelegationSets",
"route53:ListTagsForResource",
"route53:ListTagsForResources",
"route53domains:GetDomainDetail",
"route53domains:GetOperationDetail",
"route53domains:ListDomains",
"route53domains:ListOperations",
"route53domains:ListTagsForDomain",
"s3:GetBucket*",
"s3:GetAccelerateConfiguration",
"s3:GetAnalyticsConfiguration",
"s3:GetInventoryConfiguration",
"s3:GetMetricsConfiguration",
"s3:GetReplicationConfiguration",
"s3:GetLifecycleConfiguration",
"s3:GetObjectAcl",
"s3:GetObjectVersionAcl",
"s3:ListAllMyBuckets",
"sdb:DomainMetadata",
"sdb:ListDomains",
"ses:GetIdentityDkimAttributes",
"ses:GetIdentityVerificationAttributes",
"ses:ListIdentities",
"sns:GetTopicAttributes",
"sns:ListSubscriptionsByTopic",
"sns:ListTopics",
"sqs:GetQueueAttributes",
"sqs:ListQueues",
"tag:GetResources",
"tag:GetTagKeys"
],
"Effect": "Allow",
"Resource": "*"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/",
"PolicyId": "ANPAIX2T3QCXHR2OGGCTO",
"PolicyName": "SecurityAudit",
"UpdateDate": "2017-07-12T20:16:44+00:00",
"VersionId": "v12"
},
"ServerMigrationConnector": {
"Arn": "arn:aws:iam::aws:policy/ServerMigrationConnector",
"AttachmentCount": 0,
"CreateDate": "2016-10-24T21:45:56+00:00",
"DefaultVersionId": "v1",
"Document": {
"Statement": [
{
"Action": "iam:GetUser",
"Effect": "Allow",
"Resource": "*"
},
{
"Action": [
"sms:SendMessage",
"sms:GetMessages"
],
"Effect": "Allow",
"Resource": "*"
},
{
"Action": [
"s3:CreateBucket",
"s3:DeleteBucket",
"s3:DeleteObject",
"s3:GetBucketLocation",
"s3:GetObject",
"s3:ListBucket",
"s3:PutObject",
"s3:PutObjectAcl",
"s3:PutLifecycleConfiguration",
"s3:AbortMultipartUpload",
"s3:ListBucketMultipartUploads",
"s3:ListMultipartUploadParts"
],
"Effect": "Allow",
"Resource": [
"arn:aws:s3:::sms-b-*",
"arn:aws:s3:::import-to-ec2-*",
"arn:aws:s3:::server-migration-service-upgrade",
"arn:aws:s3:::server-migration-service-upgrade/*",
"arn:aws:s3:::connector-platform-upgrade-info/*",
"arn:aws:s3:::connector-platform-upgrade-info",
"arn:aws:s3:::connector-platform-upgrade-bundles/*",
"arn:aws:s3:::connector-platform-upgrade-bundles",
"arn:aws:s3:::connector-platform-release-notes/*",
"arn:aws:s3:::connector-platform-release-notes"
]
},
{
"Action": "awsconnector:*",
"Effect": "Allow",
"Resource": "*"
},
{
"Action": [
"SNS:Publish"
],
"Effect": "Allow",
"Resource": "arn:aws:sns:*:*:metrics-sns-topic-for-*"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/",
"PolicyId": "ANPAJKZRWXIPK5HSG3QDQ",
"PolicyName": "ServerMigrationConnector",
"UpdateDate": "2016-10-24T21:45:56+00:00",
"VersionId": "v1"
},
"ServerMigrationServiceRole": {
"Arn": "arn:aws:iam::aws:policy/service-role/ServerMigrationServiceRole",
"AttachmentCount": 0,
"CreateDate": "2017-06-16T18:02:04+00:00",
"DefaultVersionId": "v2",
"Document": {
"Statement": [
{
"Action": [
"ec2:ModifySnapshotAttribute",
"ec2:CopySnapshot",
"ec2:CopyImage",
"ec2:Describe*",
"ec2:DeleteSnapshot",
"ec2:DeregisterImage",
"ec2:CreateTags",
"ec2:DeleteTags"
],
"Effect": "Allow",
"Resource": "*"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/service-role/",
"PolicyId": "ANPAJMBH3M6BO63XFW2D4",
"PolicyName": "ServerMigrationServiceRole",
"UpdateDate": "2017-06-16T18:02:04+00:00",
"VersionId": "v2"
},
"ServiceCatalogAdminFullAccess": {
"Arn": "arn:aws:iam::aws:policy/ServiceCatalogAdminFullAccess",
"AttachmentCount": 0,
"CreateDate": "2016-11-11T18:40:24+00:00",
"DefaultVersionId": "v2",
"Document": {
"Statement": [
{
"Action": [
"catalog-admin:*",
"catalog-user:*",
"cloudformation:CreateStack",
"cloudformation:CreateUploadBucket",
"cloudformation:DeleteStack",
"cloudformation:DescribeStackEvents",
"cloudformation:DescribeStacks",
"cloudformation:GetTemplateSummary",
"cloudformation:SetStackPolicy",
"cloudformation:ValidateTemplate",
"cloudformation:UpdateStack",
"iam:GetGroup",
"iam:GetRole",
"iam:GetUser",
"iam:ListGroups",
"iam:ListRoles",
"iam:ListUsers",
"iam:PassRole",
"s3:CreateBucket",
"s3:GetObject",
"s3:PutObject",
"servicecatalog:*"
],
"Effect": "Allow",
"Resource": "*"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/",
"PolicyId": "ANPAIKTX42IAS75B7B7BY",
"PolicyName": "ServiceCatalogAdminFullAccess",
"UpdateDate": "2016-11-11T18:40:24+00:00",
"VersionId": "v2"
},
"ServiceCatalogAdminReadOnlyAccess": {
"Arn": "arn:aws:iam::aws:policy/ServiceCatalogAdminReadOnlyAccess",
"AttachmentCount": 0,
"CreateDate": "2017-08-08T18:57:36+00:00",
"DefaultVersionId": "v5",
"Document": {
"Statement": [
{
"Action": [
"catalog-admin:DescribeConstraints",
"catalog-admin:DescribeListingForProduct",
"catalog-admin:DescribeListings",
"catalog-admin:DescribePortfolios",
"catalog-admin:DescribeProductVersions",
"catalog-admin:GetPortfolioCount",
"catalog-admin:GetPortfolios",
"catalog-admin:GetProductCounts",
"catalog-admin:ListAllPortfolioConstraints",
"catalog-admin:ListPortfolioConstraints",
"catalog-admin:ListPortfolios",
"catalog-admin:ListPrincipalConstraints",
"catalog-admin:ListProductConstraints",
"catalog-admin:ListResourceUsers",
"catalog-admin:ListTagsForResource",
"catalog-admin:SearchListings",
"catalog-user:*",
"cloudformation:DescribeStackEvents",
"cloudformation:DescribeStacks",
"cloudformation:GetTemplateSummary",
"iam:GetGroup",
"iam:GetRole",
"iam:GetUser",
"iam:ListGroups",
"iam:ListRoles",
"iam:ListUsers",
"s3:GetObject",
"servicecatalog:DescribeTagOption",
"servicecatalog:GetTagOptionMigrationStatus",
"servicecatalog:ListResourcesForTagOption",
"servicecatalog:ListTagOptions",
"servicecatalog:AccountLevelDescribeRecord",
"servicecatalog:AccountLevelListRecordHistory",
"servicecatalog:AccountLevelScanProvisionedProducts",
"servicecatalog:DescribeProduct",
"servicecatalog:DescribeProductView",
"servicecatalog:DescribeProvisioningParameters",
"servicecatalog:DescribeProvisionedProduct",
"servicecatalog:DescribeRecord",
"servicecatalog:ListLaunchPaths",
"servicecatalog:ListRecordHistory",
"servicecatalog:ScanProvisionedProducts",
"servicecatalog:SearchProducts",
"servicecatalog:DescribeConstraint",
"servicecatalog:DescribeProductAsAdmin",
"servicecatalog:DescribePortfolio",
"servicecatalog:DescribeProvisioningArtifact",
"servicecatalog:ListAcceptedPortfolioShares",
"servicecatalog:ListConstraintsForPortfolio",
"servicecatalog:ListPortfolioAccess",
"servicecatalog:ListPortfolios",
"servicecatalog:ListPortfoliosForProduct",
"servicecatalog:ListPrincipalsForPortfolio",
"servicecatalog:ListProvisioningArtifacts",
"servicecatalog:SearchProductsAsAdmin"
],
"Effect": "Allow",
"Resource": "*"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/",
"PolicyId": "ANPAJ7XOUSS75M4LIPKO4",
"PolicyName": "ServiceCatalogAdminReadOnlyAccess",
"UpdateDate": "2017-08-08T18:57:36+00:00",
"VersionId": "v5"
},
"ServiceCatalogEndUserAccess": {
"Arn": "arn:aws:iam::aws:policy/ServiceCatalogEndUserAccess",
"AttachmentCount": 0,
"CreateDate": "2017-08-08T18:58:57+00:00",
"DefaultVersionId": "v4",
"Document": {
"Statement": [
{
"Action": [
"catalog-user:*",
"s3:GetObject",
"servicecatalog:DescribeProduct",
"servicecatalog:DescribeProductView",
"servicecatalog:DescribeProvisioningParameters",
"servicecatalog:ListLaunchPaths",
"servicecatalog:SearchProducts"
],
"Effect": "Allow",
"Resource": "*"
},
{
"Action": [
"servicecatalog:ListRecordHistory",
"servicecatalog:DescribeProvisionedProduct",
"servicecatalog:DescribeRecord",
"servicecatalog:ScanProvisionedProducts"
],
"Condition": {
"StringEquals": {
"servicecatalog:userLevel": "self"
}
},
"Effect": "Allow",
"Resource": "*"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/",
"PolicyId": "ANPAJ56OMCO72RI4J5FSA",
"PolicyName": "ServiceCatalogEndUserAccess",
"UpdateDate": "2017-08-08T18:58:57+00:00",
"VersionId": "v4"
},
"ServiceCatalogEndUserFullAccess": {
"Arn": "arn:aws:iam::aws:policy/ServiceCatalogEndUserFullAccess",
"AttachmentCount": 0,
"CreateDate": "2017-08-08T18:58:54+00:00",
"DefaultVersionId": "v4",
"Document": {
"Statement": [
{
"Action": [
"catalog-user:*",
"cloudformation:CreateStack",
"cloudformation:DeleteStack",
"cloudformation:DescribeStackEvents",
"cloudformation:DescribeStacks",
"cloudformation:GetTemplateSummary",
"cloudformation:SetStackPolicy",
"cloudformation:ValidateTemplate",
"cloudformation:UpdateStack",
"servicecatalog:DescribeProduct",
"servicecatalog:DescribeProductView",
"servicecatalog:DescribeProvisioningParameters",
"servicecatalog:ListLaunchPaths",
"servicecatalog:ProvisionProduct",
"servicecatalog:SearchProducts",
"s3:GetObject"
],
"Effect": "Allow",
"Resource": "*"
},
{
"Action": [
"servicecatalog:DescribeProvisionedProduct",
"servicecatalog:DescribeRecord",
"servicecatalog:ListRecordHistory",
"servicecatalog:ScanProvisionedProducts",
"servicecatalog:TerminateProvisionedProduct",
"servicecatalog:UpdateProvisionedProduct"
],
"Condition": {
"StringEquals": {
"servicecatalog:userLevel": "self"
}
},
"Effect": "Allow",
"Resource": "*"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/",
"PolicyId": "ANPAJIW7AFFOONVKW75KU",
"PolicyName": "ServiceCatalogEndUserFullAccess",
"UpdateDate": "2017-08-08T18:58:54+00:00",
"VersionId": "v4"
},
"SimpleWorkflowFullAccess": {
"Arn": "arn:aws:iam::aws:policy/SimpleWorkflowFullAccess",
"AttachmentCount": 0,
"CreateDate": "2015-02-06T18:41:04+00:00",
"DefaultVersionId": "v1",
"Document": {
"Statement": [
{
"Action": [
"swf:*"
],
"Effect": "Allow",
"Resource": "*"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/",
"PolicyId": "ANPAIFE3AV6VE7EANYBVM",
"PolicyName": "SimpleWorkflowFullAccess",
"UpdateDate": "2015-02-06T18:41:04+00:00",
"VersionId": "v1"
},
"SupportUser": {
"Arn": "arn:aws:iam::aws:policy/job-function/SupportUser",
"AttachmentCount": 0,
"CreateDate": "2017-05-17T23:11:51+00:00",
"DefaultVersionId": "v2",
"Document": {
"Statement": [
{
"Action": [
"support:*",
"acm:DescribeCertificate",
"acm:GetCertificate",
"acm:List*",
"apigateway:GET",
"appstream:Get*",
"autoscaling:Describe*",
"aws-marketplace:ViewSubscriptions",
"cloudformation:Describe*",
"cloudformation:Get*",
"cloudformation:List*",
"cloudformation:EstimateTemplateCost",
"cloudfront:Get*",
"cloudfront:List*",
"cloudsearch:Describe*",
"cloudsearch:List*",
"cloudtrail:DescribeTrails",
"cloudtrail:GetTrailStatus",
"cloudtrail:LookupEvents",
"cloudtrail:ListTags",
"cloudtrail:ListPublicKeys",
"cloudwatch:Describe*",
"cloudwatch:Get*",
"cloudwatch:List*",
"codecommit:BatchGetRepositories",
"codecommit:Get*",
"codecommit:List*",
"codedeploy:Batch*",
"codedeploy:Get*",
"codedeploy:List*",
"codepipeline:AcknowledgeJob",
"codepipeline:AcknowledgeThirdPartyJob",
"codepipeline:ListActionTypes",
"codepipeline:ListPipelines",
"codepipeline:PollForJobs",
"codepipeline:PollForThirdPartyJobs",
"codepipeline:GetPipelineState",
"codepipeline:GetPipeline",
"cognito-identity:List*",
"cognito-identity:LookupDeveloperIdentity",
"cognito-identity:Describe*",
"cognito-idp:Describe*",
"cognito-sync:Describe*",
"cognito-sync:GetBulkPublishDetails",
"cognito-sync:GetCognitoEvents",
"cognito-sync:GetIdentityPoolConfiguration",
"cognito-sync:List*",
"config:DescribeConfigurationRecorders",
"config:DescribeConfigurationRecorderStatus",
"config:DescribeConfigRuleEvaluationStatus",
"config:DescribeConfigRules",
"config:DescribeDeliveryChannels",
"config:DescribeDeliveryChannelStatus",
"config:GetResourceConfigHistory",
"config:ListDiscoveredResources",
"datapipeline:DescribeObjects",
"datapipeline:DescribePipelines",
"datapipeline:GetPipelineDefinition",
"datapipeline:ListPipelines",
"datapipeline:QueryObjects",
"datapipeline:ReportTaskProgress",
"datapipeline:ReportTaskRunnerHeartbeat",
"devicefarm:List*",
"devicefarm:Get*",
"directconnect:Describe*",
"discovery:Describe*",
"discovery:ListConfigurations",
"dms:Describe*",
"dms:List*",
"ds:DescribeDirectories",
"ds:DescribeSnapshots",
"ds:GetDirectoryLimits",
"ds:GetSnapshotLimits",
"ds:ListAuthorizedApplications",
"dynamodb:DescribeLimits",
"dynamodb:DescribeTable",
"dynamodb:ListTables",
"ec2:Describe*",
"ec2:DescribeHosts",
"ec2:describeIdentityIdFormat",
"ec2:DescribeIdFormat",
"ec2:DescribeInstanceAttribute",
"ec2:DescribeNatGateways",
"ec2:DescribeReservedInstancesModifications",
"ec2:DescribeTags",
"ec2:GetFlowLogsCount",
"ecr:GetRepositoryPolicy",
"ecr:BatchCheckLayerAvailability",
"ecr:DescribeRepositories",
"ecr:ListImages",
"ecs:Describe*",
"ecs:List*",
"elasticache:Describe*",
"elasticache:List*",
"elasticbeanstalk:Check*",
"elasticbeanstalk:Describe*",
"elasticbeanstalk:List*",
"elasticbeanstalk:RequestEnvironmentInfo",
"elasticbeanstalk:RetrieveEnvironmentInfo",
"elasticbeanstalk:ValidateConfigurationSettings",
"elasticfilesystem:Describe*",
"elasticloadbalancing:Describe*",
"elasticmapreduce:Describe*",
"elasticmapreduce:List*",
"elastictranscoder:List*",
"elastictranscoder:ReadJob",
"elasticfilesystem:DescribeFileSystems",
"es:Describe*",
"es:List*",
"es:ESHttpGet",
"es:ESHttpHead",
"events:DescribeRule",
"events:List*",
"events:TestEventPattern",
"firehose:Describe*",
"firehose:List*",
"gamelift:List*",
"gamelift:Describe*",
"glacier:ListVaults",
"glacier:DescribeVault",
"glacier:DescribeJob",
"glacier:Get*",
"glacier:List*",
"iam:GenerateCredentialReport",
"iam:GenerateServiceLastAccessedDetails",
"iam:Get*",
"iam:List*",
"importexport:GetStatus",
"importexport:ListJobs",
"importexport:GetJobDetail",
"inspector:Describe*",
"inspector:List*",
"inspector:GetAssessmentTelemetry",
"inspector:LocalizeText",
"iot:Describe*",
"iot:Get*",
"iot:List*",
"kinesisanalytics:DescribeApplication",
"kinesisanalytics:DiscoverInputSchema",
"kinesisanalytics:GetApplicationState",
"kinesisanalytics:ListApplications",
"kinesis:Describe*",
"kinesis:Get*",
"kinesis:List*",
"kms:Describe*",
"kms:Get*",
"kms:List*",
"lambda:List*",
"lambda:Get*",
"logs:Describe*",
"logs:TestMetricFilter",
"machinelearning:Describe*",
"machinelearning:Get*",
"mobilehub:GetProject",
"mobilehub:List*",
"mobilehub:ValidateProject",
"mobilehub:VerifyServiceRole",
"opsworks:Describe*",
"rds:Describe*",
"rds:ListTagsForResource",
"redshift:Describe*",
"route53:Get*",
"route53:List*",
"route53domains:CheckDomainAvailability",
"route53domains:GetDomainDetail",
"route53domains:GetOperationDetail",
"route53domains:List*",
"s3:List*",
"sdb:GetAttributes",
"sdb:List*",
"sdb:Select*",
"servicecatalog:SearchProducts",
"servicecatalog:DescribeProduct",
"servicecatalog:DescribeProductView",
"servicecatalog:ListLaunchPaths",
"servicecatalog:DescribeProvisioningParameters",
"servicecatalog:ListRecordHistory",
"servicecatalog:DescribeRecord",
"servicecatalog:ScanProvisionedProducts",
"ses:Get*",
"ses:List*",
"sns:Get*",
"sns:List*",
"sqs:GetQueueAttributes",
"sqs:GetQueueUrl",
"sqs:ListQueues",
"sqs:ReceiveMessage",
"ssm:List*",
"ssm:Describe*",
"storagegateway:Describe*",
"storagegateway:List*",
"swf:Count*",
"swf:Describe*",
"swf:Get*",
"swf:List*",
"waf:Get*",
"waf:List*",
"workspaces:Describe*",
"workdocs:Describe*",
"workmail:Describe*",
"workmail:Get*",
"workspaces:Describe*"
],
"Effect": "Allow",
"Resource": "*"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/job-function/",
"PolicyId": "ANPAI3V4GSSN5SJY3P2RO",
"PolicyName": "SupportUser",
"UpdateDate": "2017-05-17T23:11:51+00:00",
"VersionId": "v2"
},
"SystemAdministrator": {
"Arn": "arn:aws:iam::aws:policy/job-function/SystemAdministrator",
"AttachmentCount": 0,
"CreateDate": "2017-03-24T17:45:43+00:00",
"DefaultVersionId": "v2",
"Document": {
"Statement": [
{
"Action": [
"acm:Describe*",
"acm:Get*",
"acm:List*",
"acm:Request*",
"acm:Resend*",
"autoscaling:*",
"cloudtrail:DescribeTrails",
"cloudtrail:GetTrailStatus",
"cloudtrail:ListPublicKeys",
"cloudtrail:ListTags",
"cloudtrail:LookupEvents",
"cloudtrail:StartLogging",
"cloudtrail:StopLogging",
"cloudwatch:*",
"codecommit:BatchGetRepositories",
"codecommit:CreateBranch",
"codecommit:CreateRepository",
"codecommit:Get*",
"codecommit:GitPull",
"codecommit:GitPush",
"codecommit:List*",
"codecommit:Put*",
"codecommit:Test*",
"codecommit:Update*",
"codedeploy:*",
"codepipeline:*",
"config:*",
"ds:*",
"ec2:Allocate*",
"ec2:AssignPrivateIpAddresses*",
"ec2:Associate*",
"ec2:Allocate*",
"ec2:AttachInternetGateway",
"ec2:AttachNetworkInterface",
"ec2:AttachVpnGateway",
"ec2:Bundle*",
"ec2:Cancel*",
"ec2:Copy*",
"ec2:CreateCustomerGateway",
"ec2:CreateDhcpOptions",
"ec2:CreateFlowLogs",
"ec2:CreateImage",
"ec2:CreateInstanceExportTask",
"ec2:CreateInternetGateway",
"ec2:CreateKeyPair",
"ec2:CreateNatGateway",
"ec2:CreateNetworkInterface",
"ec2:CreatePlacementGroup",
"ec2:CreateReservedInstancesListing",
"ec2:CreateRoute",
"ec2:CreateRouteTable",
"ec2:CreateSecurityGroup",
"ec2:CreateSnapshot",
"ec2:CreateSpotDatafeedSubscription",
"ec2:CreateSubnet",
"ec2:CreateTags",
"ec2:CreateVolume",
"ec2:CreateVpc",
"ec2:CreateVpcEndpoint",
"ec2:CreateVpnConnection",
"ec2:CreateVpnConnectionRoute",
"ec2:CreateVpnGateway",
"ec2:DeleteFlowLogs",
"ec2:DeleteKeyPair",
"ec2:DeleteNatGateway",
"ec2:DeleteNetworkInterface",
"ec2:DeletePlacementGroup",
"ec2:DeleteSnapshot",
"ec2:DeleteSpotDatafeedSubscription",
"ec2:DeleteSubnet",
"ec2:DeleteTags",
"ec2:DeleteVpc",
"ec2:DeleteVpcEndpoints",
"ec2:DeleteVpnConnection",
"ec2:DeleteVpnConnectionRoute",
"ec2:DeleteVpnGateway",
"ec2:DeregisterImage",
"ec2:Describe*",
"ec2:DetachInternetGateway",
"ec2:DetachNetworkInterface",
"ec2:DetachVpnGateway",
"ec2:DisableVgwRoutePropagation",
"ec2:DisableVpcClassicLinkDnsSupport",
"ec2:DisassociateAddress",
"ec2:DisassociateRouteTable",
"ec2:EnableVgwRoutePropagation",
"ec2:EnableVolumeIO",
"ec2:EnableVpcClassicLinkDnsSupport",
"ec2:GetConsoleOutput",
"ec2:GetHostReservationPurchasePreview",
"ec2:GetPasswordData",
"ec2:Import*",
"ec2:Modify*",
"ec2:MonitorInstances",
"ec2:MoveAddressToVpc",
"ec2:Purchase*",
"ec2:RegisterImage",
"ec2:Release*",
"ec2:Replace*",
"ec2:ReportInstanceStatus",
"ec2:Request*",
"ec2:Reset*",
"ec2:RestoreAddressToClassic",
"ec2:RunScheduledInstances",
"ec2:UnassignPrivateIpAddresses",
"ec2:UnmonitorInstances",
"elasticloadbalancing:*",
"events:*",
"iam:GetAccount*",
"iam:GetContextKeys*",
"iam:GetCredentialReport",
"iam:ListAccountAliases",
"iam:ListGroups",
"iam:ListOpenIDConnectProviders",
"iam:ListPolicies",
"iam:ListPoliciesGrantingServiceAccess",
"iam:ListRoles",
"iam:ListSAMLProviders",
"iam:ListServerCertificates",
"iam:Simulate*",
"iam:UpdateServerCertificate",
"iam:UpdateSigningCertificate",
"kinesis:ListStreams",
"kinesis:PutRecord",
"kms:CreateAlias",
"kms:CreateKey",
"kms:DeleteAlias",
"kms:Describe*",
"kms:GenerateRandom",
"kms:Get*",
"kms:List*",
"kms:Encrypt",
"kms:ReEncrypt*",
"lambda:Create*",
"lambda:Delete*",
"lambda:Get*",
"lambda:InvokeFunction",
"lambda:List*",
"lambda:PublishVersion",
"lambda:Update*",
"logs:*",
"rds:Describe*",
"rds:ListTagsForResource",
"route53:*",
"route53domains:*",
"ses:*",
"sns:*",
"sqs:*",
"trustedadvisor:*"
],
"Effect": "Allow",
"Resource": "*"
},
{
"Action": [
"ec2:AcceptVpcPeeringConnection",
"ec2:AttachClassicLinkVpc",
"ec2:AttachVolume",
"ec2:AuthorizeSecurityGroupEgress",
"ec2:AuthorizeSecurityGroupIngress",
"ec2:CreateVpcPeeringConnection",
"ec2:DeleteCustomerGateway",
"ec2:DeleteDhcpOptions",
"ec2:DeleteInternetGateway",
"ec2:DeleteNetworkAcl*",
"ec2:DeleteRoute",
"ec2:DeleteRouteTable",
"ec2:DeleteSecurityGroup",
"ec2:DeleteVolume",
"ec2:DeleteVpcPeeringConnection",
"ec2:DetachClassicLinkVpc",
"ec2:DetachVolume",
"ec2:DisableVpcClassicLink",
"ec2:EnableVpcClassicLink",
"ec2:GetConsoleScreenshot",
"ec2:RebootInstances",
"ec2:RejectVpcPeeringConnection",
"ec2:RevokeSecurityGroupEgress",
"ec2:RevokeSecurityGroupIngress",
"ec2:RunInstances",
"ec2:StartInstances",
"ec2:StopInstances",
"ec2:TerminateInstances"
],
"Effect": "Allow",
"Resource": [
"*"
]
},
{
"Action": "s3:*",
"Effect": "Allow",
"Resource": [
"*"
]
},
{
"Action": [
"iam:GetAccessKeyLastUsed",
"iam:GetGroup*",
"iam:GetInstanceProfile",
"iam:GetLoginProfile",
"iam:GetOpenIDConnectProvider",
"iam:GetPolicy*",
"iam:GetRole*",
"iam:GetSAMLProvider",
"iam:GetSSHPublicKey",
"iam:GetServerCertificate",
"iam:GetServiceLastAccessed*",
"iam:GetUser*",
"iam:ListAccessKeys",
"iam:ListAttached*",
"iam:ListEntitiesForPolicy",
"iam:ListGroupPolicies",
"iam:ListGroupsForUser",
"iam:ListInstanceProfiles*",
"iam:ListMFADevices",
"iam:ListPolicyVersions",
"iam:ListRolePolicies",
"iam:ListSSHPublicKeys",
"iam:ListSigningCertificates",
"iam:ListUserPolicies",
"iam:Upload*"
],
"Effect": "Allow",
"Resource": [
"*"
]
},
{
"Action": [
"iam:GetRole",
"iam:ListRoles",
"iam:PassRole"
],
"Effect": "Allow",
"Resource": [
"arn:aws:iam::*:role/rds-monitoring-role",
"arn:aws:iam::*:role/ec2-sysadmin-*",
"arn:aws:iam::*:role/ecr-sysadmin-*",
"arn:aws:iam::*:role/lamdba-sysadmin-*"
]
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/job-function/",
"PolicyId": "ANPAITJPEZXCYCBXANDSW",
"PolicyName": "SystemAdministrator",
"UpdateDate": "2017-03-24T17:45:43+00:00",
"VersionId": "v2"
},
"VMImportExportRoleForAWSConnector": {
"Arn": "arn:aws:iam::aws:policy/service-role/VMImportExportRoleForAWSConnector",
"AttachmentCount": 0,
"CreateDate": "2015-09-03T20:48:59+00:00",
"DefaultVersionId": "v1",
"Document": {
"Statement": [
{
"Action": [
"s3:ListBucket",
"s3:GetBucketLocation",
"s3:GetObject"
],
"Effect": "Allow",
"Resource": [
"arn:aws:s3:::import-to-ec2-*"
]
},
{
"Action": [
"ec2:ModifySnapshotAttribute",
"ec2:CopySnapshot",
"ec2:RegisterImage",
"ec2:Describe*"
],
"Effect": "Allow",
"Resource": "*"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/service-role/",
"PolicyId": "ANPAJFLQOOJ6F5XNX4LAW",
"PolicyName": "VMImportExportRoleForAWSConnector",
"UpdateDate": "2015-09-03T20:48:59+00:00",
"VersionId": "v1"
},
"ViewOnlyAccess": {
"Arn": "arn:aws:iam::aws:policy/job-function/ViewOnlyAccess",
"AttachmentCount": 0,
"CreateDate": "2017-06-26T22:35:31+00:00",
"DefaultVersionId": "v3",
"Document": {
"Statement": [
{
"Action": [
"acm:ListCertificates",
"athena:List*",
"aws-marketplace:ViewSubscriptions",
"autoscaling:Describe*",
"batch:ListJobs",
"clouddirectory:ListAppliedSchemaArns",
"clouddirectory:ListDevelopmentSchemaArns",
"clouddirectory:ListDirectories",
"clouddirectory:ListPublishedSchemaArns",
"cloudformation:List*",
"cloudformation:DescribeStacks",
"cloudfront:List*",
"cloudhsm:ListAvailableZones",
"cloudhsm:ListLunaClients",
"cloudhsm:ListHapgs",
"cloudhsm:ListHsms",
"cloudsearch:List*",
"cloudsearch:DescribeDomains",
"cloudtrail:DescribeTrails",
"cloudtrail:LookupEvents",
"cloudwatch:List*",
"cloudwatch:GetMetricData",
"codebuild:ListBuilds*",
"codebuild:ListProjects",
"codecommit:List*",
"codedeploy:List*",
"codedeploy:Get*",
"codepipeline:ListPipelines",
"codestar:List*",
"codestar:Verify*",
"cognito-idp:List*",
"cognito-identity:ListIdentities",
"cognito-identity:ListIdentityPools",
"cognito-sync:ListDatasets",
"connect:List*",
"config:List*",
"config:Describe*",
"datapipeline:ListPipelines",
"datapipeline:DescribePipelines",
"datapipeline:GetAccountLimits",
"devicefarm:List*",
"directconnect:Describe*",
"discovery:List*",
"dms:List*",
"ds:DescribeDirectories",
"dynamodb:ListTables",
"ec2:DescribeAccountAttributes",
"ec2:DescribeAddresses",
"ec2:DescribeAvailabilityZones",
"ec2:DescribeBundleTasks",
"ec2:DescribeClassicLinkInstances",
"ec2:DescribeConversionTasks",
"ec2:DescribeCustomerGateways",
"ec2:DescribeDhcpOptions",
"ec2:DescribeExportTasks",
"ec2:DescribeFlowLogs",
"ec2:DescribeHost*",
"ec2:DescribeIdentityIdFormat",
"ec2:DescribeIdFormat",
"ec2:DescribeImage*",
"ec2:DescribeImport*",
"ec2:DescribeInstance*",
"ec2:DescribeInternetGateways",
"ec2:DescribeKeyPairs",
"ec2:DescribeMovingAddresses",
"ec2:DescribeNatGateways",
"ec2:DescribeNetwork*",
"ec2:DescribePlacementGroups",
"ec2:DescribePrefixLists",
"ec2:DescribeRegions",
"ec2:DescribeReserved*",
"ec2:DescribeRouteTables",
"ec2:DescribeSecurityGroups",
"ec2:DescribeSnapshot*",
"ec2:DescribeSpot*",
"ec2:DescribeSubnets",
"ec2:DescribeVolume*",
"ec2:DescribeVpc*",
"ec2:DescribeVpnGateways",
"ecr:DescribeRepositories",
"ecr:ListImages",
"ecs:List*",
"elasticache:Describe*",
"elasticbeanstalk:DescribeApplicationVersions",
"elasticbeanstalk:DescribeApplications",
"elasticbeanstalk:DescribeEnvironments",
"elasticbeanstalk:ListAvailableSolutionStacks",
"elasticloadbalancing:DescribeListeners",
"elasticloadbalancing:DescribeLoadBalancers",
"elasticloadbalancing:DescribeTargetGroups",
"elasticloadbalancing:DescribeTargetHealth",
"elasticfilesystem:DescribeFileSystems",
"elasticmapreduce:List*",
"elastictranscoder:List*",
"es:DescribeElasticsearchDomain",
"es:DescribeElasticsearchDomains",
"es:ListDomainNames",
"events:ListRuleNamesByTarget",
"events:ListRules",
"events:ListTargetsByRule",
"firehose:List*",
"firehose:DescribeDeliveryStream",
"gamelift:List*",
"glacier:List*",
"iam:List*",
"iam:GetAccountSummary",
"iam:GetLoginProfile",
"importexport:ListJobs",
"inspector:List*",
"iot:List*",
"kinesis:ListStreams",
"kinesisanalytics:ListApplications",
"kms:ListKeys",
"lambda:List*",
"lex:GetBotAliases",
"lex:GetBotChannelAssociations",
"lex:GetBots",
"lex:GetBotVersions",
"lex:GetIntents",
"lex:GetIntentVersions",
"lex:GetSlotTypes",
"lex:GetSlotTypeVersions",
"lex:GetUtterancesView",
"lightsail:GetBlueprints",
"lightsail:GetBundles",
"lightsail:GetInstances",
"lightsail:GetInstanceSnapshots",
"lightsail:GetKeyPair",
"lightsail:GetRegions",
"lightsail:GetStaticIps",
"lightsail:IsVpcPeered",
"logs:Describe*",
"machinelearning:Describe*",
"mobilehub:ListAvailableFeatures",
"mobilehub:ListAvailableRegions",
"mobilehub:ListProjects",
"opsworks:Describe*",
"opsworks-cm:Describe*",
"organizations:List*",
"mobiletargeting:GetApplicationSettings",
"mobiletargeting:GetCampaigns",
"mobiletargeting:GetImportJobs",
"mobiletargeting:GetSegments",
"polly:Describe*",
"polly:List*",
"rds:Describe*",
"redshift:DescribeClusters",
"redshift:DescribeEvents",
"redshift:ViewQueriesInConsole",
"route53:List*",
"route53:Get*",
"route53domains:List*",
"s3:ListAllMyBuckets",
"s3:ListBucket",
"sdb:List*",
"servicecatalog:List*",
"ses:List*",
"shield:List*",
"states:ListActivities",
"states:ListStateMachines",
"sns:List*",
"sqs:ListQueues",
"ssm:ListAssociations",
"ssm:ListDocuments",
"storagegateway:ListGateways",
"storagegateway:ListLocalDisks",
"storagegateway:ListVolumeRecoveryPoints",
"storagegateway:ListVolumes",
"swf:List*",
"trustedadvisor:Describe*",
"waf:List*",
"waf-regional:List*",
"workdocs:DescribeAvailableDirectories",
"workdocs:DescribeInstances",
"workmail:Describe*",
"workspaces:Describe*"
],
"Effect": "Allow",
"Resource": "*"
}
],
"Version": "2012-10-17"
},
"IsAttachable": true,
"IsDefaultVersion": true,
"Path": "/job-function/",
"PolicyId": "ANPAID22R6XPJATWOFDK6",
"PolicyName": "ViewOnlyAccess",
"UpdateDate": "2017-06-26T22:35:31+00:00",
"VersionId": "v3"
}
}"""
|
botify-labs/moto
|
moto/iam/aws_managed_policies.py
|
Python
|
apache-2.0
| 495,649 | 0.000176 |
# (c) 2014 James Cammarata, <jcammarata@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import re
import codecs
from ansible.errors import AnsibleParserError
from ansible.parsing.quoting import unquote
# Decode escapes adapted from rspeer's answer here:
# http://stackoverflow.com/questions/4020539/process-escape-sequences-in-a-string-in-python
_HEXCHAR = '[a-fA-F0-9]'
_ESCAPE_SEQUENCE_RE = re.compile(r'''
( \\U{0} # 8-digit hex escapes
| \\u{1} # 4-digit hex escapes
| \\x{2} # 2-digit hex escapes
| \\N\{{[^}}]+\}} # Unicode characters by name
| \\[\\'"abfnrtv] # Single-character escapes
)'''.format(_HEXCHAR*8, _HEXCHAR*4, _HEXCHAR*2), re.UNICODE | re.VERBOSE)
def _decode_escapes(s):
def decode_match(match):
return codecs.decode(match.group(0), 'unicode-escape')
return _ESCAPE_SEQUENCE_RE.sub(decode_match, s)
def parse_kv(args, check_raw=False):
'''
Convert a string of key/value items to a dict. If any free-form params
are found and the check_raw option is set to True, they will be added
to a new parameter called '_raw_params'. If check_raw is not enabled,
they will simply be ignored.
'''
### FIXME: args should already be a unicode string
from ansible.utils.unicode import to_unicode
args = to_unicode(args, nonstring='passthru')
options = {}
if args is not None:
try:
vargs = split_args(args)
except ValueError as ve:
if 'no closing quotation' in str(ve).lower():
                raise AnsibleParserError("error parsing argument string, try quoting the entire line.")
else:
raise
raw_params = []
for orig_x in vargs:
x = _decode_escapes(orig_x)
if "=" in x:
pos = 0
try:
while True:
pos = x.index('=', pos + 1)
if pos > 0 and x[pos - 1] != '\\':
break
except ValueError:
# ran out of string, but we must have some escaped equals,
# so replace those and append this to the list of raw params
raw_params.append(x.replace('\\=', '='))
continue
k = x[:pos]
v = x[pos + 1:]
# FIXME: make the retrieval of this list of shell/command
# options a function, so the list is centralized
if check_raw and k not in ('creates', 'removes', 'chdir', 'executable', 'warn'):
raw_params.append(orig_x)
else:
options[k.strip()] = unquote(v.strip())
else:
raw_params.append(orig_x)
# recombine the free-form params, if any were found, and assign
# them to a special option for use later by the shell/command module
if len(raw_params) > 0:
options[u'_raw_params'] = ' '.join(raw_params)
return options
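# Illustrative behaviour (not part of the original module; the sample strings
# and expected dicts are assumptions based on the logic above, not doctests):
#
#   parse_kv('src=/tmp/a dest=/tmp/b')
#   # -> {'src': '/tmp/a', 'dest': '/tmp/b'}
#
#   parse_kv('echo hello creates=/tmp/done', check_raw=True)
#   # -> {'creates': '/tmp/done', '_raw_params': 'echo hello'}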
def _get_quote_state(token, quote_char):
'''
the goal of this block is to determine if the quoted string
is unterminated in which case it needs to be put back together
'''
# the char before the current one, used to see if
# the current character is escaped
prev_char = None
for idx, cur_char in enumerate(token):
if idx > 0:
prev_char = token[idx-1]
if cur_char in '"\'' and prev_char != '\\':
if quote_char:
if cur_char == quote_char:
quote_char = None
else:
quote_char = cur_char
return quote_char
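# Illustrative behaviour (not from the original module):
#
#   _get_quote_state('"foo', None)   # -> '"'  (an unterminated quote was opened)
#   _get_quote_state('bar"', '"')    # -> None (the pending quote is now closed)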
def _count_jinja2_blocks(token, cur_depth, open_token, close_token):
'''
this function counts the number of opening/closing blocks for a
given opening/closing type and adjusts the current depth for that
block based on the difference
'''
num_open = token.count(open_token)
num_close = token.count(close_token)
if num_open != num_close:
cur_depth += (num_open - num_close)
if cur_depth < 0:
cur_depth = 0
return cur_depth
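# Illustrative behaviour (not from the original module): depth tracking for
# tokens that open or close a jinja2 print block.
#
#   _count_jinja2_blocks('{{ foo', 0, '{{', '}}')   # -> 1 (one unmatched open)
#   _count_jinja2_blocks('bar }}', 1, '{{', '}}')   # -> 0 (the block is closed)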
def split_args(args):
'''
Splits args on whitespace, but intelligently reassembles
those that may have been split over a jinja2 block or quotes.
When used in a remote module, we won't ever have to be concerned about
jinja2 blocks, however this function is/will be used in the
core portions as well before the args are templated.
example input: a=b c="foo bar"
example output: ['a=b', 'c="foo bar"']
Basically this is a variation shlex that has some more intelligence for
how Ansible needs to use it.
'''
# the list of params parsed out of the arg string
# this is going to be the result value when we are done
params = []
# Initial split on white space
args = args.strip()
items = args.strip().split('\n')
# iterate over the tokens, and reassemble any that may have been
# split on a space inside a jinja2 block.
# ex if tokens are "{{", "foo", "}}" these go together
# These variables are used
# to keep track of the state of the parsing, since blocks and quotes
# may be nested within each other.
quote_char = None
inside_quotes = False
print_depth = 0 # used to count nested jinja2 {{ }} blocks
block_depth = 0 # used to count nested jinja2 {% %} blocks
comment_depth = 0 # used to count nested jinja2 {# #} blocks
# now we loop over each split chunk, coalescing tokens if the white space
# split occurred within quotes or a jinja2 block of some kind
for itemidx,item in enumerate(items):
# we split on spaces and newlines separately, so that we
# can tell which character we split on for reassembly
# inside quotation characters
tokens = item.strip().split(' ')
line_continuation = False
for idx,token in enumerate(tokens):
# if we hit a line continuation character, but
# we're not inside quotes, ignore it and continue
# on to the next token while setting a flag
if token == '\\' and not inside_quotes:
line_continuation = True
continue
# store the previous quoting state for checking later
was_inside_quotes = inside_quotes
quote_char = _get_quote_state(token, quote_char)
inside_quotes = quote_char is not None
# multiple conditions may append a token to the list of params,
# so we keep track with this flag to make sure it only happens once
# append means add to the end of the list, don't append means concatenate
# it to the end of the last token
appended = False
# if we're inside quotes now, but weren't before, append the token
# to the end of the list, since we'll tack on more to it later
# otherwise, if we're inside any jinja2 block, inside quotes, or we were
# inside quotes (but aren't now) concat this token to the last param
if inside_quotes and not was_inside_quotes:
params.append(token)
appended = True
elif print_depth or block_depth or comment_depth or inside_quotes or was_inside_quotes:
if idx == 0 and was_inside_quotes:
params[-1] = "%s%s" % (params[-1], token)
elif len(tokens) > 1:
spacer = ''
if idx > 0:
spacer = ' '
params[-1] = "%s%s%s" % (params[-1], spacer, token)
else:
params[-1] = "%s\n%s" % (params[-1], token)
appended = True
# if the number of paired block tags is not the same, the depth has changed, so we calculate that here
# and may append the current token to the params (if we haven't previously done so)
prev_print_depth = print_depth
print_depth = _count_jinja2_blocks(token, print_depth, "{{", "}}")
if print_depth != prev_print_depth and not appended:
params.append(token)
appended = True
prev_block_depth = block_depth
block_depth = _count_jinja2_blocks(token, block_depth, "{%", "%}")
if block_depth != prev_block_depth and not appended:
params.append(token)
appended = True
prev_comment_depth = comment_depth
comment_depth = _count_jinja2_blocks(token, comment_depth, "{#", "#}")
if comment_depth != prev_comment_depth and not appended:
params.append(token)
appended = True
# finally, if we're at zero depth for all blocks and not inside quotes, and have not
# yet appended anything to the list of params, we do so now
if not (print_depth or block_depth or comment_depth) and not inside_quotes and not appended and token != '':
params.append(token)
# if this was the last token in the list, and we have more than
# one item (meaning we split on newlines), add a newline back here
# to preserve the original structure
if len(items) > 1 and itemidx != len(items) - 1 and not line_continuation:
params[-1] += '\n'
# always clear the line continuation flag
line_continuation = False
# If we're done and things are not at zero depth or we're still inside quotes,
# raise an error to indicate that the args were unbalanced
if print_depth or block_depth or comment_depth or inside_quotes:
raise AnsibleParserError("failed at splitting arguments, either an unbalanced jinja2 block or quotes: {}".format(args))
return params
|
dr0pz0ne/sibble
|
lib/ansible/parsing/splitter.py
|
Python
|
gpl-3.0
| 10,657 | 0.002721 |
#!/usr/bin/python
import sys, copy, tarfile
""" - Splits Percolator output into decoy and target files.
- Extracts unique PSM/peptides/proteins out of a Percolator output file.
- Merges Percolator output files
Usage: python percolator_output_modifier.py command psm/peptides/proteins [score] infile outfile [outfile2]
"""
try:
from lxml import etree
except Exception:
sys.stderr.write('Failed to import lxml module.')
def readPercout(fname):
doc = None
try:
doc = etree.parse(fname)
except Exception:
sys.stderr.write('Could not parse XML provided in %s or error reading file. \n' % (fname))
return doc
def splitTargetDecoy(doc, args, ns):
""" Splits XML into target/decoy/notspecified elements.
then calls refill function to create a new XML ElementTree with results.
Usage: splitTargetDecoy('test.xml', ['psms', 'peptides', 'proteins'])
"""
for arg in args:
assert arg in ['psms', 'peptides', 'proteins'], Exception('Filtering argument must be one or more of: psms, peptides, proteins.')
output_elts = {
'psms' : doc.xpath('//xmlns:psms', namespaces=ns),
'peptides' : doc.xpath('//xmlns:peptides', namespaces=ns),
'proteins' : doc.xpath('//xmlns:proteins', namespaces=ns)
}
def do_split(name, tree, ns):
""" Does the actual target/decoy splitting. Returns lists of classified elements."""
# check if not passed an empty tree
if len(tree) == 0:
sys.stdout.write('Cannot output %s since it is not in the provided input XML. Continuing.\n' % name)
return [etree.Element('none')]*3
# check if decoy attribute is in tree at all
if tree[0].xpath('//@xmlns:decoy', namespaces=ns) == []:
sys.stderr.write('Percolator output has no specified target/decoy spectra, unable to split.\n')
return [etree.Element('none')]*3
# filter
ttree = []
dtree = []
discarded = []
# ttree = etree.Element(name, ns)
# dtree = etree.Element(name, ns)
# discarded = etree.Element(name, ns)
searchElt = name[:-1]
for element in tree[0].xpath('xmlns:%s' % searchElt, namespaces=ns):
if element.xpath('@xmlns:decoy', namespaces=ns) == ['true']:
dtree.append(element)
elif element.xpath('@xmlns:decoy', namespaces=ns) == ['false']:
ttree.append(element)
else:
discarded.append(element)
return ttree, dtree, discarded
# call splitfunction, then check output
target,decoy,discarded = {},{},{}
for arg in args:
target[arg], decoy[arg], discarded[arg] = do_split(arg, output_elts[arg], ns)
for arg in args:
if [target[arg], decoy[arg], discarded[arg]] != 3*['none']:
break
sys.stderr.write('No data matching %s has been found in Percolator output \
file.\n' % [x for x in args])
# clean parsed elementtree and make new ones
target = refillTree(doc, output_elts.keys(), target, ns)
decoy = refillTree(doc, output_elts.keys(), decoy, ns)
discarded = refillTree(doc, output_elts.keys(), discarded, ns)
# sys.stdout.write('%s\n' % etree.tostring(target))
return target, decoy #, discarded?
def refillTree(doc, oldelements, newelements, ns):
"""Takes an ElementTree, takes out specified oldelements (by tag). Replaces
with list of newelements. Returns a new ElementTree.
"""
# remove specified old elements
newdoc = copy.deepcopy(doc)
root = newdoc.getroot()
elements_toremove = []
for el in oldelements:
removes = root.xpath('xmlns:%s' % el, namespaces=ns) # element 'psms' or 'peptides'
for remove in removes:
children = remove.getchildren()
for el in children:
remove.remove(el)
# put in new ones
for node in root.getchildren():
try:
for child in newelements[node.tag[node.tag.index('}')+1:]]:
node.append(child)
except: # PSMs, peptides should be in this order and not in newelements-dict's arbitrary order.
pass
return etree.ElementTree(root)
def filterUniques(tar, to_filter, score, ns):
""" Filters unique psms/peptides/proteins from (multiple) Percolator output XML files.
Takes a tarred set of XML files, a filtering query (e.g. psms), a score to
filter on and a namespace.
Outputs an ElementTree.
"""
for tf in to_filter:
assert tf in ['psms', 'peptides', 'proteins'], Exception('filterUnique function needs a specified to_filter list of psms, peptides, proteins.')
assert score in ['q','pep','p'], Exception('filterUnique function needs a specified score to filter on of q, pep or p.')
try:
with tarfile.open(tar, 'r') as f:
members = f.getmembers()
f.extractall()
except:
sys.stderr.write('Could not extract Percolator files from dataset: %s \n' % tar)
return 1
docs = []
for fn in members:
docs.append(etree.parse(fn.name))
# lookup dicts
scores = {'q':'q_value', 'pep':'pep', 'p':'p_value'}
filt_el_dict = {'psms':'xmlns:peptide_seq', 'peptides':'@xmlns:peptide_id' }
# result dict
filtered = {'psms':{}, 'peptides':{}, 'proteins':{} }
for doc in docs:
for filt_el in to_filter:
feattree = doc.xpath('//xmlns:%s' % filt_el, namespaces=ns)
if feattree == []:
sys.stdout.write('%s not found in (one of the) Percolator output documents. Continuing...\n' % filt_el)
continue
for feat in feattree[0]:
# It's actually faster to loop through the feat's children,
# but this is 2-line code and still readable.
featscore = float(feat.xpath('xmlns:%s' % scores[score], namespaces=ns)[0].text)
seq = feat.xpath('%s' % filt_el_dict[filt_el], namespaces=ns)
try: # psm seqs are parsed here
seq = seq[0].attrib['seq']
except Exception: ## caught when parsing peptide seqs (different format)
seq = str(seq[0])
if seq not in filtered[filt_el]:
filtered[filt_el][seq] = feat
elif featscore < filtered[filt_el][seq]:
#FIXME now it only works for LOWER than scores (eg q-vals, pep, but not for scores that are better when higher)
filtered[filt_el][seq] = feat
# make trees from filtered dicts
for filt_el in filtered:
outlist = []
for feat in filtered[filt_el].values():
outlist.append(feat)
filtered[filt_el] = outlist
# node = etree.Element(filt_el)
# node.extend(filtered[filt_el].values())
# filtered[filt_el] = node
outdoc = refillTree(docs[0], ['psms', 'peptides', 'proteins'], filtered, ns)
return outdoc
def mergeXML(datasets, ns):
tomerge = ['psms', 'peptides', 'proteins']
tomerge_el = ['psms', 'peptides', 'proteins']
outdoc = readPercout(datasets[0])
root = outdoc.getroot()
for el in tomerge:
tomerge_el.extend(root.xpath('//xmlns:%s' % el, namespaces=ns))
for fn in datasets[1:]:
doc = readPercout(fn)
for tm, tme in zip(tomerge, tomerge_el):
print tm,tme,"both"
for el in doc.xpath('//xmlns:%s' % tm, namespaces=ns):
print el,"el"
tme.append(el)
print tme,"tme"
print tomerge_el,"to merge el"
return outdoc
def untar(tar):
try:
with tarfile.open(tar, 'r') as f:
members = f.getmembers()
f.extractall()
return [x.name for x in members]
except:
sys.stderr.write('Could not extract Percolator files from dataset: %s \n' % tar)
return 1
def writeXML(*args):
"""Takes a filename and _ElementTree arguments in tuples ((fn, tree), (fn2,tree2),...)
and writes an XML file.
"""
try:
# hello! shouldn't these be exceptions instead?
assert False not in [isinstance(x, tuple) for x in args] # arguments are tuples
        assert False not in [len(x) == 2 for x in args] # tuples are 2-length
assert False not in [isinstance(x[0], str) for x in args] # first arg from tuple is string for fn.
assert False not in [isinstance(x[1], etree._ElementTree) for x in args] # second arg from tuple is ElementTree.
except:
Exception('function writeXML takes arguments in form of tuples (filename, XMLtree)')
for fn, doc in args:
try:
outfile = open(fn, 'w')
except Exception:
            sys.stderr.write('Unable to write XML output to file %s \n' % fn)
doc.write(outfile)
def parseOptions(args):
ns = {'xmlns':'http://per-colator.com/percolator_out/13', 'xmlns:p' : 'http://per-colator.com/percolator_out/13', \
'xmlns:xsi':'http://www.w3.org/2001/XMLSchema-instance', \
'xsi:schemaLocation':'http://per-colator.com/percolator_out/13,https://github.com/percolator/percolator/raw/pout-1-3/src/xml/percolator_out.xsd', \
'p:majorVersion':'2', 'p:minorVersion':'04', 'p:percolator_version':'Percolator version 2.04'}
if args[0] == 'splittd':
fname = args[2]
doc = readPercout(fname)
target, decoy = splitTargetDecoy(doc, args[1].split(','), ns)
writeXML((args[3], target), (args[4], decoy))
elif args[0] == 'filter_uni':
tosplit = args[2].split(',')
uniques = filterUniques(args[1], tosplit, args[3], ns)
writeXML((args[4], uniques))
elif args[0] == 'merge': # is there any use for this?
with open(args[1]) as fp:
datasets = fp.read()
print datasets,"these are the datasets"
datasets = [opt.strip() for opt in datasets.strip().split('\n')]
sys.stdout.write('\ndatasets: %s' % datasets)
merged = mergeXML(datasets, ns)
writeXML((args[2], merged))
elif args[0] == 'untar_merge':
files = untar(args[1])
sys.stdout.write('%s'%files)
merged = mergeXML(files, ns)
writeXML((args[2], merged))
else:
sys.stderr.write('Argument %s not recognized.\n' % args[0])
return 1
def main():
parseOptions(sys.argv[1:])
if __name__ == '__main__':
main()
|
wohllab/milkyway_proteomics
|
galaxy_milkyway_files/tools/wohl-proteomics/MSGFcrux/percolator_output_modifier_fractionated.py
|
Python
|
mit
| 10,590 | 0.011143 |
# Copyright 2011 Cisco Systems, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Sumit Naiksatam, Cisco Systems, Inc.
# Attachment attributes
INSTANCE_ID = 'instance_id'
TENANT_ID = 'tenant_id'
TENANT_NAME = 'tenant_name'
HOST_NAME = 'host_name'
# Network attributes
NET_ID = 'id'
NET_NAME = 'name'
NET_VLAN_ID = 'vlan_id'
NET_VLAN_NAME = 'vlan_name'
NET_PORTS = 'ports'
CREDENTIAL_ID = 'credential_id'
CREDENTIAL_NAME = 'credential_name'
CREDENTIAL_USERNAME = 'user_name'
CREDENTIAL_PASSWORD = 'password'
CREDENTIAL_TYPE = 'type'
MASKED_PASSWORD = '********'
USERNAME = 'username'
PASSWORD = 'password'
LOGGER_COMPONENT_NAME = "cisco_plugin"
NEXUS_PLUGIN = 'nexus_plugin'
VSWITCH_PLUGIN = 'vswitch_plugin'
DEVICE_IP = 'device_ip'
NETWORK_ADMIN = 'network_admin'
NETWORK = 'network'
PORT = 'port'
BASE_PLUGIN_REF = 'base_plugin_ref'
CONTEXT = 'context'
SUBNET = 'subnet'
#### N1Kv CONSTANTS
# Special vlan_id value in n1kv_vlan_allocations table indicating flat network
FLAT_VLAN_ID = -1
# Topic for tunnel notifications between the plugin and agent
TUNNEL = 'tunnel'
# Maximum VXLAN range configurable for one network profile.
MAX_VXLAN_RANGE = 1000000
# Values for network_type
NETWORK_TYPE_FLAT = 'flat'
NETWORK_TYPE_VLAN = 'vlan'
NETWORK_TYPE_VXLAN = 'vxlan'
NETWORK_TYPE_LOCAL = 'local'
NETWORK_TYPE_NONE = 'none'
NETWORK_TYPE_TRUNK = 'trunk'
NETWORK_TYPE_MULTI_SEGMENT = 'multi-segment'
# Values for network sub_type
NETWORK_TYPE_OVERLAY = 'overlay'
NETWORK_SUBTYPE_NATIVE_VXLAN = 'native_vxlan'
NETWORK_SUBTYPE_TRUNK_VLAN = NETWORK_TYPE_VLAN
NETWORK_SUBTYPE_TRUNK_VXLAN = NETWORK_TYPE_OVERLAY
# Prefix for VM Network name
VM_NETWORK_NAME_PREFIX = 'vmn_'
DEFAULT_HTTP_TIMEOUT = 15
SET = 'set'
INSTANCE = 'instance'
PROPERTIES = 'properties'
NAME = 'name'
ID = 'id'
POLICY = 'policy'
TENANT_ID_NOT_SET = 'TENANT_ID_NOT_SET'
ENCAPSULATIONS = 'encapsulations'
STATE = 'state'
ONLINE = 'online'
MAPPINGS = 'mappings'
MAPPING = 'mapping'
SEGMENTS = 'segments'
SEGMENT = 'segment'
BRIDGE_DOMAIN_SUFFIX = '_bd'
LOGICAL_NETWORK_SUFFIX = '_log_net'
ENCAPSULATION_PROFILE_SUFFIX = '_profile'
UUID_LENGTH = 36
# Nexus vlan and vxlan segment range
NEXUS_VLAN_RESERVED_MIN = 3968
NEXUS_VLAN_RESERVED_MAX = 4047
NEXUS_VXLAN_MIN = 4096
NEXUS_VXLAN_MAX = 16000000
|
onecloud/neutron
|
neutron/plugins/cisco/common/cisco_constants.py
|
Python
|
apache-2.0
| 2,838 | 0.000352 |
"""Main label index.
Revision ID: e679554261b2
Revises: e2be4ab896d3
Create Date: 2019-05-09 18:55:24.472216
"""
# revision identifiers, used by Alembic.
revision = 'e679554261b2'
down_revision = 'e2be4ab896d3'
from alembic import op
def upgrade():
op.create_index(
op.f('ix_label_main_label_id'), 'label', ['main_label_id'], unique=False
)
def downgrade():
op.drop_index(op.f('ix_label_main_label_id'), table_name='label')
|
hasgeek/funnel
|
migrations/versions/e679554261b2_main_label_index.py
|
Python
|
agpl-3.0
| 452 | 0.004425 |
from flask.ext.assets import Bundle
from . import wa
js_libs = Bundle('js/libs/jquery.min.js',
'js/libs/bootstrap.min.js',
'js/libs/lodash.min.js',
#filters='jsmin',
output='js/libs.js')
js_board = Bundle('js/libs/drawingboard.min.js',
#filters='jsmin',
output='js/board.js')
js_main = Bundle('js/main.js',
#filters='jsmin',
output='js/snh.js')
css_main = Bundle('css/bootstrap.min.css',
'css/font-awesome.min.css',
'css/main.css',
filters='cssmin',
output='css/snh.css')
css_board = Bundle('css/drawingboard.min.css',
filters='cssmin',
output='css/board.css')
wa.register('js_libs', js_libs)
wa.register('js_board', js_board)
wa.register('js_main', js_main)
wa.register('css_main', css_main)
wa.register('css_board', css_board)
|
luizdepra/sketch_n_hit
|
app/assets.py
|
Python
|
mit
| 984 | 0.003049 |
#!/usr/bin/env python3
import csv
import os
import sqlite3
# requires python3
# requires sqlite3
#
sqldb = sqlite3.connect(':memory:')
def main():
    allinputs = []
    while True:
        input_location = input("Please provide the pathname of the file you wish to extract data from. Enter a blank line when you are done. ")
        if not input_location:
            break
        allinputs.append(inputfile(input_location))
    # The original sketch stopped here without saying what should happen to
    # each file, so the parsed inputs are simply returned for later processing.
    return allinputs

class inputfile:
    """Wraps a CSV file on disk and parses its rows up front."""
    def __init__(self, location):
        self.location = location
        self.rows = []
        if not os.path.isfile(self.location):
            print("file not found")
            return
        # The original sketch left the CSV dialect half-specified; a plain
        # comma-delimited reader is assumed here.
        with open(self.location, newline='') as csvfile:
            self.rows = list(csv.reader(csvfile, delimiter=','))

class perfitem_group(inputfile):
    """Group of performance items; left unimplemented in the original sketch."""
    pass

class perfitem(perfitem_group):
    """Single performance item; mkdict() was only a stub in the original sketch."""
    def mkdict(self):
        return {}

class row(inputfile):
    """Row-level access to the parsed CSV."""
    def get(self):
        return self.rows

if __name__ == '__main__':
    main()
|
fedallah/dsperf
|
pytest_2.py
|
Python
|
mit
| 1,084 | 0.057196 |
__problem_title__ = "Eleven-free integers"
__problem_url___ = "https://projecteuler.net/problem=442"
__problem_description__ = "An integer is called eleven-free if its decimal expansion does not contain any " \
"substring representing a power of 11 except 1. For example, 2404 and " \
"13431 are eleven-free, while 911 and 4121331 are not. Let E( ) be the " \
"th positive eleven-free integer. For example, E(3) = 3, E(200) = 213 " \
"and E(500 000) = 531563. Find E(10 )."
import timeit
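# Illustrative sketch (not part of the original solution template): a naive
# eleven-free test that scans the decimal expansion for powers of 11 above 1.
# It only clarifies the definition quoted above and would be far too slow for
# the problem's actual bound.
def _is_eleven_free_naive(n):
    s = str(n)
    power = 11
    while len(str(power)) <= len(s):
        if str(power) in s:
            return False
        power *= 11
    return True
# e.g. _is_eleven_free_naive(2404) -> True; _is_eleven_free_naive(911) -> False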
class Solution():
@staticmethod
def solution1():
pass
@staticmethod
def time_solutions():
setup = 'from __main__ import Solution'
print('Solution 1:', timeit.timeit('Solution.solution1()', setup=setup, number=1))
if __name__ == '__main__':
s = Solution()
print(s.solution1())
s.time_solutions()
|
jrichte43/ProjectEuler
|
Problem-0442/solutions.py
|
Python
|
gpl-3.0
| 936 | 0.00641 |
import sublime
import sublime_plugin
OPTIONS_LAST_REGEX = "jump_caret_last_regex"
class CaretJumpCommand(sublime_plugin.TextCommand):
def run(self, edit, jump=True, jump_to=None, repeat_previous_jump=False):
view = self.view
def get_next_sels(user_input):
new_sels = []
for sel in view.sel():
next_sel = view.find(user_input, sel.end(), sublime.IGNORECASE)
if next_sel.begin() != -1:
new_sels.append(next_sel)
return new_sels
def jump_last_regex():
last_reg = self.view.settings().get(OPTIONS_LAST_REGEX)
if last_reg:
select_next_regex(last_reg)
def select_next_regex(user_input):
view.erase_regions("caret_jump_preview")
if not user_input:
# jump_last_regex()
return
self.view.settings().set(OPTIONS_LAST_REGEX, user_input)
new_sels = get_next_sels(user_input)
if jump and new_sels:
view.sel().clear()
view.sel().add_all(new_sels)
def input_changed(user_input):
new_sels = get_next_sels(user_input)
view.add_regions("caret_jump_preview",
new_sels,
"source, text",
"dot",
sublime.DRAW_OUTLINED)
def input_canceled():
view.erase_regions("caret_jump_preview")
selection = view.substr(view.sel()[0]) if view.sel() else ""
if jump_to:
select_next_regex(jump_to)
elif repeat_previous_jump:
jump_last_regex()
else:
default = selection if selection \
else self.view.settings().get(OPTIONS_LAST_REGEX, "")
view.window().show_input_panel("Seach for",
default,
select_next_regex,
input_changed,
input_canceled)
|
r-stein/sublime-text-caret-jump
|
caret_jump.py
|
Python
|
mit
| 2,134 | 0 |
import wx
import svd
import my
class View(wx.Panel):
def __init__(self, parent, data=None):
wx.Panel.__init__(self, parent)
self.data = {}
self.tree = wx.TreeCtrl(self)
self.tree.AddRoot('FROM_RUSSIA_WITH_LOVE')
self.Bind(wx.EVT_SIZE, self.onResize)
self.Bind(wx.EVT_TREE_SEL_CHANGED, self.OnSelChanged)
self.Bind(wx.EVT_TREE_BEGIN_DRAG, self.onDrag)
self.Bind(wx.EVT_TREE_END_DRAG, self.onDrop)
msizer = wx.BoxSizer(wx.VERTICAL)
msizer.Add(self.tree, 1, wx.EXPAND | wx.ALL, 3)
self.SetSizerAndFit(msizer)
def OnSelChanged(self, event):
item = self.tree.GetFocusedItem()
obj = self.tree.GetPyData(item)
my.post_event(self.GetGrandParent(), my.EVT_SELECTED, obj)
def Reload(self, obj):
item = self.data.get(obj, 0)
if item:
self.tree.SetItemText(item, obj.name)
def Remove(self, obj):
item = self.data.pop(obj, 0)
if item:
if self.tree.IsSelected(item):
self.tree.SelectItem(self.tree.GetPrevVisible(item))
self.tree.Delete(item)
def Append(self, obj):
pi = self.data.get(obj.parent, 0)
ni = self.tree.AppendItem(pi, obj.name)
self.tree.SetPyData(ni, obj)
self.data[obj] = ni
if isinstance(obj, svd.peripheral):
for x in obj.registers:
self.Append(x)
def LoadDevice(self, device):
tree = self.tree
tree.Freeze()
tree.DeleteAllItems()
self.data.clear()
root = tree.AddRoot(device.name)
tree.SetPyData(root, device)
self.data[device] = root
for p in device.peripherals:
pi = tree.AppendItem(root, p.name)
tree.SetPyData(pi, p)
self.data[p] = pi
for r in p.registers:
ri = tree.AppendItem(pi, r.name)
tree.SetPyData(ri, r)
self.data[r] = ri
tree.UnselectAll()
tree.Expand(root)
tree.SelectItem(root)
tree.Thaw()
def AddItem(self, obj):
pass
def DelItem(self, obj):
if obj == self.tree:
item = self.tree.GetSelection()
if item.IsOk():
data = self.tree.GetPyData(item)
if isinstance(data, svd.device):
return
if wx.OK != wx.MessageBox('%s will be deleted' % (data.name),
'Confirm item deletion',
wx.OK | wx.CANCEL | wx.ICON_QUESTION):
return
if isinstance(data, svd.register):
data.parent.delRegister(data)
if isinstance(data, svd.peripheral):
# checking if item have a references
refs = [x for x in data.parent.peripherals if x.ref == data]
if refs:
if wx.OK != wx.MessageBox('%s has a references. Delete all ?' % (data.name),
'Confirm item deletion',
wx.OK | wx.CANCEL | wx.ICON_QUESTION):
return
for x in refs:
data.parent.delPeripheral(x)
self.Remove(x)
data.parent.delPeripheral(data)
self.Remove(data)
def CloneItem(self, obj):
if obj == self.tree:
item = self.tree.GetSelection()
if item.IsOk():
data = self.tree.GetPyData(item)
if isinstance(data, svd.device):
return
if wx.OK != wx.MessageBox('%s will be cloned' % (data.name),
'Confirm item clone',
wx.OK | wx.CANCEL | wx.ICON_QUESTION):
return
if isinstance(data, svd.peripheral):
xml = data.toXML()
p = data.parent
new = svd.peripheral(p, xml)
new.name = '%s_CLONE' % (data.name)
p.addPeripheral(new)
elif isinstance(data, svd.register):
xml = data.toXML()
p = data.parent
new = svd.register(p, xml)
new.name = '%s_CLONE' % (data.name)
p.addRegister(new)
self.Append(new)
def SelectItem(self, obj):
item = self.data.get(obj, None)
if item and item.IsOk():
self.tree.SelectItem(item, True)
def onResize(self, event):
self.Layout()
def onDrag(self, event):
item = event.GetItem()
data = self.tree.GetPyData(item)
if isinstance(data, svd.peripheral) or isinstance(data, svd.register):
self.dragitem = item
event.Allow()
else:
self.dragitem = None
event.Veto()
def onDrop(self, event):
dropitem = event.GetItem()
dropdata = self.tree.GetPyData(dropitem)
dragitem = self.dragitem
dragdata = self.tree.GetPyData(dragitem)
self.dragitem = None
if isinstance(dragdata, svd.peripheral) and isinstance(dropdata, svd.peripheral):
# move peripheral under peripheral
if dragdata == dropdata:
return # trivial. nothing to do
parent = dragdata.parent
if dragdata.ref and parent.peripherals.index(dropdata) < parent.peripherals.index(dragdata.ref):
return # can't put reference before original
for x in parent.peripherals:
if x.ref == dragdata and parent.peripherals.index(dropdata) >= parent.peripherals.index(x):
return # can't put original after reference
item = self.tree.InsertItem(self.data[parent], dropitem, dragdata.name)
parent.movePeripheral(dropdata, dragdata)
self.tree.SetPyData(item, dragdata)
self.data[dragdata] = item
for x in dragdata.registers:
nr = self.tree.AppendItem(item, x._name)
self.tree.SetPyData(nr, x)
self.data[x] = nr
self.tree.Delete(dragitem)
elif isinstance(dragdata, svd.register) and isinstance(dropdata, svd.peripheral):
# move register to other peripheral
item = None
for x in reversed(dropdata.registers):
if x._offset == dragdata._offset:
                    # destination offset is busy
return
if x._offset < dragdata._offset:
item = self.tree.InsertItem(dropitem, self.data[x], dragdata.name)
break
if item is None:
item = self.tree.PrependItem(dropitem, dragdata.name)
parent = dragdata.parent
parent.delRegister(dragdata)
dropdata.addRegister(dragdata)
self.tree.SetPyData(item, dragdata)
self.data[dragdata] = item
self.tree.Delete(dragitem)
|
dmitrystu/svd_editor
|
modules/tview.py
|
Python
|
apache-2.0
| 7,250 | 0.001517 |
#!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = '''
---
module: nxos_vlan
extends_documentation_fragment: nxos
version_added: "2.1"
short_description: Manages VLAN resources and attributes.
description:
- Manages VLAN configurations on NX-OS switches.
author: Jason Edelman (@jedelman8)
options:
vlan_id:
description:
- Single VLAN ID.
required: false
default: null
vlan_range:
description:
- Range of VLANs such as 2-10 or 2,5,10-15, etc.
required: false
default: null
name:
description:
- Name of VLAN.
required: false
default: null
vlan_state:
description:
- Manage the vlan operational state of the VLAN
(equivalent to state {active | suspend} command.
required: false
default: active
choices: ['active','suspend']
admin_state:
description:
- Manage the VLAN administrative state of the VLAN equivalent
to shut/no shut in VLAN config mode.
required: false
default: up
choices: ['up','down']
mapped_vni:
description:
- The Virtual Network Identifier (VNI) ID that is mapped to the
VLAN. Valid values are integer and keyword 'default'.
required: false
default: null
version_added: "2.2"
state:
description:
- Manage the state of the resource.
required: false
default: present
choices: ['present','absent']
'''
EXAMPLES = '''
- name: Ensure a range of VLANs are not present on the switch
nxos_vlan:
vlan_range: "2-10,20,50,55-60,100-150"
host: 68.170.147.165
username: cisco
password: cisco
state: absent
transport: nxapi
- name: Ensure VLAN 50 exists with the name WEB and is in the shutdown state
nxos_vlan:
vlan_id: 50
host: 68.170.147.165
admin_state: down
name: WEB
transport: nxapi
username: cisco
password: cisco
- name: Ensure VLAN is NOT on the device
nxos_vlan:
vlan_id: 50
host: 68.170.147.165
state: absent
transport: nxapi
username: cisco
password: cisco
'''
RETURN = '''
proposed_vlans_list:
description: list of VLANs being proposed
returned: when debug enabled
type: list
sample: ["100"]
existing_vlans_list:
description: list of existing VLANs on the switch prior to making changes
returned: when debug enabled
type: list
sample: ["1", "2", "3", "4", "5", "20"]
end_state_vlans_list:
description: list of VLANs after the module is executed
returned: when debug enabled
type: list
sample: ["1", "2", "3", "4", "5", "20", "100"]
proposed:
description: k/v pairs of parameters passed into module (does not include
vlan_id or vlan_range)
returned: when debug enabled
type: dict or null
sample: {"admin_state": "down", "name": "app_vlan",
"vlan_state": "suspend", "mapped_vni": "5000"}
existing:
description: k/v pairs of existing vlan or null when using vlan_range
returned: when debug enabled
type: dict
sample: {"admin_state": "down", "name": "app_vlan",
"vlan_id": "20", "vlan_state": "suspend", "mapped_vni": ""}
end_state:
description: k/v pairs of the VLAN after executing module or null
when using vlan_range
returned: when debug enabled
type: dict or null
sample: {"admin_state": "down", "name": "app_vlan", "vlan_id": "20",
"vlan_state": "suspend", "mapped_vni": "5000"}
updates:
description: command string sent to the device
returned: always
type: list
sample: ["vlan 20", "vlan 55", "vn-segment 5000"]
commands:
description: command string sent to the device
returned: always
type: list
sample: ["vlan 20", "vlan 55", "vn-segment 5000"]
changed:
description: check to see if a change was made on the device
returned: always
type: boolean
sample: true
'''
import re

from ansible.module_utils.nxos import get_config, load_config, run_commands
from ansible.module_utils.nxos import nxos_argument_spec, check_args
from ansible.module_utils.basic import AnsibleModule
def vlan_range_to_list(vlans):
result = []
if vlans:
for part in vlans.split(','):
if part == 'none':
break
if '-' in part:
a, b = part.split('-')
a, b = int(a), int(b)
result.extend(range(a, b + 1))
else:
a = int(part)
result.append(a)
return numerical_sort(result)
return result
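# Illustrative example (not part of the original module): expanding a mixed
# range string into a numerically sorted list of VLAN ID strings.
#
#   vlan_range_to_list('2-4,10')  # -> ['2', '3', '4', '10']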
def numerical_sort(string_int_list):
"""Sort list of strings (VLAN IDs) that are digits in numerical order.
"""
as_int_list = []
as_str_list = []
for vlan in string_int_list:
as_int_list.append(int(vlan))
as_int_list.sort()
for vlan in as_int_list:
as_str_list.append(str(vlan))
return as_str_list
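# For example (illustrative, not from the original module):
#
#   numerical_sort(['10', '2', '1'])  # -> ['1', '2', '10']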
def build_commands(vlans, state):
commands = []
for vlan in vlans:
if state == 'present':
command = 'vlan {0}'.format(vlan)
commands.append(command)
elif state == 'absent':
command = 'no vlan {0}'.format(vlan)
commands.append(command)
return commands
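# Illustrative example (not from the original module):
#
#   build_commands(['10', '20'], 'present')  # -> ['vlan 10', 'vlan 20']
#   build_commands(['10'], 'absent')         # -> ['no vlan 10']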
def get_vlan_config_commands(vlan, vid):
"""Build command list required for VLAN configuration
"""
reverse_value_map = {
"admin_state": {
"down": "shutdown",
"up": "no shutdown"
}
}
if vlan.get('admin_state'):
# apply value map when making change to the admin state
# note: would need to be a loop or more in depth check if
# value map has more than 1 key
vlan = apply_value_map(reverse_value_map, vlan)
VLAN_ARGS = {
'name': 'name {0}',
'vlan_state': 'state {0}',
'admin_state': '{0}',
'mode': 'mode {0}',
'mapped_vni': 'vn-segment {0}'
}
commands = []
for param, value in vlan.items():
if param == 'mapped_vni' and value == 'default':
command = 'no vn-segment'
else:
command = VLAN_ARGS.get(param).format(vlan.get(param))
if command:
commands.append(command)
commands.insert(0, 'vlan ' + vid)
commands.append('exit')
return commands
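# Illustrative example (an assumption based on the mapping above, not an
# official doctest):
#
#   get_vlan_config_commands({'name': 'WEB'}, '10')
#   # -> ['vlan 10', 'name WEB', 'exit']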
def get_list_of_vlans(module):
body = run_commands(module, ['show vlan | json'])
vlan_list = []
vlan_table = body[0].get('TABLE_vlanbrief')['ROW_vlanbrief']
if isinstance(vlan_table, list):
for vlan in vlan_table:
vlan_list.append(str(vlan['vlanshowbr-vlanid-utf']))
else:
vlan_list.append('1')
return vlan_list
def get_vni(vlanid, module):
flags = str('all | section vlan.{0}'.format(vlanid)).split(' ')
body = get_config(module, flags=flags)
#command = 'show run all | section vlan.{0}'.format(vlanid)
#body = execute_show_command(command, module, command_type='cli_show_ascii')[0]
value = ''
if body:
REGEX = re.compile(r'(?:vn-segment\s)(?P<value>.*)$', re.M)
if 'vn-segment' in body:
value = REGEX.search(body).group('value')
return value
def get_vlan(vlanid, module):
"""Get instance of VLAN as a dictionary
"""
command = 'show vlan id %s | json' % vlanid
body = run_commands(module, [command])
#command = 'show vlan id ' + vlanid
#body = execute_show_command(command, module)
try:
vlan_table = body[0]['TABLE_vlanbriefid']['ROW_vlanbriefid']
except (TypeError, IndexError):
return {}
key_map = {
"vlanshowbr-vlanid-utf": "vlan_id",
"vlanshowbr-vlanname": "name",
"vlanshowbr-vlanstate": "vlan_state",
"vlanshowbr-shutstate": "admin_state"
}
vlan = apply_key_map(key_map, vlan_table)
value_map = {
"admin_state": {
"shutdown": "down",
"noshutdown": "up"
}
}
vlan = apply_value_map(value_map, vlan)
vlan['mapped_vni'] = get_vni(vlanid, module)
return vlan
def apply_key_map(key_map, table):
new_dict = {}
for key, value in table.items():
new_key = key_map.get(key)
if new_key:
new_dict[new_key] = str(value)
return new_dict
def apply_value_map(value_map, resource):
for key, value in value_map.items():
resource[key] = value[resource.get(key)]
return resource
def main():
argument_spec = dict(
vlan_id=dict(required=False, type='str'),
vlan_range=dict(required=False),
name=dict(required=False),
vlan_state=dict(choices=['active', 'suspend'], required=False),
mapped_vni=dict(required=False, type='str'),
state=dict(choices=['present', 'absent'], default='present',
required=False),
admin_state=dict(choices=['up', 'down'], required=False),
include_defaults=dict(default=False),
config=dict(),
save=dict(type='bool', default=False)
)
    argument_spec.update(nxos_argument_spec)
module = AnsibleModule(argument_spec=argument_spec,
mutually_exclusive=[['vlan_range', 'name'],
['vlan_id', 'vlan_range']],
supports_check_mode=True)
    warnings = list()
    check_args(module, warnings)
vlan_range = module.params['vlan_range']
vlan_id = module.params['vlan_id']
name = module.params['name']
vlan_state = module.params['vlan_state']
admin_state = module.params['admin_state']
mapped_vni = module.params['mapped_vni']
state = module.params['state']
changed = False
if vlan_id:
if not vlan_id.isdigit():
module.fail_json(msg='vlan_id must be a valid VLAN ID')
args = dict(name=name, vlan_state=vlan_state,
admin_state=admin_state, mapped_vni=mapped_vni)
proposed = dict((k, v) for k, v in args.items() if v is not None)
proposed_vlans_list = numerical_sort(vlan_range_to_list(
vlan_id or vlan_range))
existing_vlans_list = numerical_sort(get_list_of_vlans(module))
commands = []
existing = {}
if vlan_range:
if state == 'present':
# These are all of the VLANs being proposed that don't
# already exist on the switch
vlans_delta = list(
set(proposed_vlans_list).difference(existing_vlans_list))
commands = build_commands(vlans_delta, state)
elif state == 'absent':
# VLANs that are common between what is being proposed and
# what is on the switch
vlans_common = list(
set(proposed_vlans_list).intersection(existing_vlans_list))
commands = build_commands(vlans_common, state)
else:
existing = get_vlan(vlan_id, module)
if state == 'absent':
if existing:
commands = ['no vlan ' + vlan_id]
elif state == 'present':
if (existing.get('mapped_vni') == '0' and
proposed.get('mapped_vni') == 'default'):
proposed.pop('mapped_vni')
delta = dict(set(
proposed.items()).difference(existing.items()))
if delta or not existing:
commands = get_vlan_config_commands(delta, vlan_id)
end_state = existing
end_state_vlans_list = existing_vlans_list
if commands:
if existing.get('mapped_vni') and state != 'absent':
if (existing.get('mapped_vni') != proposed.get('mapped_vni') and
existing.get('mapped_vni') != '0' and proposed.get('mapped_vni') != 'default'):
commands.insert(1, 'no vn-segment')
if module.check_mode:
module.exit_json(changed=True,
commands=commands)
else:
load_config(module, commands)
changed = True
end_state_vlans_list = numerical_sort(get_list_of_vlans(module))
if 'configure' in commands:
commands.pop(0)
if vlan_id:
end_state = get_vlan(vlan_id, module)
results = {
'commands': commands,
'updates': commands,
'changed': changed,
'warnings': warnings
}
if module._debug:
results.update({
'proposed_vlans_list': proposed_vlans_list,
'existing_vlans_list': existing_vlans_list,
'proposed': proposed,
'existing': existing,
'end_state': end_state,
'end_state_vlans_list': end_state_vlans_list
})
module.exit_json(**results)
if __name__ == '__main__':
main()
|
t0mk/ansible
|
lib/ansible/modules/network/nxos/nxos_vlan.py
|
Python
|
gpl-3.0
| 13,902 | 0.001511 |
# -*- coding: utf-8 -*-
from os import path
from gluon import current
from gluon.html import *
from s3 import s3_represent_facilities, s3_register_validation
# =============================================================================
class index():
""" Custom Home Page """
def __call__(self):
T = current.T
auth = current.auth
db = current.db
s3db = current.s3db
request = current.request
appname = request.application
response = current.response
s3 = response.s3
settings = current.deployment_settings
view = path.join(request.folder, "private", "templates",
"RGIMS", "views", "index.html")
try:
# Pass view as file not str to work in compiled mode
response.view = open(view, "rb")
except IOError:
from gluon.http import HTTP
raise HTTP("404", "Unable to open Custom View: %s" % view)
title = settings.get_system_name()
response.title = title
# flag for the link change
# (condition, warehouse_id)
flag = (False, 0)
# change of link will happen
# if pe_id is part of the inv_warehouse
wh_table = s3db.table('inv_warehouse')
if wh_table:
auth_table = db((db.auth_membership.user_id == auth.user_id) &
(db.auth_membership.pe_id == wh_table.pe_id))
for entity in auth_table.select(wh_table.id):
if entity.id:
flag = (True, entity.id)
break
if flag[0]:
# Menu Boxes
menu_btns = [#div, label, app, function
["sit", T("Request"), "inv", "warehouse/%s/req" % flag[1]],
["dec", T("Send"), "inv", "warehouse/%s/send" % flag[1]],
["res", T("Receive"), "inv", "warehouse/%s/recv" % flag[1]]
]
else:
# Menu Boxes
menu_btns = [#div, label, app, function
["sit", T("Request"), "req", "req"],
["dec", T("Send"), "inv", "send"],
["res", T("Receive"), "inv", "recv"]
]
menu_divs = {"facility": DIV( H3("Map"),
_id = "facility_box", _class = "menu_box"),
"sit": DIV(
_id = "menu_div_sit", _class = "menu_div"),
"dec": DIV(
_id = "menu_div_dec", _class = "menu_div"),
"res": DIV(
_id = "menu_div_res", _class = "menu_div"),
}
for div, label, app, function in menu_btns:
if settings.has_module(app):
# @ToDo: Also check permissions (e.g. for anonymous users)
menu_divs[div].append(A(DIV(label,
_class = "menu-btn-r"),
_class = "menu-btn-l",
_href = URL(app,function)
)
)
div_arrow = DIV(IMG(_src = "/%s/static/img/arrow_blue_right.png" % \
appname),
_class = "div_arrow")
sit_dec_res_box = DIV(menu_divs["sit"],
div_arrow,
menu_divs["dec"],
div_arrow,
menu_divs["res"],
_id = "sit_dec_res_box",
_class = "menu_box fleft swidth"
#div_additional,
)
facility_box = menu_divs["facility"]
facility_box.append(A(IMG(_src = "/%s/static/img/map_icon_128.png" % \
appname),
_href = URL(c="gis", f="index"),
_title = T("Map")
)
)
# Check logged in AND permissions
_s3 = current.session.s3
AUTHENTICATED = _s3.system_roles.AUTHENTICATED
roles = _s3.roles
if AUTHENTICATED in roles and \
auth.s3_has_permission("read", db.org_organisation):
auth.permission.controller = "org"
auth.permission.function = "site"
permitted_facilities = auth.permitted_facilities(redirect_on_error=False)
manage_facility_box = ""
if permitted_facilities:
facility_list = s3_represent_facilities(db, permitted_facilities,
link=False)
facility_list = sorted(facility_list, key=lambda fac: fac[1])
facility_opts = [OPTION(opt[1], _value = opt[0])
for opt in facility_list]
facility_opts.insert(0, OPTION("Please Select a Warehouse"))
if facility_list:
manage_facility_box = DIV(H3(T("Manage Your Warehouse")),
SELECT(_id = "manage_facility_select",
_style = "max-width:400px;",
*facility_opts
),
A(T("Go"),
_href = URL(c="default", f="site",
args=[facility_list[0][0]]),
#_disabled = "disabled",
_id = "manage_facility_btn",
_class = "action-btn"
),
_id = "manage_facility_box",
_class = "menu_box fleft")
s3.jquery_ready.append(
'''$('#manage_facility_select').change(function(){
$('#manage_facility_btn').attr('href',S3.Ap.concat('/default/site/',$('#manage_facility_select').val()))
})''')
else:
manage_facility_box = DIV()
else:
manage_facility_box = ""
# Login/Registration forms
self_registration = settings.get_security_self_registration()
registered = False
login_form = None
login_div = None
register_form = None
register_div = None
if AUTHENTICATED not in roles:
# This user isn't yet logged-in
if request.cookies.has_key("registered"):
# This browser has logged-in before
registered = True
if self_registration:
# Provide a Registration box on front page
request.args = ["register"]
if settings.get_terms_of_service():
auth.messages.submit_button = T("I accept. Create my account.")
else:
auth.messages.submit_button = T("Register")
register_form = auth()
register_div = DIV(H3(T("Register")),
P(XML(T("If you would like to help, then please %(sign_up_now)s") % \
dict(sign_up_now=B(T("sign-up now"))))))
# Add client-side validation
s3_register_validation()
if s3.debug:
s3.scripts.append("/%s/static/scripts/jquery.validate.js" % appname)
else:
s3.scripts.append("/%s/static/scripts/jquery.validate.min.js" % appname)
if request.env.request_method == "POST":
post_script = \
'''$('#register_form').removeClass('hide')
$('#login_form').addClass('hide')'''
else:
post_script = ""
register_script = \
'''$('#register-btn').attr('href','#register')
$('#login-btn').attr('href','#login')
%s
$('#register-btn').click(function(){
$('#register_form').removeClass('hide')
$('#login_form').addClass('hide')
})
$('#login-btn').click(function(){
$('#register_form').addClass('hide')
$('#login_form').removeClass('hide')
})''' % post_script
s3.jquery_ready.append(register_script)
# Provide a login box on front page
request.args = ["login"]
auth.messages.submit_button = T("Login")
login_form = auth()
login_div = DIV(H3(T("Login")),
P(XML(T("Registered users can %(login)s to access the system" % \
dict(login=B(T("login")))))))
return dict(title = title,
sit_dec_res_box = sit_dec_res_box,
facility_box = facility_box,
manage_facility_box = manage_facility_box,
self_registration=self_registration,
registered=registered,
login_form=login_form,
login_div=login_div,
register_form=register_form,
register_div=register_div
)
# END =========================================================================
|
flavour/rgims_as_diff
|
private/templates/RGIMS/controllers.py
|
Python
|
mit
| 9,573 | 0.011804 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import warnings
import pytest
from translate.convert import dtd2po, po2dtd, test_convert
from translate.misc import wStringIO
from translate.storage import dtd, po
class TestPO2DTD:
def setup_method(self, method):
warnings.resetwarnings()
def teardown_method(self, method):
warnings.resetwarnings()
def po2dtd(self, posource, remove_untranslated=False):
"""helper that converts po source to dtd source without requiring files"""
inputfile = wStringIO.StringIO(posource)
inputpo = po.pofile(inputfile)
convertor = po2dtd.po2dtd(remove_untranslated=remove_untranslated)
outputdtd = convertor.convertstore(inputpo)
return outputdtd
def merge2dtd(self, dtdsource, posource):
"""helper that merges po translations to dtd source without requiring files"""
inputfile = wStringIO.StringIO(posource)
inputpo = po.pofile(inputfile)
templatefile = wStringIO.StringIO(dtdsource)
templatedtd = dtd.dtdfile(templatefile)
convertor = po2dtd.redtd(templatedtd)
outputdtd = convertor.convertstore(inputpo)
return outputdtd
def convertdtd(self, posource, dtdtemplate, remove_untranslated=False):
"""helper to exercise the command line function"""
inputfile = wStringIO.StringIO(posource)
outputfile = wStringIO.StringIO()
templatefile = wStringIO.StringIO(dtdtemplate)
assert po2dtd.convertdtd(inputfile, outputfile, templatefile,
remove_untranslated=remove_untranslated)
return outputfile.getvalue()
def roundtripsource(self, dtdsource):
"""converts dtd source to po and back again, returning the resulting source"""
dtdinputfile = wStringIO.StringIO(dtdsource)
dtdinputfile2 = wStringIO.StringIO(dtdsource)
pooutputfile = wStringIO.StringIO()
dtd2po.convertdtd(dtdinputfile, pooutputfile, dtdinputfile2)
posource = pooutputfile.getvalue()
poinputfile = wStringIO.StringIO(posource)
dtdtemplatefile = wStringIO.StringIO(dtdsource)
dtdoutputfile = wStringIO.StringIO()
po2dtd.convertdtd(poinputfile, dtdoutputfile, dtdtemplatefile)
dtdresult = dtdoutputfile.getvalue()
print_string = "Original DTD:\n%s\n\nPO version:\n%s\n\n"
print_string = print_string + "Output DTD:\n%s\n################"
print(print_string % (dtdsource, posource, dtdresult))
return dtdresult
def roundtripstring(self, entitystring):
"""Just takes the contents of a ENTITY definition (with quotes) and does a roundtrip on that"""
dtdintro, dtdoutro = '<!ENTITY Test.RoundTrip ', '>\n'
dtdsource = dtdintro + entitystring + dtdoutro
dtdresult = self.roundtripsource(dtdsource)
assert dtdresult.startswith(dtdintro) and dtdresult.endswith(dtdoutro)
return dtdresult[len(dtdintro):-len(dtdoutro)]
def check_roundtrip(self, dtdsource, dtdcompare=None):
"""Checks that the round-tripped string is the same as dtdcompare.
If no dtdcompare string is provided then the round-tripped string is
compared with the original string.
The reason why sometimes another string is provided to compare with the
resulting string from the roundtrip is that if the original string
contains some characters, like " character, or escapes like ",
then when the roundtrip is performed those characters or escapes are
escaped, rendering a round-tripped string which differs from the
original one.
"""
if not dtdcompare:
dtdcompare = dtdsource
assert self.roundtripstring(dtdsource) == dtdcompare
def test_joinlines(self):
"""tests that po lines are joined seamlessly (bug 16)"""
multilinepo = '''#: pref.menuPath\nmsgid ""\n"<span>Tools > Options</"\n"span>"\nmsgstr ""\n'''
dtdfile = self.po2dtd(multilinepo)
dtdsource = str(dtdfile)
assert "</span>" in dtdsource
def test_escapedstr(self):
"""tests that \n in msgstr is escaped correctly in dtd"""
multilinepo = '''#: pref.menuPath\nmsgid "Hello\\nEveryone"\nmsgstr "Good day\\nAll"\n'''
dtdfile = self.po2dtd(multilinepo)
dtdsource = str(dtdfile)
assert "Good day\nAll" in dtdsource
def test_missingaccesskey(self):
"""tests that proper warnings are given if access key is missing"""
simplepo = '''#: simple.label
#: simple.accesskey
msgid "Simple &String"
msgstr "Dimpled Ring"
'''
simpledtd = '''<!ENTITY simple.label "Simple String">
<!ENTITY simple.accesskey "S">'''
warnings.simplefilter("error")
assert pytest.raises(Warning, self.merge2dtd, simpledtd, simplepo)
def test_accesskeycase(self):
"""tests that access keys come out with the same case as the original, regardless"""
simplepo_template = '''#: simple.label\n#: simple.accesskey\nmsgid "%s"\nmsgstr "%s"\n'''
simpledtd_template = '''<!ENTITY simple.label "Simple %s">\n<!ENTITY simple.accesskey "%s">'''
possibilities = [
#(en label, en akey, en po, af po, af label, expected af akey)
("Sis", "S", "&Sis", "&Sies", "Sies", "S"),
("Sis", "s", "Si&s", "&Sies", "Sies", "S"),
("Sis", "S", "&Sis", "Sie&s", "Sies", "s"),
("Sis", "s", "Si&s", "Sie&s", "Sies", "s"),
# untranslated strings should have the casing of the source
("Sis", "S", "&Sis", "", "Sis", "S"),
("Sis", "s", "Si&s", "", "Sis", "s"),
("Suck", "S", "&Suck", "", "Suck", "S"),
("Suck", "s", "&Suck", "", "Suck", "s"),
]
for (en_label, en_akey, po_source, po_target, target_label, target_akey) in possibilities:
simplepo = simplepo_template % (po_source, po_target)
simpledtd = simpledtd_template % (en_label, en_akey)
dtdfile = self.merge2dtd(simpledtd, simplepo)
dtdfile.makeindex()
accel = dtd.unquotefromdtd(dtdfile.id_index["simple.accesskey"].definition)
assert accel == target_akey
def test_accesskey_types(self):
"""tests that we can detect the various styles of accesskey"""
simplepo_template = '''#: simple.%s\n#: simple.%s\nmsgid "&File"\nmsgstr "F&aele"\n'''
simpledtd_template = '''<!ENTITY simple.%s "File">\n<!ENTITY simple.%s "a">'''
for label in ("label", "title"):
for accesskey in ("accesskey", "accessKey", "akey"):
simplepo = simplepo_template % (label, accesskey)
simpledtd = simpledtd_template % (label, accesskey)
dtdfile = self.merge2dtd(simpledtd, simplepo)
dtdfile.makeindex()
assert dtd.unquotefromdtd(dtdfile.id_index["simple.%s" % accesskey].definition) == "a"
def test_ampersandfix(self):
"""tests that invalid ampersands are fixed in the dtd"""
simplestring = '''#: simple.string\nmsgid "Simple String"\nmsgstr "Dimpled &Ring"\n'''
dtdfile = self.po2dtd(simplestring)
dtdsource = str(dtdfile)
assert "Dimpled Ring" in dtdsource
po_snippet = r'''#: searchIntegration.label
#: searchIntegration.accesskey
msgid "Allow &searchIntegration.engineName; to &search messages"
msgstr "&searchIntegration.engineName; &ileti aramasına izin ver"
'''
dtd_snippet = r'''<!ENTITY searchIntegration.accesskey "s">
<!ENTITY searchIntegration.label "Allow &searchIntegration.engineName; to search messages">'''
dtdfile = self.merge2dtd(dtd_snippet, po_snippet)
dtdsource = str(dtdfile)
print(dtdsource)
assert '"&searchIntegration.engineName; ileti aramasına izin ver"' in dtdsource
def test_accesskey_missing(self):
"""tests that missing ampersands use the source accesskey"""
po_snippet = r'''#: key.label
#: key.accesskey
msgid "&Search"
msgstr "Ileti"
'''
dtd_snippet = r'''<!ENTITY key.accesskey "S">
<!ENTITY key.label "Ileti">'''
dtdfile = self.merge2dtd(dtd_snippet, po_snippet)
dtdsource = str(dtdfile)
print(dtdsource)
assert '"Ileti"' in dtdsource
assert '""' not in dtdsource
assert '"S"' in dtdsource
def test_accesskey_and_amp_case_no_accesskey(self):
"""tests that accesskey and & can work together
If missing we use the source accesskey"""
po_snippet = r'''#: key.label
#: key.accesskey
msgid "Colour & &Light"
msgstr "Lig en Kleur"
'''
dtd_snippet = r'''<!ENTITY key.accesskey "L">
<!ENTITY key.label "Colour & Light">'''
dtdfile = self.merge2dtd(dtd_snippet, po_snippet)
dtdsource = str(dtdfile)
print(dtdsource)
assert '"Lig en Kleur"' in dtdsource
assert '"L"' in dtdsource
def test_accesskey_and_amp_case_no_amp(self):
"""tests that accesskey and & can work together
If present we use the target accesskey"""
po_snippet = r'''#: key.label
#: key.accesskey
msgid "Colour & &Light"
msgstr "Lig en &Kleur"
'''
dtd_snippet = r'''<!ENTITY key.accesskey "L">
<!ENTITY key.label "Colour & Light">'''
dtdfile = self.merge2dtd(dtd_snippet, po_snippet)
dtdsource = str(dtdfile)
print(dtdsource)
assert '"Lig en Kleur"' in dtdsource
assert '"K"' in dtdsource
def test_accesskey_and_amp_case_both_amp_and_accesskey(self):
"""tests that accesskey and & can work together
If present both & (and) and a marker then we use the correct source
accesskey"""
po_snippet = r'''#: key.label
#: key.accesskey
msgid "Colour & &Light"
msgstr "Lig & &Kleur"
'''
dtd_snippet = r'''<!ENTITY key.accesskey "L">
<!ENTITY key.label "Colour & Light">'''
dtdfile = self.merge2dtd(dtd_snippet, po_snippet)
dtdsource = str(dtdfile)
print(dtdsource)
assert '"Lig & Kleur"' in dtdsource
assert '"K"' in dtdsource
def test_entities_two(self):
"""test the error ouput when we find two entities"""
simplestring = '''#: simple.string second.string\nmsgid "Simple String"\nmsgstr "Dimpled Ring"\n'''
dtdfile = self.po2dtd(simplestring)
dtdsource = str(dtdfile)
assert "CONVERSION NOTE - multiple entities" in dtdsource
def test_entities(self):
"""tests that entities are correctly idnetified in the dtd"""
simplestring = '''#: simple.string\nmsgid "Simple String"\nmsgstr "Dimpled Ring"\n'''
dtdfile = self.po2dtd(simplestring)
dtdsource = str(dtdfile)
assert dtdsource.startswith("<!ENTITY simple.string")
def test_comments_translator(self):
"""tests for translator comments"""
simplestring = '''# Comment1\n# Comment2\n#: simple.string\nmsgid "Simple String"\nmsgstr "Dimpled Ring"\n'''
dtdfile = self.po2dtd(simplestring)
dtdsource = str(dtdfile)
assert dtdsource.startswith("<!-- Comment1 -->")
def test_retains_hashprefix(self):
"""tests that hash prefixes in the dtd are retained"""
hashpo = '''#: lang.version\nmsgid "__MOZILLA_LOCALE_VERSION__"\nmsgstr "__MOZILLA_LOCALE_VERSION__"\n'''
hashdtd = '#expand <!ENTITY lang.version "__MOZILLA_LOCALE_VERSION__">\n'
dtdfile = self.merge2dtd(hashdtd, hashpo)
regendtd = str(dtdfile)
assert regendtd == hashdtd
def test_convertdtd(self):
"""checks that the convertdtd function is working"""
posource = '''#: simple.label\n#: simple.accesskey\nmsgid "Simple &String"\nmsgstr "Dimpled &Ring"\n'''
dtdtemplate = '''<!ENTITY simple.label "Simple String">\n<!ENTITY simple.accesskey "S">\n'''
dtdexpected = '''<!ENTITY simple.label "Dimpled Ring">\n<!ENTITY simple.accesskey "R">\n'''
newdtd = self.convertdtd(posource, dtdtemplate)
print(newdtd)
assert newdtd == dtdexpected
def test_untranslated_with_template(self):
"""test removing of untranslated entries in redtd"""
posource = '''#: simple.label
msgid "Simple string"
msgstr "Dimpled ring"
#: simple.label2
msgid "Simple string 2"
msgstr ""
#: simple.label3
msgid "Simple string 3"
msgstr "Simple string 3"
#: simple.label4
#, fuzzy
msgid "Simple string 4"
msgstr "simple string four"
'''
dtdtemplate = '''<!ENTITY simple.label "Simple string">
<!ENTITY simple.label2 "Simple string 2">
<!ENTITY simple.label3 "Simple string 3">
<!ENTITY simple.label4 "Simple string 4">
'''
dtdexpected = '''<!ENTITY simple.label "Dimpled ring">
<!ENTITY simple.label3 "Simple string 3">
'''
newdtd = self.convertdtd(posource, dtdtemplate, remove_untranslated=True)
print(newdtd)
assert newdtd == dtdexpected
def test_untranslated_without_template(self):
"""test removing of untranslated entries in po2dtd"""
posource = '''#: simple.label
msgid "Simple string"
msgstr "Dimpled ring"
#: simple.label2
msgid "Simple string 2"
msgstr ""
#: simple.label3
msgid "Simple string 3"
msgstr "Simple string 3"
#: simple.label4
#, fuzzy
msgid "Simple string 4"
msgstr "simple string four"
'''
dtdexpected = '''<!ENTITY simple.label "Dimpled ring">
<!ENTITY simple.label3 "Simple string 3">
'''
newdtd = self.po2dtd(posource, remove_untranslated=True)
print(newdtd)
assert str(newdtd) == dtdexpected
def test_blank_source(self):
"""test removing of untranslated entries where source is blank"""
posource = '''#: simple.label
msgid "Simple string"
msgstr "Dimpled ring"
#: simple.label2
msgid ""
msgstr ""
#: simple.label3
msgid "Simple string 3"
msgstr "Simple string 3"
'''
dtdtemplate = '''<!ENTITY simple.label "Simple string">
<!ENTITY simple.label2 "">
<!ENTITY simple.label3 "Simple string 3">
'''
dtdexpected_with_template = '''<!ENTITY simple.label "Dimpled ring">
<!ENTITY simple.label2 "">
<!ENTITY simple.label3 "Simple string 3">
'''
dtdexpected_no_template = '''<!ENTITY simple.label "Dimpled ring">
<!ENTITY simple.label3 "Simple string 3">
'''
newdtd_with_template = self.convertdtd(posource, dtdtemplate, remove_untranslated=True)
print(newdtd_with_template)
assert newdtd_with_template == dtdexpected_with_template
newdtd_no_template = self.po2dtd(posource, remove_untranslated=True)
print(newdtd_no_template)
assert str(newdtd_no_template) == dtdexpected_no_template
def test_newlines_escapes(self):
"""check that we can handle a \n in the PO file"""
posource = '''#: simple.label\n#: simple.accesskey\nmsgid "A hard coded newline.\\n"\nmsgstr "Hart gekoeerde nuwe lyne\\n"\n'''
dtdtemplate = '<!ENTITY simple.label "A hard coded newline.\n">\n'
dtdexpected = '''<!ENTITY simple.label "Hart gekoeerde nuwe lyne\n">\n'''
dtdfile = self.merge2dtd(dtdtemplate, posource)
print(dtdfile)
assert str(dtdfile) == dtdexpected
def test_roundtrip_simple(self):
"""checks that simple strings make it through a dtd->po->dtd roundtrip"""
self.check_roundtrip('"Hello"')
self.check_roundtrip('"Hello Everybody"')
def test_roundtrip_escape(self):
"""checks that escapes in strings make it through a dtd->po->dtd roundtrip"""
self.check_roundtrip(r'"Simple Escape \ \n \\ \: \t \r "')
self.check_roundtrip(r'"End Line Escape \"')
def test_roundtrip_quotes(self):
"""Checks that quotes make it through a DTD->PO->DTD roundtrip.
Quotes may be escaped or not.
"""
# NOTE: during the roundtrip, if " quote mark is present, then it is
# converted to " and the resulting string is always enclosed
# between " characters independently of which quotation marks the
# original string is enclosed between. Thus the string cannot be
# compared with itself and therefore other string should be provided to
# compare with the result.
#
# Thus the string cannot be compared with itself and therefore another
# string should be provided to compare with the roundtrip result.
self.check_roundtrip(r"""'Quote Escape "" '""",
r'''"Quote Escape "" "''')
self.check_roundtrip(r'''"Double-Quote Escape "" "''')
self.check_roundtrip(r'''"Single-Quote ' "''')
self.check_roundtrip(r'''"Single-Quote Escape \' "''')
# NOTE: during the roundtrip, if " quote mark is present, then ' is
# converted to ' and " is converted to " Also the resulting
# string is always enclosed between " characters independently of which
# quotation marks the original string is enclosed between. Thus the
# string cannot be compared with itself and therefore another string
# should be provided to compare with the result.
#
# Thus the string cannot be compared with itself and therefore another
# string should be provided to compare with the roundtrip result.
self.check_roundtrip(r"""'Both Quotes "" '' '""",
r'''"Both Quotes "" '' "''')
self.check_roundtrip(r'''"Both Quotes "" '' "''')
# NOTE: during the roundtrip, if " is present, then ' is converted
# to ' Also the resulting string is always enclosed between "
# characters independently of which quotation marks the original string
# is enclosed between.
#
# Thus the string cannot be compared with itself and therefore another
# string should be provided to compare with the roundtrip result.
self.check_roundtrip(r'''"Both Quotes "" '' "''',
r'''"Both Quotes "" '' "''')
def test_roundtrip_amp(self):
"""Checks that quotes make it through a DTD->PO->DTD roundtrip.
Quotes may be escaped or not.
"""
self.check_roundtrip('"Colour & Light"')
def test_merging_entries_with_spaces_removed(self):
"""dtd2po removes pretty printed spaces, this tests that we can merge this back into the pretty printed dtd"""
posource = '''#: simple.label\nmsgid "First line then "\n"next lines."\nmsgstr "Eerste lyne en dan volgende lyne."\n'''
dtdtemplate = '<!ENTITY simple.label "First line then\n' + \
' next lines.">\n'
dtdexpected = '<!ENTITY simple.label "Eerste lyne en dan volgende lyne.">\n'
dtdfile = self.merge2dtd(dtdtemplate, posource)
print(dtdfile)
assert str(dtdfile) == dtdexpected
def test_preserving_spaces(self):
"""ensure that we preseve spaces between entity and value. Bug 1662"""
posource = '''#: simple.label\nmsgid "One"\nmsgstr "Een"\n'''
dtdtemplate = '<!ENTITY simple.label "One">\n'
dtdexpected = '<!ENTITY simple.label "Een">\n'
dtdfile = self.merge2dtd(dtdtemplate, posource)
print(dtdfile)
assert str(dtdfile) == dtdexpected
def test_preserving_spaces_after_value(self):
"""Preseve spaces after value. Bug 1662"""
# Space between value and >
posource = '''#: simple.label\nmsgid "One"\nmsgstr "Een"\n'''
dtdtemplate = '<!ENTITY simple.label "One" >\n'
dtdexpected = '<!ENTITY simple.label "Een" >\n'
dtdfile = self.merge2dtd(dtdtemplate, posource)
print(dtdfile)
assert str(dtdfile) == dtdexpected
# Space after >
dtdtemplate = '<!ENTITY simple.label "One"> \n'
dtdexpected = '<!ENTITY simple.label "Een"> \n'
dtdfile = self.merge2dtd(dtdtemplate, posource)
print(dtdfile)
assert str(dtdfile) == dtdexpected
def test_comments(self):
"""test that we preserve comments, bug 351"""
posource = '''#: name\nmsgid "Text"\nmsgstr "Teks"'''
dtdtemplate = '''<!ENTITY name "%s">\n<!-- \n\nexample -->\n'''
dtdfile = self.merge2dtd(dtdtemplate % "Text", posource)
print(dtdfile)
assert str(dtdfile) == dtdtemplate % "Teks"
def test_duplicates(self):
"""test that we convert duplicates back correctly to their respective entries."""
posource = r'''#: bookmarksMenu.label bookmarksMenu.accesskey
msgctxt "bookmarksMenu.label bookmarksMenu.accesskey"
msgid "&Bookmarks"
msgstr "Dipu&kutshwayo1"
#: bookmarksItem.title
msgctxt "bookmarksItem.title
msgid "Bookmarks"
msgstr "Dipukutshwayo2"
#: bookmarksButton.label
msgctxt "bookmarksButton.label"
msgid "Bookmarks"
msgstr "Dipukutshwayo3"
'''
dtdtemplate = r'''<!ENTITY bookmarksMenu.label "Bookmarks">
<!ENTITY bookmarksMenu.accesskey "B">
<!ENTITY bookmarksItem.title "Bookmarks">
<!ENTITY bookmarksButton.label "Bookmarks">
'''
dtdexpected = r'''<!ENTITY bookmarksMenu.label "Dipukutshwayo1">
<!ENTITY bookmarksMenu.accesskey "k">
<!ENTITY bookmarksItem.title "Dipukutshwayo2">
<!ENTITY bookmarksButton.label "Dipukutshwayo3">
'''
dtdfile = self.merge2dtd(dtdtemplate, posource)
print(dtdfile)
assert str(dtdfile) == dtdexpected
class TestPO2DTDCommand(test_convert.TestConvertCommand, TestPO2DTD):
"""Tests running actual po2dtd commands on files"""
convertmodule = po2dtd
defaultoptions = {"progress": "none"}
# TODO: because of having 2 base classes, we need to call all their setup and teardown methods
# (otherwise we won't reset the warnings etc)
def setup_method(self, method):
"""call both base classes setup_methods"""
test_convert.TestConvertCommand.setup_method(self, method)
TestPO2DTD.setup_method(self, method)
def teardown_method(self, method):
"""call both base classes teardown_methods"""
test_convert.TestConvertCommand.teardown_method(self, method)
TestPO2DTD.teardown_method(self, method)
def test_help(self):
"""tests getting help"""
options = test_convert.TestConvertCommand.test_help(self)
options = self.help_check(options, "-t TEMPLATE, --template=TEMPLATE")
options = self.help_check(options, "--fuzzy")
options = self.help_check(options, "--threshold=PERCENT")
options = self.help_check(options, "--removeuntranslated")
options = self.help_check(options, "--nofuzzy", last=True)
|
utkbansal/kuma
|
vendor/packages/translate/convert/test_po2dtd.py
|
Python
|
mpl-2.0
| 22,788 | 0.001668 |
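The round-trip helpers above push a DTD entity through dtd2po and back through po2dtd; a condensed sketch of that flow outside the test class, reusing the same converter entry points (the entity value is arbitrary):

# Condensed version of roundtripsource() above; the entity value is arbitrary.
from translate.convert import dtd2po, po2dtd
from translate.misc import wStringIO

dtdsource = '<!ENTITY Test.RoundTrip "Colour &amp; Light">\n'
pofile = wStringIO.StringIO()
dtd2po.convertdtd(wStringIO.StringIO(dtdsource), pofile, wStringIO.StringIO(dtdsource))

dtdout = wStringIO.StringIO()
po2dtd.convertdtd(wStringIO.StringIO(pofile.getvalue()), dtdout, wStringIO.StringIO(dtdsource))
print(dtdout.getvalue())  # with no translation filled in, the template value comes back unchanged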
#!/usr/bin/python
#
# This file is part of django-ship project.
#
# Copyright (C) 2011-2020 William Oliveira de Lagos <william.lagos@icloud.com>
#
# Shipping is free software: you can redistribute it and/or modify
# it under the terms of the Lesser GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shipping is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Shipping. If not, see <http://www.gnu.org/licenses/>.
#
from django.utils.translation import ugettext as _
try:
from mezzanine.conf import settings
from cartridge.shop.utils import set_shipping
from cartridge.shop.models import Cart
from cartridge.shop.forms import OrderForm
except ImportError as e:
pass
from shipping.codes import CorreiosCode
from shipping.fretefacil import FreteFacilShippingService
from shipping.correios import CorreiosShippingService
from shipping.models import DeliverableProperty
def fretefacil_shipping_handler(request, form, order=None):
if request.session.get("free_shipping"): return
settings.use_editable()
if form is not None: user_postcode = form.cleaned_data['shipping_detail_postcode']
else: user_postcode = settings.STORE_POSTCODE
shippingservice = FreteFacilShippingService()
cart = Cart.objects.from_request(request)
delivery_value = 0.0
if cart.has_items():
for product in cart:
properties = DeliverableProperty.objects.filter(sku=product.sku)
if len(properties) > 0:
props = properties[0]
deliverable = shippingservice.create_deliverable(settings.STORE_POSTCODE,
user_postcode,
props.width,
props.height,
props.length,
props.weight)
delivery_value += float(shippingservice.delivery_value(deliverable))
set_shipping(request, _("Correios"),delivery_value)
def correios_create_deliverable(obj,service,store_postcode,user_postcode,width,height,length,weight):
obj.cep_origem = store_postcode
obj.altura = height
obj.largura = width
obj.comprimento = length
obj.peso = weight
obj.servico = service
return {
'postcode':user_postcode,
'service':service
}
def correios_delivery_value(shippingservice,deliverable):
shippingservice(deliverable['postcode'],deliverable['service'])
return '.'.join(shippingservice.results[deliverable['service']][1].split(','))
def sedex_shipping_handler(request, form, order=None):
if request.session.get("free_shipping"): return
settings.use_editable()
if form is not None: user_postcode = form.cleaned_data['shipping_detail_postcode']
else: user_postcode = settings.STORE_POSTCODE
shippingservice = CorreiosShippingService()
cart = Cart.objects.from_request(request)
delivery_value = 0.0
if cart.has_items():
for product in cart:
properties = DeliverableProperty.objects.filter(sku=product.sku)
if len(properties) > 0:
props = properties[0]
deliverable = correios_create_deliverable(shippingservice,
'SEDEX',
settings.STORE_POSTCODE,
user_postcode,
props.width,
props.height,
props.length,
props.weight)
delivery_value += float(correios_delivery_value(shippingservice,deliverable))
set_shipping(request, _("Correios"),delivery_value)
def shipping_payment_handler(request, order_form, order):
data = order_form.cleaned_data
shipping = order.shipping_total
code = CorreiosCode()
shipping_data = code.consulta(order.billing_detail_postcode)
order.billing_detail_street = '%s %s %s' % (shipping_data['tipo_logradouro'],
shipping_data['logradouro'],
data['billing_detail_complement'])
order.billing_detail_city = shipping_data['cidade']
order.billing_detail_state = shipping_data['uf']
order.billing_detail_country = settings.STORE_COUNTRY
order.save()
currency = settings.SHOP_CURRENCY
cart = Cart.objects.from_request(request)
cart_items = []
has_shipping = False
for item in cart.items.all():
quantity = len(DeliverableProperty.objects.filter(sku=item.sku))
if quantity > 0: has_shipping = True
cart_items.append({
"name":item.description,
"sku":item.sku,
"price":'%.2f' % item.unit_price,
"currency":currency,
"quantity":item.quantity
})
if has_shipping:
cart_items.append({
"name": "Frete via SEDEX",
"sku":"1",
"price":'%.2f' % shipping,
"currency":currency,
"quantity":1
})
return shipping
|
efforia/django-shipping
|
shipping/providers/default.py
|
Python
|
lgpl-3.0
| 5,966 | 0.008213 |
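The Correios helpers above separate building a deliverable from looking up its price; a hedged sketch of how they combine for a single parcel (the postcodes and dimensions are placeholders, and the service object is used exactly as in the handlers above):

# Hypothetical usage of correios_create_deliverable/correios_delivery_value; values are placeholders.
from shipping.correios import CorreiosShippingService

service = CorreiosShippingService()
deliverable = correios_create_deliverable(service, 'SEDEX',
                                          '01310100',      # store postcode (placeholder)
                                          '20040020',      # customer postcode (placeholder)
                                          11, 2, 16, 0.3)  # width, height, length, weight
price = float(correios_delivery_value(service, deliverable))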
# Solution to exercise MaxCounters
# http://www.codility.com/train/
def solution(N, A):
counters = [0 for _ in range(N)]
last_max_counter = 0
current_max_counter = 0
# Iterate through A. At each step, the value of counter i is
# last_max_counter or counters[i], whichever is greater
for a in A:
if a == N+1:
last_max_counter = current_max_counter
elif counters[a-1] < last_max_counter:
counters[a-1] = last_max_counter + 1
current_max_counter = max(current_max_counter, counters[a-1])
else:
counters[a-1] += 1
current_max_counter = max(current_max_counter, counters[a-1])
# Make a pass through counters to update the ones that
# have not changed since the last max_counter opperation
for i in range(N):
if counters[i] < last_max_counter:
counters[i] = last_max_counter
return counters
|
jmaidens/Codility
|
MaxCounters.py
|
Python
|
mit
| 951 | 0.005258 |
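A worked call on the classic Codility sample makes the two-phase bookkeeping concrete (the result follows from tracing the loops above):

# Counter 3 reaches 2, the max-counter operation (value 6 == N+1) raises the floor to 2,
# then counters 1 and 4 grow on top of that floor; the final pass lifts untouched counters to 2.
print(solution(5, [3, 4, 4, 6, 1, 4, 4]))  # -> [3, 2, 2, 4, 2]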
#__*__coding:utf-8__*__
import urllib
import urllib2
URL_IP = 'http://127.0.0.1:8000/ip'
URL_GET = 'http://127.0.0.1:8000/get'
def use_simple_urllib2():
response = urllib2.urlopen(URL_IP)
print '>>>>Response Headers:'
print response.info()
print '>>>>Response Body:'
print ''.join([line for line in response.readlines()])
def use_params_urllib2():
    # Build the request parameters
params = urllib.urlencode({'param1':'hello','param2':'world'})
print 'Request Params:'
print params
    # Send the request
response = urllib2.urlopen('?'.join([URL_GET, '%s']) % params)
    # Handle the response
print '>>>>Response Headers:'
print response.info()
print '>>>>Status Code:'
print response.getcode()
print '>>>>Response Body:'
print ''.join([line for line in response.readlines()])
if __name__== '__main__':
print '>>>Use simple urllib2:'
use_simple_urllib2()
print
print '>>>Use params urllib2:'
use_params_urllib2()
|
ctenix/pytheway
|
imooc_requests_urllib.py
|
Python
|
gpl-3.0
| 978 | 0.011579 |
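The params branch above reduces to urlencode turning a dict into a query string and urlopen fetching the joined URL; a minimal Python 2 sketch:

# Minimal sketch of the query-string construction used in use_params_urllib2().
import urllib
params = urllib.urlencode({'param1': 'hello', 'param2': 'world'})
print params                                            # e.g. param1=hello&param2=world (dict order is not guaranteed)
print '?'.join(['http://127.0.0.1:8000/get', params])   # the URL actually requested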
from sklearn2sql_heroku.tests.classification import generic as class_gen
class_gen.test_model("GaussianNB" , "FourClass_500" , "sqlite")
|
antoinecarme/sklearn2sql_heroku
|
tests/classification/FourClass_500/ws_FourClass_500_GaussianNB_sqlite_code_gen.py
|
Python
|
bsd-3-clause
| 139 | 0.014388 |
__author__ = 'Lenusik'
from model.contact import Contact
import re
class ContactHelper:
def __init__(self, app):
self.app = app
contact_cache = None
def get_contact_list(self):
if self.contact_cache is None:
driver = self.app.driver
self.app.open_home_page()
self.contact_cache = []
for row in driver.find_elements_by_name("entry"):
cells = row.find_elements_by_tag_name("td")
lastname = cells[1].text
id = cells[0].find_element_by_tag_name("input").get_attribute('value')
firstname = cells[2].text
all_phones = cells[5].text
self.contact_cache.append(Contact(firstname=firstname, lastname=lastname, id=id,
all_phones_from_home_page=all_phones))
return list(self.contact_cache)
def open_contact_to_edit_by_index(self, index):
driver = self.app.driver
self.app.open_home_page()
row = driver.find_elements_by_name("entry")[index]
cell = row.find_elements_by_tag_name("td")[7]
cell.find_element_by_tag_name("a").click()
def open_contact_view_by_index(self, index):
driver = self.app.driver
self.app.open_home_page()
row = driver.find_elements_by_name("entry")[index]
cell = row.find_elements_by_tag_name("td")[6]
cell.find_element_by_tag_name("a").click()
def get_contact_info_from_edit_page(self, index):
self.open_contact_to_edit_by_index(index)
driver = self.app.driver
firstname = driver.find_element_by_name("firstname").get_attribute("value")
lastname = driver.find_element_by_name("lastname").get_attribute("value")
id = driver.find_element_by_name("id").get_attribute("value")
homephone = driver.find_element_by_name("home").get_attribute("value")
workphone = driver.find_element_by_name("work").get_attribute("value")
mobilephone = driver.find_element_by_name("mobile").get_attribute("value")
secondaryphone = driver.find_element_by_name("phone2").get_attribute("value")
return Contact(firstname=firstname, lastname=lastname, id=id,
homephone=homephone, workphone=workphone, mobilephone=mobilephone, secondaryphone=secondaryphone)
def get_contact_from_view_page(self, index):
driver = self.app.driver
self.open_contact_view_by_index(index)
text = driver.find_element_by_id("content").text
homephone = re.search("H: (.*)", text).group(1)
workphone = re.search("W: (.*)", text).group(1)
mobilephone = re.search("M: (.*)", text).group(1)
secondaryphone = re.search("P: (.*)", text).group(1)
return Contact(homephone=homephone, workphone=workphone, mobilephone=mobilephone, secondaryphone=secondaryphone)
|
Lenusik/python
|
fixture/contact.py
|
Python
|
gpl-2.0
| 2,909 | 0.003438 |
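get_contact_from_view_page above pulls each phone number out of the rendered page text with line-anchored regexes; a small sketch with a made-up page body shows what each pattern captures:

# Sketch of the regex extraction used in get_contact_from_view_page(); the text is made up.
import re
text = "John Doe\nH: 111-11-11\nM: 222-22-22\nW: 333-33-33\nP: 444-44-44"
homephone = re.search("H: (.*)", text).group(1)       # '111-11-11'
mobilephone = re.search("M: (.*)", text).group(1)     # '222-22-22'
workphone = re.search("W: (.*)", text).group(1)       # '333-33-33'
secondaryphone = re.search("P: (.*)", text).group(1)  # '444-44-44'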
from yandextank.plugins.Aggregator import SecondAggregateData
from yandextank.plugins.Autostop import AutostopPlugin
from Tank_Test import TankTestCase
import tempfile
import unittest
class AutostopTestCase(TankTestCase):
def setUp(self):
core = self.get_core()
core.load_configs(['config/autostop.conf'])
core.load_plugins()
core.plugins_configure()
self.foo = AutostopPlugin(core)
def tearDown(self):
del self.foo
self.foo = None
def test_run(self):
data = SecondAggregateData()
data.overall.avg_response_time = 11
self.foo.core.set_option(self.foo.SECTION, "autostop", "time(1,10)")
self.foo.configure()
self.foo.prepare_test()
self.foo.start_test()
for n in range(1, 15):
self.foo.aggregate_second(data)
if self.foo.is_test_finished() < 0:
raise RuntimeError()
self.foo.end_test(0)
def test_run_http(self):
data = SecondAggregateData()
data.overall.http_codes = {'200':11}
self.foo.core.set_option(self.foo.SECTION, "autostop", "http (200, 10, 5 )\nhttp (3xx, 1.5%, 10m)")
self.foo.configure()
self.foo.prepare_test()
self.foo.start_test()
for n in range(1, 15):
self.foo.aggregate_second(data)
if self.foo.is_test_finished() < 0:
raise RuntimeError()
self.foo.end_test(0)
def test_run_net(self):
data = SecondAggregateData()
data.overall.net_codes = {71:11}
self.foo.core.set_option(self.foo.SECTION, "autostop", "net (71, 1, 5)\nnet (xx, 1.5%, 10m )")
self.foo.configure()
self.foo.prepare_test()
self.foo.start_test()
for n in range(1, 15):
self.foo.aggregate_second(data)
if self.foo.is_test_finished() < 0:
raise RuntimeError()
self.foo.end_test(0)
def test_run_quan(self):
data = SecondAggregateData()
data.overall.quantiles = {99.0:11}
self.foo.core.set_option(self.foo.SECTION, "autostop", "quantile(99,2,3)")
self.foo.configure()
self.foo.prepare_test()
self.foo.start_test()
for n in range(1, 15):
self.foo.aggregate_second(data)
if self.foo.is_test_finished() < 0:
raise RuntimeError()
self.foo.end_test(0)
def test_run_false_trigger_bug(self):
data = SecondAggregateData()
data.overall.http_codes = {}
self.foo.core.set_option(self.foo.SECTION, "autostop", "http (5xx, 100%, 1)")
self.foo.configure()
self.foo.prepare_test()
self.foo.start_test()
for n in range(1, 15):
self.foo.aggregate_second(data)
if self.foo.is_test_finished() >= 0:
raise RuntimeError()
self.foo.end_test(0)
if __name__ == '__main__':
unittest.main()
|
asekretenko/yandex-tank
|
tests/Autostop_Test.py
|
Python
|
lgpl-2.1
| 3,041 | 0.007892 |
# -*- encoding: utf-8 -*-
__author__ = 'ray'
__date__ = '2/27/15'
from flask import jsonify, abort
from flask.views import MethodView
from ..models import ThemeModel
class ThemeView(MethodView):
""" Theme View
Retrieve description of a list of available themes.
:param theme_model: A theme model that manages themes.
:type theme_model: :class:`~stonemason.service.models.ThemeModel`
"""
def __init__(self, theme_model):
assert isinstance(theme_model, ThemeModel)
self._theme_model = theme_model
def get(self, tag):
"""Return description of the theme. Raise :http:statuscode:`404` if
not found.
        :param tag: Tag of a theme, or None to list all themes.
        :type tag: str
"""
if tag is None:
collection = list()
for theme in self._theme_model.iter_themes():
collection.append(theme.to_dict())
return jsonify(result=collection)
else:
theme = self._theme_model.get_theme(tag)
if theme is None:
abort(404)
return jsonify(result=theme.to_dict())
|
Kotaimen/stonemason
|
stonemason/service/tileserver/themes/views.py
|
Python
|
mit
| 1,132 | 0.000883 |
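ThemeView is a plain Flask MethodView, so hooking it into an application follows the standard as_view/add_url_rule pattern; a hypothetical sketch (the URL rules, endpoint name and theme_model instance are illustrative, not taken from the project):

# Hypothetical wiring of ThemeView; routes, endpoint name and theme_model are assumptions.
from flask import Flask

app = Flask(__name__)
theme_view = ThemeView.as_view('themes', theme_model=theme_model)  # theme_model: a ThemeModel instance
app.add_url_rule('/themes/', defaults={'tag': None}, view_func=theme_view, methods=['GET'])
app.add_url_rule('/themes/<tag>', view_func=theme_view, methods=['GET'])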
import warnings
warnings.simplefilter(action="ignore", category=RuntimeWarning)
warnings.simplefilter(action="ignore", category=PendingDeprecationWarning)
from hicexplorer import hicBuildMatrix, hicInfo
from hicmatrix import HiCMatrix as hm
from tempfile import NamedTemporaryFile, mkdtemp
import shutil
import os
import numpy.testing as nt
ROOT = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), "test_data/")
sam_R1 = ROOT + "small_test_R1_unsorted.bam"
sam_R2 = ROOT + "small_test_R2_unsorted.bam"
dpnii_file = ROOT + "DpnII.bed"
def are_files_equal(file1, file2, delta=None):
equal = True
if delta:
mismatches = 0
with open(file1) as textfile1, open(file2) as textfile2:
for x, y in zip(textfile1, textfile2):
if x.startswith('File'):
continue
if x != y:
if delta:
mismatches += 1
if mismatches > delta:
equal = False
break
else:
equal = False
break
return equal
def test_build_matrix(capsys):
outfile = NamedTemporaryFile(suffix='.h5', delete=False)
outfile.close()
qc_folder = mkdtemp(prefix="testQC_")
args = "-s {} {} --outFileName {} -bs 5000 -b /tmp/test.bam --QCfolder {} --threads 4".format(sam_R1, sam_R2,
outfile.name,
qc_folder).split()
hicBuildMatrix.main(args)
test = hm.hiCMatrix(ROOT + "small_test_matrix_parallel.h5")
new = hm.hiCMatrix(outfile.name)
nt.assert_equal(test.matrix.data, new.matrix.data)
nt.assert_equal(test.cut_intervals, new.cut_intervals)
# print("MATRIX NAME:", outfile.name)
print(set(os.listdir(ROOT + "QC/")))
assert are_files_equal(ROOT + "QC/QC.log", qc_folder + "/QC.log")
assert set(os.listdir(ROOT + "QC/")) == set(os.listdir(qc_folder))
# accept delta of 60 kb, file size is around 4.5 MB
assert abs(os.path.getsize(ROOT + "small_test_matrix_result.bam") - os.path.getsize("/tmp/test.bam")) < 64000
os.unlink(outfile.name)
shutil.rmtree(qc_folder)
os.unlink("/tmp/test.bam")
def test_build_matrix_cooler():
outfile = NamedTemporaryFile(suffix='.cool', delete=False)
outfile.close()
qc_folder = mkdtemp(prefix="testQC_")
args = "-s {} {} --outFileName {} -bs 5000 -b /tmp/test.bam --QCfolder {} --threads 4".format(sam_R1, sam_R2,
outfile.name,
qc_folder).split()
hicBuildMatrix.main(args)
test = hm.hiCMatrix(ROOT + "small_test_matrix_parallel.h5")
new = hm.hiCMatrix(outfile.name)
nt.assert_equal(test.matrix.data, new.matrix.data)
# nt.assert_equal(test.cut_intervals, new.cut_intervals)
nt.assert_equal(len(new.cut_intervals), len(test.cut_intervals))
cut_interval_new_ = []
cut_interval_test_ = []
for x in new.cut_intervals:
cut_interval_new_.append(x[:3])
for x in test.cut_intervals:
cut_interval_test_.append(x[:3])
nt.assert_equal(cut_interval_new_, cut_interval_test_)
# print(set(os.listdir(ROOT + "QC/")))
assert are_files_equal(ROOT + "QC/QC.log", qc_folder + "/QC.log")
assert set(os.listdir(ROOT + "QC/")) == set(os.listdir(qc_folder))
os.unlink(outfile.name)
shutil.rmtree(qc_folder)
def test_build_matrix_cooler_metadata():
outfile = NamedTemporaryFile(suffix='.cool', delete=False)
outfile.close()
qc_folder = mkdtemp(prefix="testQC_")
args = "-s {} {} --outFileName {} -bs 5000 -b /tmp/test.bam --QCfolder {} --threads 4 --genomeAssembly dm3".format(sam_R1, sam_R2,
outfile.name,
qc_folder).split()
hicBuildMatrix.main(args)
test = hm.hiCMatrix(ROOT + "small_test_matrix_parallel.h5")
new = hm.hiCMatrix(outfile.name)
nt.assert_equal(test.matrix.data, new.matrix.data)
# nt.assert_equal(test.cut_intervals, new.cut_intervals)
nt.assert_equal(len(new.cut_intervals), len(test.cut_intervals))
cut_interval_new_ = []
cut_interval_test_ = []
for x in new.cut_intervals:
cut_interval_new_.append(x[:3])
for x in test.cut_intervals:
cut_interval_test_.append(x[:3])
nt.assert_equal(cut_interval_new_, cut_interval_test_)
# print(set(os.listdir(ROOT + "QC/")))
assert are_files_equal(ROOT + "QC/QC.log", qc_folder + "/QC.log")
assert set(os.listdir(ROOT + "QC/")) == set(os.listdir(qc_folder))
outfile_metadata = NamedTemporaryFile(suffix='.txt', delete=False)
outfile_metadata.close()
args = "-m {} -o {}".format(outfile.name, outfile_metadata.name).split()
hicInfo.main(args)
assert are_files_equal(ROOT + "hicBuildMatrix/metadata.txt", outfile_metadata.name, delta=7)
os.unlink(outfile.name)
shutil.rmtree(qc_folder)
def test_build_matrix_cooler_multiple():
outfile = NamedTemporaryFile(suffix='.cool', delete=False)
outfile.close()
qc_folder = mkdtemp(prefix="testQC_")
args = "-s {} {} --outFileName {} -bs 5000 10000 20000 -b /tmp/test.bam --QCfolder {} --threads 4".format(sam_R1, sam_R2,
outfile.name,
qc_folder).split()
hicBuildMatrix.main(args)
test_5000 = hm.hiCMatrix(ROOT + "hicBuildMatrix/multi_small_test_matrix.cool::/resolutions/5000")
test_10000 = hm.hiCMatrix(ROOT + "hicBuildMatrix/multi_small_test_matrix.cool::/resolutions/10000")
test_20000 = hm.hiCMatrix(ROOT + "hicBuildMatrix/multi_small_test_matrix.cool::/resolutions/20000")
new_5000 = hm.hiCMatrix(outfile.name + '::/resolutions/5000')
new_10000 = hm.hiCMatrix(outfile.name + '::/resolutions/10000')
new_20000 = hm.hiCMatrix(outfile.name + '::/resolutions/20000')
nt.assert_equal(test_5000.matrix.data, new_5000.matrix.data)
nt.assert_equal(test_10000.matrix.data, new_10000.matrix.data)
nt.assert_equal(test_20000.matrix.data, new_20000.matrix.data)
# nt.assert_equal(test.cut_intervals, new.cut_intervals)
nt.assert_equal(len(new_5000.cut_intervals), len(test_5000.cut_intervals))
nt.assert_equal(len(new_10000.cut_intervals), len(test_10000.cut_intervals))
nt.assert_equal(len(new_20000.cut_intervals), len(test_20000.cut_intervals))
cut_interval_new_ = []
cut_interval_test_ = []
for x in new_5000.cut_intervals:
cut_interval_new_.append(x[:3])
for x in test_5000.cut_intervals:
cut_interval_test_.append(x[:3])
nt.assert_equal(cut_interval_new_, cut_interval_test_)
cut_interval_new_ = []
cut_interval_test_ = []
for x in new_10000.cut_intervals:
cut_interval_new_.append(x[:3])
for x in test_10000.cut_intervals:
cut_interval_test_.append(x[:3])
nt.assert_equal(cut_interval_new_, cut_interval_test_)
cut_interval_new_ = []
cut_interval_test_ = []
for x in new_20000.cut_intervals:
cut_interval_new_.append(x[:3])
for x in test_20000.cut_intervals:
cut_interval_test_.append(x[:3])
nt.assert_equal(cut_interval_new_, cut_interval_test_)
# print(set(os.listdir(ROOT + "QC/")))
assert are_files_equal(ROOT + "QC/QC.log", qc_folder + "/QC.log")
assert set(os.listdir(ROOT + "QC/")) == set(os.listdir(qc_folder))
os.unlink(outfile.name)
shutil.rmtree(qc_folder)
def test_build_matrix_rf():
outfile = NamedTemporaryFile(suffix='.h5', delete=False)
outfile.close()
qc_folder = mkdtemp(prefix="testQC_")
args = "-s {} {} -rs {} --outFileName {} --QCfolder {} " \
"--restrictionSequence GATC " \
"--danglingSequence GATC " \
"--minDistance 150 " \
"--maxLibraryInsertSize 1500 --threads 4".format(sam_R1, sam_R2, dpnii_file,
outfile.name,
qc_folder).split()
hicBuildMatrix.main(args)
test = hm.hiCMatrix(ROOT + "small_test_rf_matrix.h5")
new = hm.hiCMatrix(outfile.name)
nt.assert_equal(test.matrix.data, new.matrix.data)
nt.assert_equal(test.cut_intervals, new.cut_intervals)
print(set(os.listdir(ROOT + "QC_rc/")))
assert are_files_equal(ROOT + "QC_rc/QC.log", qc_folder + "/QC.log")
assert set(os.listdir(ROOT + "QC_rc/")) == set(os.listdir(qc_folder))
os.unlink(outfile.name)
shutil.rmtree(qc_folder)
|
maxplanck-ie/HiCExplorer
|
hicexplorer/test/long_run/test_hicBuildMatrix.py
|
Python
|
gpl-2.0
| 9,095 | 0.002969 |
#!/usr/bin/python
#coding=utf-8
'''
@author: sheng
@license:
'''
import unittest
from meridian.acupoints import zuwuli233
class TestZuwuli233Functions(unittest.TestCase):
def setUp(self):
pass
def test_xxx(self):
pass
if __name__ == '__main__':
unittest.main()
|
sinotradition/meridian
|
meridian/tst/acupoints/test_zuwuli233.py
|
Python
|
apache-2.0
| 299 | 0.006689 |
import sys
import os
from subprocess import Popen, PIPE
import unittest
class CmdlineTest(unittest.TestCase):
def setUp(self):
self.env = os.environ.copy()
if 'PYTHONPATH' in os.environ:
self.env['PYTHONPATH'] = os.environ['PYTHONPATH']
self.env['SCRAPY_SETTINGS_MODULE'] = 'scrapy.tests.test_cmdline.settings'
def _execute(self, *new_args, **kwargs):
args = (sys.executable, '-m', 'scrapy.cmdline') + new_args
proc = Popen(args, stdout=PIPE, stderr=PIPE, env=self.env, **kwargs)
comm = proc.communicate()
return comm[0].strip()
def test_default_settings(self):
self.assertEqual(self._execute('settings', '--get', 'TEST1'), \
'default + loaded + started')
def test_override_settings_using_set_arg(self):
self.assertEqual(self._execute('settings', '--get', 'TEST1', '-s', 'TEST1=override'), \
'override + loaded + started')
def test_override_settings_using_envvar(self):
self.env['SCRAPY_TEST1'] = 'override'
self.assertEqual(self._execute('settings', '--get', 'TEST1'), \
'override + loaded + started')
|
willingc/oh-mainline
|
vendor/packages/scrapy/scrapy/tests/test_cmdline/__init__.py
|
Python
|
agpl-3.0
| 1,166 | 0.008576 |
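The test above drives scrapy.cmdline in a subprocess and checks three override layers: the default settings module, the -s command-line option, and a SCRAPY_-prefixed environment variable. A rough standalone equivalent of _execute(), reusing the test's own settings module and key:

# Rough standalone version of _execute() above for the environment-variable case.
import os, sys
from subprocess import Popen, PIPE

env = os.environ.copy()
env['SCRAPY_SETTINGS_MODULE'] = 'scrapy.tests.test_cmdline.settings'
env['SCRAPY_TEST1'] = 'override'   # SCRAPY_-prefixed variables override settings

proc = Popen((sys.executable, '-m', 'scrapy.cmdline', 'settings', '--get', 'TEST1'),
             stdout=PIPE, stderr=PIPE, env=env)
print(proc.communicate()[0].strip())   # the test expects 'override + loaded + started'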
# Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import concurrent.futures
from unittest import mock
from oslo_config import cfg
from oslo_config import fixture as oslo_fixture
from taskflow import engines as tf_engines
from octavia.common import base_taskflow
import octavia.tests.unit.base as base
MAX_WORKERS = 1
ENGINE = 'parallel'
_engine_mock = mock.MagicMock()
class TestBaseTaskFlowEngine(base.TestCase):
def setUp(self):
conf = oslo_fixture.Config(cfg.CONF)
conf.config(group="task_flow", max_workers=MAX_WORKERS)
conf.config(group="task_flow", engine=ENGINE)
conf.config(group="task_flow", disable_revert=True)
super().setUp()
@mock.patch('concurrent.futures.ThreadPoolExecutor',
return_value='TESTEXECUTOR')
@mock.patch('taskflow.engines.load',
return_value=_engine_mock)
def test_taskflow_load(self,
mock_tf_engine_load,
mock_ThreadPoolExecutor):
# Test __init__
base_taskflow_engine = base_taskflow.BaseTaskFlowEngine()
concurrent.futures.ThreadPoolExecutor.assert_called_once_with(
max_workers=MAX_WORKERS)
# Test taskflow_load
base_taskflow_engine.taskflow_load('TEST')
tf_engines.load.assert_called_once_with(
'TEST',
engine=ENGINE,
executor='TESTEXECUTOR',
never_resolve=True)
_engine_mock.compile.assert_called_once_with()
_engine_mock.prepare.assert_called_once_with()
class TestTaskFlowServiceController(base.TestCase):
_mock_uuid = '9a2ebc48-cd3e-429e-aa04-e32f5fc5442a'
def setUp(self):
self.conf = oslo_fixture.Config(cfg.CONF)
self.conf.config(group="task_flow", engine='parallel')
self.conf.config(group="task_flow", max_workers=MAX_WORKERS)
self.driver_mock = mock.MagicMock()
self.persistence_mock = mock.MagicMock()
self.jobboard_mock = mock.MagicMock()
self.driver_mock.job_board.return_value = self.jobboard_mock
self.driver_mock.persistence_driver.get_persistence.return_value = (
self.persistence_mock)
self.service_controller = base_taskflow.TaskFlowServiceController(
self.driver_mock)
super().setUp()
@mock.patch('oslo_utils.uuidutils.generate_uuid', return_value=_mock_uuid)
@mock.patch('taskflow.engines.save_factory_details')
def test_run_poster(self, mock_engines, mockuuid):
flow_factory = mock.MagicMock()
flow_factory.__name__ = 'testname'
job_name = 'testname-%s' % self._mock_uuid
job_details = {'store': 'test'}
with mock.patch.object(self.service_controller, '_wait_for_job'
) as wait:
uuid = self.service_controller.run_poster(flow_factory,
**job_details)
save_logbook = self.persistence_mock.__enter__().get_connection(
).save_logbook
save_logbook.assert_called()
self.assertEqual(job_name, save_logbook.call_args[0][0].name)
mock_engines.assert_called()
save_args = mock_engines.call_args
self.assertEqual(job_name, save_args[0][0].name)
self.assertEqual(self._mock_uuid, save_args[0][0].uuid)
self.assertEqual(flow_factory, save_args[0][1])
self.assertEqual(self.persistence_mock.__enter__(),
save_args[1]['backend'])
self.jobboard_mock.__enter__().post.assert_called()
post_args = self.jobboard_mock.__enter__().post.call_args
self.assertEqual(job_name, post_args[0][0])
self.assertEqual(job_details, post_args[1]['details'])
wait.assert_called()
self.assertEqual(self._mock_uuid, uuid)
def test__wait_for_job(self):
job1 = mock.MagicMock()
job1.wait.side_effect = [False, True]
job2 = mock.MagicMock()
job2.wait.side_effect = [False, True]
job3 = mock.MagicMock()
job3.wait.return_value = True
job_board = mock.MagicMock()
job_board.iterjobs.side_effect = [
[job1, job2, job3],
[job1, job2]
]
self.service_controller._wait_for_job(job_board)
job1.extend_expiry.assert_called_once()
job2.extend_expiry.assert_called_once()
job3.extend_expiry.assert_not_called()
@mock.patch('octavia.common.base_taskflow.RedisDynamicLoggingConductor')
@mock.patch('octavia.common.base_taskflow.DynamicLoggingConductor')
def test_run_conductor(self, dynamiccond, rediscond):
self.service_controller.run_conductor("test")
rediscond.assert_called_once_with(
"test", self.jobboard_mock.__enter__(),
persistence=self.persistence_mock.__enter__(),
engine='parallel',
engine_options={
'max_workers': MAX_WORKERS,
})
self.conf.config(group="task_flow",
jobboard_backend_driver='zookeeper_taskflow_driver')
self.service_controller.run_conductor("test2")
dynamiccond.assert_called_once_with(
"test2", self.jobboard_mock.__enter__(),
persistence=self.persistence_mock.__enter__(),
engine='parallel')
|
openstack/octavia
|
octavia/tests/unit/common/test_base_taskflow.py
|
Python
|
apache-2.0
| 5,974 | 0 |
from unicodeconverter import convertToUnicode
def evaluateBoolean(b):
if isinstance(b, bool):
return b
if isinstance(b, str):
b = convertToUnicode(b)
if isinstance(b, unicode):
if b.lower() == u"false":
return False
elif b.lower() == u"true":
return True
elif b.lower() == u"no":
return False
elif b.lower() == u"yes":
return True
else:
try:
return bool(int(b))
except:
return True
else:
try:
return bool(int(b))
except:
return True
|
hiidef/hiispider
|
legacy/evaluateboolean.py
|
Python
|
mit
| 650 | 0.004615 |
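The coercion rules in evaluateBoolean are easiest to read off a few concrete calls (each result follows directly from the branches above):

# Results traced from the branches of evaluateBoolean().
evaluateBoolean(True)     # True  (already a bool)
evaluateBoolean("No")     # False (case-insensitive 'no'/'false')
evaluateBoolean("1")      # True  (falls through to bool(int("1")))
evaluateBoolean("0")      # False (bool(int("0")))
evaluateBoolean("maybe")  # True  (int() fails, so it defaults to True)
evaluateBoolean(0)        # False (non-string path: bool(int(0)))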
import os
class ImageStore:
def __init__(self, root):
self.root = root
def exists(self, image):
image.root = self.root
return os.path.isfile(image.path)
def create(self, image):
image.root = self.root
image.generate()
|
CptSpaceToaster/memegen
|
memegen/stores/image.py
|
Python
|
mit
| 275 | 0 |
# -*- coding: utf-8 -*-
#
# Copyright 2004-2020 University of Oslo, Norway
#
# This file is part of Cerebrum.
#
# Cerebrum is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Cerebrum is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Cerebrum; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
import logging
from collections import defaultdict
from six import text_type
from Cerebrum import Entity
from Cerebrum.modules.no.OrgLDIF import OrgLdifEntitlementsMixin
from Cerebrum.modules.LDIFutils import (
attr_unique,
normalize_string,
)
from Cerebrum.Utils import make_timer
logger = logging.getLogger(__name__)
class OrgLDIFHiAMixin(OrgLdifEntitlementsMixin):
"""Mixin class for norEduLDIFMixin(OrgLDIF) with HiA modifications."""
def __init__(self, *args, **kwargs):
super(OrgLDIFHiAMixin, self).__init__(*args, **kwargs)
self.attr2syntax['mobile'] = self.attr2syntax['telephoneNumber']
self.attr2syntax['roomNumber'] = (None, None, normalize_string)
def init_attr2id2contacts(self):
# Changes from the original:
# - Get phone and fax from system_manual, others from system_sap.
# - Add mobile and roomNumber.
sap, manual = self.const.system_sap, self.const.system_manual
contacts = [
(attr, self.get_contacts(contact_type=contact_type,
source_system=source_system,
convert=self.attr2syntax[attr][0],
verify=self.attr2syntax[attr][1],
normalize=self.attr2syntax[attr][2]))
for attr, source_system, contact_type in (
('telephoneNumber', manual, self.const.contact_phone),
('facsimileTelephoneNumber', manual, self.const.contact_fax),
('mobile', sap, self.const.contact_mobile_phone),
('labeledURI', None, self.const.contact_url))]
self.id2labeledURI = contacts[-1][1]
self.attr2id2contacts = [v for v in contacts if v[1]]
# roomNumber
# Some employees have registered their office addresses in SAP.
# We store this as co.contact_office. The roomNumber is the alias.
attr = 'roomNumber'
syntax = self.attr2syntax[attr]
contacts = self.get_contact_aliases(
contact_type=self.const.contact_office,
source_system=self.const.system_sap,
convert=syntax[0],
verify=syntax[1],
normalize=syntax[2])
if contacts:
self.attr2id2contacts.append((attr, contacts))
def get_contact_aliases(self, contact_type=None, source_system=None,
convert=None, verify=None, normalize=None):
"""Return a dict {entity_id: [list of contact aliases]}."""
# The code mimics a reduced modules/OrgLDIF.py:get_contacts().
entity = Entity.EntityContactInfo(self.db)
cont_tab = defaultdict(list)
if not convert:
convert = text_type
if not verify:
verify = bool
for row in entity.list_contact_info(source_system=source_system,
contact_type=contact_type):
alias = convert(text_type(row['contact_alias']))
if alias and verify(alias):
cont_tab[int(row['entity_id'])].append(alias)
return dict((key, attr_unique(values, normalize=normalize))
for key, values in cont_tab.iteritems())
def init_person_titles(self):
"""Extends the person_titles dict with employment titles available via
the PersonEmployment module."""
super(OrgLDIFHiAMixin, self).init_person_titles()
timer = make_timer(logger,
'Fetching personal employment titles...')
employments = self.person.search_employment(main_employment=True)
for emp in employments:
if emp['person_id'] not in self.person_titles:
title = [(self.const.language_nb, emp['description'])]
self.person_titles[emp['person_id']] = title
timer("...personal employment titles done.")
|
unioslo/cerebrum
|
Cerebrum/modules/no/hia/OrgLDIF.py
|
Python
|
gpl-2.0
| 4,715 | 0 |
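get_contact_aliases above is essentially a group-by over contact rows followed by per-key de-duplication; a stripped-down sketch of that pattern with plain Python data (the rows are made up and no Cerebrum APIs are involved):

# Generic sketch of the defaultdict group-by plus unique step used in get_contact_aliases().
from collections import defaultdict

rows = [(101, 'A101'), (101, 'A101'), (101, 'B202'), (102, 'C303')]  # (entity_id, alias), made up
cont_tab = defaultdict(list)
for entity_id, alias in rows:
    if alias:                               # stands in for the convert/verify filtering above
        cont_tab[entity_id].append(alias)

result = {key: sorted(set(values)) for key, values in cont_tab.items()}
# {101: ['A101', 'B202'], 102: ['C303']}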
""" Back-ported, durable, and portable selectors """
# MIT License
#
# Copyright (c) 2017 Seth Michael Larson
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import absolute_import
import collections
import errno
import math
import select
import socket
import sys
import time
from .. import pycompat
namedtuple = collections.namedtuple
Mapping = collections.Mapping
try:
monotonic = time.monotonic
except AttributeError:
monotonic = time.time
__author__ = 'Seth Michael Larson'
__email__ = 'sethmichaellarson@protonmail.com'
__version__ = '2.0.0'
__license__ = 'MIT'
__url__ = 'https://www.github.com/SethMichaelLarson/selectors2'
__all__ = ['EVENT_READ',
'EVENT_WRITE',
'SelectorKey',
'DefaultSelector',
'BaseSelector']
EVENT_READ = (1 << 0)
EVENT_WRITE = (1 << 1)
_DEFAULT_SELECTOR = None
_SYSCALL_SENTINEL = object() # Sentinel in case a system call returns None.
_ERROR_TYPES = (OSError, IOError, socket.error)
SelectorKey = namedtuple('SelectorKey', ['fileobj', 'fd', 'events', 'data'])
class _SelectorMapping(Mapping):
""" Mapping of file objects to selector keys """
def __init__(self, selector):
self._selector = selector
def __len__(self):
return len(self._selector._fd_to_key)
def __getitem__(self, fileobj):
try:
fd = self._selector._fileobj_lookup(fileobj)
return self._selector._fd_to_key[fd]
except KeyError:
raise KeyError("{0!r} is not registered.".format(fileobj))
def __iter__(self):
return iter(self._selector._fd_to_key)
def _fileobj_to_fd(fileobj):
""" Return a file descriptor from a file object. If
given an integer will simply return that integer back. """
if isinstance(fileobj, int):
fd = fileobj
else:
try:
fd = int(fileobj.fileno())
except (AttributeError, TypeError, ValueError):
raise ValueError("Invalid file object: {0!r}".format(fileobj))
if fd < 0:
raise ValueError("Invalid file descriptor: {0}".format(fd))
return fd
class BaseSelector(object):
""" Abstract Selector class
A selector supports registering file objects to be monitored
for specific I/O events.
A file object is a file descriptor or any object with a
`fileno()` method. An arbitrary object can be attached to the
file object which can be used for example to store context info,
a callback, etc.
A selector can use various implementations (select(), poll(), epoll(),
and kqueue()) depending on the platform. The 'DefaultSelector' class uses
the most efficient implementation for the current platform.
"""
def __init__(self):
# Maps file descriptors to keys.
self._fd_to_key = {}
# Read-only mapping returned by get_map()
self._map = _SelectorMapping(self)
def _fileobj_lookup(self, fileobj):
""" Return a file descriptor from a file object.
This wraps _fileobj_to_fd() to do an exhaustive
search in case the object is invalid but we still
have it in our map. Used by unregister() so we can
unregister an object that was previously registered
even if it is closed. It is also used by _SelectorMapping
"""
try:
return _fileobj_to_fd(fileobj)
except ValueError:
# Search through all our mapped keys.
for key in self._fd_to_key.values():
if key.fileobj is fileobj:
return key.fd
# Raise ValueError after all.
raise
def register(self, fileobj, events, data=None):
""" Register a file object for a set of events to monitor. """
if (not events) or (events & ~(EVENT_READ | EVENT_WRITE)):
raise ValueError("Invalid events: {0!r}".format(events))
key = SelectorKey(fileobj, self._fileobj_lookup(fileobj), events, data)
if key.fd in self._fd_to_key:
raise KeyError("{0!r} (FD {1}) is already registered"
.format(fileobj, key.fd))
self._fd_to_key[key.fd] = key
return key
def unregister(self, fileobj):
""" Unregister a file object from being monitored. """
try:
key = self._fd_to_key.pop(self._fileobj_lookup(fileobj))
except KeyError:
raise KeyError("{0!r} is not registered".format(fileobj))
# Getting the fileno of a closed socket on Windows errors with EBADF.
except socket.error as err:
if err.errno != errno.EBADF:
raise
else:
for key in self._fd_to_key.values():
if key.fileobj is fileobj:
self._fd_to_key.pop(key.fd)
break
else:
raise KeyError("{0!r} is not registered".format(fileobj))
return key
def modify(self, fileobj, events, data=None):
""" Change a registered file object monitored events and data. """
# NOTE: Some subclasses optimize this operation even further.
try:
key = self._fd_to_key[self._fileobj_lookup(fileobj)]
except KeyError:
raise KeyError("{0!r} is not registered".format(fileobj))
if events != key.events:
self.unregister(fileobj)
key = self.register(fileobj, events, data)
elif data != key.data:
# Use a shortcut to update the data.
key = key._replace(data=data)
self._fd_to_key[key.fd] = key
return key
def select(self, timeout=None):
""" Perform the actual selection until some monitored file objects
are ready or the timeout expires. """
raise NotImplementedError()
def close(self):
""" Close the selector. This must be called to ensure that all
underlying resources are freed. """
self._fd_to_key.clear()
self._map = None
def get_key(self, fileobj):
""" Return the key associated with a registered file object. """
mapping = self.get_map()
if mapping is None:
raise RuntimeError("Selector is closed")
try:
return mapping[fileobj]
except KeyError:
raise KeyError("{0!r} is not registered".format(fileobj))
def get_map(self):
""" Return a mapping of file objects to selector keys """
return self._map
def _key_from_fd(self, fd):
""" Return the key associated to a given file descriptor
Return None if it is not found. """
try:
return self._fd_to_key[fd]
except KeyError:
return None
def __enter__(self):
return self
def __exit__(self, *_):
self.close()
# Almost all platforms have select.select()
if hasattr(select, "select"):
class SelectSelector(BaseSelector):
""" Select-based selector. """
def __init__(self):
super(SelectSelector, self).__init__()
self._readers = set()
self._writers = set()
def register(self, fileobj, events, data=None):
key = super(SelectSelector, self).register(fileobj, events, data)
if events & EVENT_READ:
self._readers.add(key.fd)
if events & EVENT_WRITE:
self._writers.add(key.fd)
return key
def unregister(self, fileobj):
key = super(SelectSelector, self).unregister(fileobj)
self._readers.discard(key.fd)
self._writers.discard(key.fd)
return key
def select(self, timeout=None):
# Selecting on empty lists on Windows errors out.
if not len(self._readers) and not len(self._writers):
return []
timeout = None if timeout is None else max(timeout, 0.0)
ready = []
r, w, _ = _syscall_wrapper(self._wrap_select, True, self._readers,
self._writers, timeout)
r = set(r)
w = set(w)
for fd in r | w:
events = 0
if fd in r:
events |= EVENT_READ
if fd in w:
events |= EVENT_WRITE
key = self._key_from_fd(fd)
if key:
ready.append((key, events & key.events))
return ready
def _wrap_select(self, r, w, timeout=None):
""" Wrapper for select.select because timeout is a positional arg """
return select.select(r, w, [], timeout)
__all__.append('SelectSelector')
# Jython has a different implementation of .fileno() for socket objects.
if pycompat.isjython:
class _JythonSelectorMapping(object):
""" This is an implementation of _SelectorMapping that is built
for use specifically with Jython, which does not provide a hashable
value from socket.socket.fileno(). """
def __init__(self, selector):
assert isinstance(selector, JythonSelectSelector)
self._selector = selector
def __len__(self):
return len(self._selector._sockets)
def __getitem__(self, fileobj):
for sock, key in self._selector._sockets:
if sock is fileobj:
return key
else:
raise KeyError("{0!r} is not registered.".format(fileobj))
class JythonSelectSelector(SelectSelector):
""" This is an implementation of SelectSelector that is for Jython
which works around that Jython's socket.socket.fileno() does not
return an integer fd value. All SelectorKey.fd will be equal to -1
and should not be used. This instead uses object id to compare fileobj
and will only use select.select as it's the only selector that allows
directly passing in socket objects rather than registering fds.
See: http://bugs.jython.org/issue1678
https://wiki.python.org/jython/NewSocketModule#socket.fileno.28.29_does_not_return_an_integer
"""
def __init__(self):
super(JythonSelectSelector, self).__init__()
self._sockets = [] # Uses a list of tuples instead of dictionary.
self._map = _JythonSelectorMapping(self)
self._readers = []
self._writers = []
# Jython has a select.cpython_compatible_select function in older versions.
self._select_func = getattr(select, 'cpython_compatible_select', select.select)
def register(self, fileobj, events, data=None):
for sock, _ in self._sockets:
if sock is fileobj:
raise KeyError("{0!r} is already registered"
.format(fileobj, sock))
key = SelectorKey(fileobj, -1, events, data)
self._sockets.append((fileobj, key))
if events & EVENT_READ:
self._readers.append(fileobj)
if events & EVENT_WRITE:
self._writers.append(fileobj)
return key
def unregister(self, fileobj):
for i, (sock, key) in enumerate(self._sockets):
if sock is fileobj:
break
else:
raise KeyError("{0!r} is not registered.".format(fileobj))
if key.events & EVENT_READ:
self._readers.remove(fileobj)
if key.events & EVENT_WRITE:
self._writers.remove(fileobj)
del self._sockets[i]
return key
def _wrap_select(self, r, w, timeout=None):
""" Wrapper for select.select because timeout is a positional arg """
return self._select_func(r, w, [], timeout)
__all__.append('JythonSelectSelector')
SelectSelector = JythonSelectSelector # Override so the wrong selector isn't used.
if hasattr(select, "poll"):
class PollSelector(BaseSelector):
""" Poll-based selector """
def __init__(self):
super(PollSelector, self).__init__()
self._poll = select.poll()
def register(self, fileobj, events, data=None):
key = super(PollSelector, self).register(fileobj, events, data)
event_mask = 0
if events & EVENT_READ:
event_mask |= select.POLLIN
if events & EVENT_WRITE:
event_mask |= select.POLLOUT
self._poll.register(key.fd, event_mask)
return key
def unregister(self, fileobj):
key = super(PollSelector, self).unregister(fileobj)
self._poll.unregister(key.fd)
return key
def _wrap_poll(self, timeout=None):
""" Wrapper function for select.poll.poll() so that
_syscall_wrapper can work with only seconds. """
if timeout is not None:
if timeout <= 0:
timeout = 0
else:
# select.poll.poll() has a resolution of 1 millisecond,
# round away from zero to wait *at least* timeout seconds.
timeout = math.ceil(timeout * 1000)
result = self._poll.poll(timeout)
return result
def select(self, timeout=None):
ready = []
fd_events = _syscall_wrapper(self._wrap_poll, True, timeout=timeout)
for fd, event_mask in fd_events:
events = 0
if event_mask & ~select.POLLIN:
events |= EVENT_WRITE
if event_mask & ~select.POLLOUT:
events |= EVENT_READ
key = self._key_from_fd(fd)
if key:
ready.append((key, events & key.events))
return ready
__all__.append('PollSelector')
if hasattr(select, "epoll"):
class EpollSelector(BaseSelector):
""" Epoll-based selector """
def __init__(self):
super(EpollSelector, self).__init__()
self._epoll = select.epoll()
def fileno(self):
return self._epoll.fileno()
def register(self, fileobj, events, data=None):
key = super(EpollSelector, self).register(fileobj, events, data)
events_mask = 0
if events & EVENT_READ:
events_mask |= select.EPOLLIN
if events & EVENT_WRITE:
events_mask |= select.EPOLLOUT
_syscall_wrapper(self._epoll.register, False, key.fd, events_mask)
return key
def unregister(self, fileobj):
key = super(EpollSelector, self).unregister(fileobj)
try:
_syscall_wrapper(self._epoll.unregister, False, key.fd)
except _ERROR_TYPES:
                # This can occur when the fd was closed since registration.
pass
return key
def select(self, timeout=None):
if timeout is not None:
if timeout <= 0:
timeout = 0.0
else:
# select.epoll.poll() has a resolution of 1 millisecond
# but luckily takes seconds so we don't need a wrapper
# like PollSelector. Just for better rounding.
timeout = math.ceil(timeout * 1000) * 0.001
timeout = float(timeout)
else:
timeout = -1.0 # epoll.poll() must have a float.
# We always want at least 1 to ensure that select can be called
# with no file descriptors registered. Otherwise will fail.
max_events = max(len(self._fd_to_key), 1)
ready = []
fd_events = _syscall_wrapper(self._epoll.poll, True,
timeout=timeout,
maxevents=max_events)
for fd, event_mask in fd_events:
events = 0
if event_mask & ~select.EPOLLIN:
events |= EVENT_WRITE
if event_mask & ~select.EPOLLOUT:
events |= EVENT_READ
key = self._key_from_fd(fd)
if key:
ready.append((key, events & key.events))
return ready
def close(self):
self._epoll.close()
super(EpollSelector, self).close()
__all__.append('EpollSelector')
if hasattr(select, "devpoll"):
class DevpollSelector(BaseSelector):
"""Solaris /dev/poll selector."""
def __init__(self):
super(DevpollSelector, self).__init__()
self._devpoll = select.devpoll()
def fileno(self):
return self._devpoll.fileno()
def register(self, fileobj, events, data=None):
key = super(DevpollSelector, self).register(fileobj, events, data)
poll_events = 0
if events & EVENT_READ:
poll_events |= select.POLLIN
if events & EVENT_WRITE:
poll_events |= select.POLLOUT
self._devpoll.register(key.fd, poll_events)
return key
def unregister(self, fileobj):
key = super(DevpollSelector, self).unregister(fileobj)
self._devpoll.unregister(key.fd)
return key
def _wrap_poll(self, timeout=None):
""" Wrapper function for select.poll.poll() so that
_syscall_wrapper can work with only seconds. """
if timeout is not None:
if timeout <= 0:
timeout = 0
else:
# select.devpoll.poll() has a resolution of 1 millisecond,
# round away from zero to wait *at least* timeout seconds.
timeout = math.ceil(timeout * 1000)
result = self._devpoll.poll(timeout)
return result
def select(self, timeout=None):
ready = []
fd_events = _syscall_wrapper(self._wrap_poll, True, timeout=timeout)
for fd, event_mask in fd_events:
events = 0
if event_mask & ~select.POLLIN:
events |= EVENT_WRITE
if event_mask & ~select.POLLOUT:
events |= EVENT_READ
key = self._key_from_fd(fd)
if key:
ready.append((key, events & key.events))
return ready
def close(self):
self._devpoll.close()
super(DevpollSelector, self).close()
__all__.append('DevpollSelector')
if hasattr(select, "kqueue"):
class KqueueSelector(BaseSelector):
""" Kqueue / Kevent-based selector """
def __init__(self):
super(KqueueSelector, self).__init__()
self._kqueue = select.kqueue()
def fileno(self):
return self._kqueue.fileno()
def register(self, fileobj, events, data=None):
key = super(KqueueSelector, self).register(fileobj, events, data)
if events & EVENT_READ:
kevent = select.kevent(key.fd,
select.KQ_FILTER_READ,
select.KQ_EV_ADD)
_syscall_wrapper(self._kqueue.control, False, [kevent], 0, 0)
if events & EVENT_WRITE:
kevent = select.kevent(key.fd,
select.KQ_FILTER_WRITE,
select.KQ_EV_ADD)
_syscall_wrapper(self._kqueue.control, False, [kevent], 0, 0)
return key
def unregister(self, fileobj):
key = super(KqueueSelector, self).unregister(fileobj)
if key.events & EVENT_READ:
kevent = select.kevent(key.fd,
select.KQ_FILTER_READ,
select.KQ_EV_DELETE)
try:
_syscall_wrapper(self._kqueue.control, False, [kevent], 0, 0)
except _ERROR_TYPES:
pass
if key.events & EVENT_WRITE:
kevent = select.kevent(key.fd,
select.KQ_FILTER_WRITE,
select.KQ_EV_DELETE)
try:
_syscall_wrapper(self._kqueue.control, False, [kevent], 0, 0)
except _ERROR_TYPES:
pass
return key
def select(self, timeout=None):
if timeout is not None:
timeout = max(timeout, 0)
max_events = len(self._fd_to_key) * 2
ready_fds = {}
kevent_list = _syscall_wrapper(self._kqueue.control, True,
None, max_events, timeout)
for kevent in kevent_list:
fd = kevent.ident
event_mask = kevent.filter
events = 0
if event_mask == select.KQ_FILTER_READ:
events |= EVENT_READ
if event_mask == select.KQ_FILTER_WRITE:
events |= EVENT_WRITE
key = self._key_from_fd(fd)
if key:
if key.fd not in ready_fds:
ready_fds[key.fd] = (key, events & key.events)
else:
old_events = ready_fds[key.fd][1]
ready_fds[key.fd] = (key, (events | old_events) & key.events)
return list(ready_fds.values())
def close(self):
self._kqueue.close()
super(KqueueSelector, self).close()
__all__.append('KqueueSelector')
def _can_allocate(struct):
""" Checks that select structs can be allocated by the underlying
operating system, not just advertised by the select module. We don't
check select() because we'll be hopeful that most platforms that
don't have it available will not advertise it. (ie: GAE) """
try:
# select.poll() objects won't fail until used.
if struct == 'poll':
p = select.poll()
p.poll(0)
# All others will fail on allocation.
else:
getattr(select, struct)().close()
return True
except (OSError, AttributeError):
return False
# Python 3.5 uses a more direct route to wrap system calls to increase speed.
if sys.version_info >= (3, 5):
def _syscall_wrapper(func, _, *args, **kwargs):
""" This is the short-circuit version of the below logic
because in Python 3.5+ all selectors restart system calls. """
return func(*args, **kwargs)
else:
def _syscall_wrapper(func, recalc_timeout, *args, **kwargs):
""" Wrapper function for syscalls that could fail due to EINTR.
All functions should be retried if there is time left in the timeout
in accordance with PEP 475. """
timeout = kwargs.get("timeout", None)
if timeout is None:
expires = None
recalc_timeout = False
else:
timeout = float(timeout)
if timeout < 0.0: # Timeout less than 0 treated as no timeout.
expires = None
else:
expires = monotonic() + timeout
args = list(args)
if recalc_timeout and "timeout" not in kwargs:
raise ValueError(
"Timeout must be in args or kwargs to be recalculated")
result = _SYSCALL_SENTINEL
while result is _SYSCALL_SENTINEL:
try:
result = func(*args, **kwargs)
# OSError is thrown by select.select
# IOError is thrown by select.epoll.poll
# select.error is thrown by select.poll.poll
# Aren't we thankful for Python 3.x rework for exceptions?
except (OSError, IOError, select.error) as e:
# select.error wasn't a subclass of OSError in the past.
errcode = None
if hasattr(e, "errno"):
errcode = e.errno
elif hasattr(e, "args"):
errcode = e.args[0]
# Also test for the Windows equivalent of EINTR.
is_interrupt = (errcode == errno.EINTR or (hasattr(errno, "WSAEINTR") and
errcode == errno.WSAEINTR))
if is_interrupt:
if expires is not None:
current_time = monotonic()
if current_time > expires:
raise OSError(errno.ETIMEDOUT, 'Connection timed out')
if recalc_timeout:
if "timeout" in kwargs:
kwargs["timeout"] = expires - current_time
continue
raise
return result
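# Illustrative note (editor's addition): the second positional argument tells the wrapper
# whether the remaining timeout must be recalculated after an EINTR retry; in that case
# the timeout has to be passed as a keyword argument, e.g. (both calls appear above):
#
#     fd_events = _syscall_wrapper(self._wrap_poll, True, timeout=timeout)
#     _syscall_wrapper(self._epoll.register, False, key.fd, events_mask)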
# Choose the best implementation, roughly:
# kqueue == devpoll == epoll > poll > select
# select() also can't accept a FD > FD_SETSIZE (usually around 1024)
def DefaultSelector():
""" This function serves as a first call for DefaultSelector to
detect if the select module is being monkey-patched incorrectly
by eventlet, greenlet, and preserve proper behavior. """
global _DEFAULT_SELECTOR
if _DEFAULT_SELECTOR is None:
if pycompat.isjython:
_DEFAULT_SELECTOR = JythonSelectSelector
elif _can_allocate('kqueue'):
_DEFAULT_SELECTOR = KqueueSelector
elif _can_allocate('devpoll'):
_DEFAULT_SELECTOR = DevpollSelector
elif _can_allocate('epoll'):
_DEFAULT_SELECTOR = EpollSelector
elif _can_allocate('poll'):
_DEFAULT_SELECTOR = PollSelector
elif hasattr(select, 'select'):
_DEFAULT_SELECTOR = SelectSelector
else: # Platform-specific: AppEngine
raise RuntimeError('Platform does not have a selector.')
return _DEFAULT_SELECTOR()
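# --- Illustrative usage sketch (editor's addition, not part of the vendored module) ---
# The BaseSelector docstring above describes the register()/select() API; the helper
# below shows one round trip.  It assumes socket.socketpair() is available (POSIX, or
# Python 3.5+ on Windows) and is only meant to be called explicitly, e.g. from a test.
def _demo_selector_usage():
    a, b = socket.socketpair()      # a connected pair of sockets
    sel = DefaultSelector()
    try:
        sel.register(a, EVENT_READ, data='demo')
        b.send(b'ping')             # make the registered end readable
        for key, events in sel.select(timeout=1.0):
            assert events & EVENT_READ
            print('%s received %r' % (key.data, key.fileobj.recv(4)))
    finally:
        sel.close()
        a.close()
        b.close()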
|
smmribeiro/intellij-community
|
plugins/hg4idea/testData/bin/mercurial/thirdparty/selectors2.py
|
Python
|
apache-2.0
| 27,478 | 0.000619 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from datetime import timedelta
from decimal import Decimal
from django.db import models
from django.db import transaction
from django.utils.timezone import now
from django.db.models import Sum, Max, F
from audit_log.models.managers import AuditLog
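# NOTE (editor's addition): Bill.save() further down calls get_month_day_range(), which is
# neither defined nor imported in the portion of the module shown here.  The helper below
# is a minimal sketch of the assumed semantics -- return the first and last day of the
# month containing the given date -- inferred only from how the result is used.
import calendar
def get_month_day_range(date):
    """Return (first_day, last_day) of the month containing ``date``."""
    first_day = date.replace(day=1)
    last_day = date.replace(day=calendar.monthrange(date.year, date.month)[1])
    return first_day, last_day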
class CoreModel(models.Model):
created = models.DateTimeField(default=now, editable=False)
created_by = models.TextField(db_column='created_by', blank=False, editable=False)
last_updated = models.DateTimeField(default=now, db_column='last_updated', editable=False)
last_updated_by = models.TextField(db_column='last_updated_by', blank=False, editable=False)
class Meta:
abstract = True
class Config(CoreModel):
class Meta:
db_table = 'config'
verbose_name = "configuration"
verbose_name_plural = "configurations"
name = models.TextField(blank=False)
value = models.TextField(blank=False)
audit_log = AuditLog()
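# Editor's note: the lookups throughout this module expect at least the following Config
# rows to exist (names taken verbatim from the Config.objects.get() calls below):
#     active_period, business_date, auto_post, days_due, tracking_number, trn_zeroes
# e.g. Config.objects.create(name='business_date', value='2014-01-31')  # value is made up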
class AccountException(Exception):
pass
class Account(CoreModel):
"""
Account is defined by a customer and address.
Customers can have multiple accounts with different addresses.
An Account is mapped to a Meter.
Should the Meter be destroyed, close the account and create a new one with a new meter, but same customer and address
"""
customer = models.ForeignKey('Customer', db_column='customer_id')
address = models.ForeignKey('Address', db_column='address_id')
account_type = models.ForeignKey('AccountType', db_column='account_type_id')
#meter = models.ForeignKey('Meter', db_column='meter_id', unique=True)
status = models.TextField(blank=True)
remarks = models.TextField(blank=True)
class Meta:
db_table = 'account'
ordering = ['customer']
unique_together = ('customer', 'address')
def __unicode__(self):
return u'-'.join([unicode(self.customer),unicode(self.address)])
@property
def bills(self):
return self.bill_set.all()
@property
def notices(self):
return self.notice_set.all()
@property
def meterreads(self):
return self.meterread_set.all()
@property
def adjustments(self):
return self.adjustment_set.all()
@property
def meters(self):
meters = [meter for meter in self.accountmeter_set.all()]
return meters
@property
def accountmeters(self):
return self.accountmeter_set.all()
@property
def notes(self):
return self.accountnote_set.all()
@property
def balance(self):
return self.financialtransaction_set.latest('id').balance
@property
def latest_balance(self):
return self.financialtransaction_set.latest('id').balance
# @property
# def latest_balance_(self):
# posted_payment = self.payment_set.filter(status="posted",
# payment_date__gte=self.latest_bill.bill_date).aggregate(Sum('amount'))['amount__sum']
# if not posted_payment:
# posted_payment = Decimal('0.0')
# if self.latest_bill:
# return self.latest_bill.amount_due - posted_payment
# return None
@property
def is_for_disconnection(self):
'''
Returns true if the account is for disconnection
'''
if self.status == 'for disconnection':
return True
return False
def for_disconnection(self):
self.status = 'for disconnection'
self.update()
def is_disconnected(self):
'''
Returns true if the account is for disconnection
'''
if self.status == 'disconnected':
return True
return False
def disconnect(self):
''' Set status of account to disconnected'''
self.status = 'disconnected'
self.update()
@property
def bill(self):
'''
Returns the bill of the current active period, None of none.
'''
period = Config.objects.get(name='active_period').value
bs = BillingSchedule.objects.get(pk=period)
bill = Bill.objects.filter(account=self, billing_schedule=bs)
if self.has_bill():
return bill.get()
return None
def has_bill(self, period=None):
'''
Determines if the account has a bill for a particular period (default is active period)
'''
has_bill = False
if period is None:
period = Config.objects.get(name='active_period').value
bs = BillingSchedule.objects.get(pk=period)
bill = Bill.objects.filter(account=self, billing_schedule=bs)
if bill.count() > 0 :
has_bill = True
return has_bill
@property
def latest_bill(self):
if self.bill_set.exists():
return self.bill_set.latest('id')
return None
@property
def latest_notice(self):
if self.notice_set.exists():
return self.notice_set.latest('id')
return None
@property
def reconnection_fees(self):
if self.latest_bill:
return self.financialtransaction_set.filter(type='reconnection_fee',
transaction_date__gte=self.latest_bill.bill_date).aggregate(Sum('amount'))['amount__sum']
return None
@property
def total_posted_payment(self):
if self.latest_bill:
return self.financialtransaction_set.filter(type='posted_payment',
transaction_date__gte=self.latest_bill.bill_date).aggregate(Sum('amount'))['amount__sum']
return None
@property
def total_adjustment(self):
if self.latest_bill:
credit = self.financialtransaction_set.filter(adjustment__type='credit', transaction_date__gte=self.latest_bill.bill_date).aggregate(Sum('amount'))['amount__sum']
debit = self.financialtransaction_set.filter(adjustment__type='debit', transaction_date__gte=self.latest_bill.bill_date).aggregate(Sum('amount'))['amount__sum']
reconnection_fee = self.financialtransaction_set.filter(adjustment__type='reconnection_fee', transaction_date__gte=self.latest_bill.bill_date).aggregate(Sum('amount'))['amount__sum']
#if credit is None and debit is None:
if credit is None and debit is None and reconnection_fee is None:
return Decimal('0.00')
#return credit - debit
return credit - debit - reconnection_fee
return Decimal('0.00')
def regenerate_bill(self,user=''):
"""
This function executes delete_current_bill and generate_bill in succession.
        This regenerates an account's current bill (or creates a new one if no bill currently exists)
for the given active period.
"""
deleted = self.delete_current_bill(user=user)
created = False
if deleted:
print "-- original bill deleted"
bill, created = self.generate_bill()
else:
print "-- no bill to delete. generating anyway.."
bill, created = self.generate_bill()
return bill, created
def delete_current_bill(self, business_date=None, period=None, user=''):
"""
delete the bill for the current period. This feature is used for corrections in bill generation.
The related FinancialTransaction must also be deleted (currently unable to delete due to foreign key constraint).
"""
deleted = False
if period is None:
period = Config.objects.get(name='active_period').value
business_date = Config.objects.get(name='business_date').value
billing_schedule = BillingSchedule.objects.get(pk=period)
bill = self.bill
if bill:
try:
penalties = billing_schedule.penalty_set.filter(
account_id=self.id,
type='overdue')
penalty_amount = penalties.aggregate(Sum('amount'))['amount__sum']
print "penalty amount: ", penalty_amount
if penalty_amount is None:
penalty_amount = Decimal('0.0')
penalties.delete()
if self.balance > 0:
ft_balance = self.balance - penalty_amount
else:
ft_balance = self.balance
bill.meter_read.readcharge_set.all().delete()
#ft = bill.financial_transaction
#ft.delete()
self.financial_transaction = FinancialTransaction.objects.create(
account_id=self.id,
amount = bill.amount_due,
balance = ft_balance - bill.current_charge,
type = 'bill_deletion',
transaction_date = business_date)
bill.create_deleted_copy(deletor=user)
bill.delete()
deleted = True
except Exception, e:
print "-- an error occurred.. exiting: %s"%e
return deleted
else:
print "-- no bill to delete for this period!"
return deleted
def generate_bill(self, business_date=None, period=None):
"""
generate bills for this billing schedule
return a list of tuple of instance and True/False if created
"""
# get active period from Config
if period is None:
period = Config.objects.get(name='active_period').value
business_date = Config.objects.get(name='business_date').value
#meter read must be within billing schedule
if self.status=='inactive':
return None, False
previous_balance = self.latest_balance
#billing_schedule = BillingSchedule.objects.get(start_date__lte = business_date, end_date__gte = business_date)
billing_schedule = BillingSchedule.objects.get(pk=period)
#mr = self.meter.meterread_set.filter(read_date__lte=business_date).latest('id')
#mr = self.meter.meterread_set.filter(billing_schedule=billing_schedule, account=self).latest('id')
usage, mr = self.get_meter_reads_usage(period=period)
print "mr: ", type(mr), mr
if mr is None:
print "No reads for this period"
return None, False
#if billing_schedule.start_date>mr.read_date or billing_schedule.end_date < mr.read_date:
# return None, False
bill, created = Bill.objects.get_or_create(account = self, billing_schedule=billing_schedule, meter_read = mr,bill_date = business_date)
if created:
mr.status='used'
mr.update()
print "account balance: ", self.latest_balance
#if previous_balance > Decimal("0.0"):
# print " account: %s for disconnection " % (self.customer.last_name + ", " + self.customer.first_name)
# self.status = "for disconnection"
# self.save()
return bill, created
def generate_notice(self, business_date=None, period=None):
"""
generate notices for this billing schedule
1. Check if there is already a notice for the billing schedule
2. Check if there is a bill already created, get it and base the due date
FIXME: lots of hardcoded numbers
"""
from datetime import timedelta
# get active period from Config
if period is None:
period = Config.objects.get(name='active_period').value
if business_date is None:
business_date = Config.objects.get(name='business_date').value
if self.status=='inactive':
return None, False
notice_date = business_date
reconnection_fee = Decimal('200.0')
#billing_schedule = BillingSchedule.objects.get(start_date__lte=business_date, end_date__gte=business_date)
billing_schedule = BillingSchedule.objects.get(pk=period)
notice = self.latest_notice
if notice and notice.billing_schedule == billing_schedule:
return notice, False
elif self.latest_bill and self.latest_balance > Decimal("0.0") and self.latest_bill.billing_schedule == billing_schedule:
amount = self.latest_balance
            due_date = self.latest_bill.due_date + timedelta(days=7) #FIX-ME: The notice period should be in Config
self.for_disconnection()
return Notice.objects.get_or_create(account = self,
billing_schedule=billing_schedule, notice_date = notice_date,
due_date = due_date, reconnection_fee = reconnection_fee,
amount=amount)
else:
print 'no notice generated'
return None, False
@property
def meter(self):
meter = self.get_meter()
return meter
def get_meter(self):
'''
Get the meter from the active account meter
'''
meter = self.get_account_meter().meter
#self.meter = meter
return meter
def get_account_meter(self):
'''
Get the active account meter for the Account from the AccountMeter table.
        Return None if no AccountMeter exists for the account.
'''
account_meter = AccountMeter.objects.filter(account = self, status='active')
if account_meter.count() > 0:
return account_meter.get()
return None
def add_account_meter(self, new_meter):
'''
Assign a new meter to the account. This adds the meter to the AccountMeter table, and sets the meter as 'active'
'''
print "--- Entering add account meter "
new_meter = Meter.objects.get(pk=new_meter)
current_meter = None
current_account_meter = self.get_account_meter()
#deactivate current account meter if existing
print "current_account_meter: ",current_account_meter
print "--- Deactivate current account meter "
if current_account_meter is not None:
print "meter: ", current_account_meter.meter
current_meter = current_account_meter.meter
current_account_meter.status = 'inactive'
current_account_meter.save()
print "--- Create a new account meter"
print "--- self: ", self
#below is encountering a UnicodeDecode Error. Manually doing for the meantime.
new_account_meter, created = AccountMeter.objects.get_or_create(account = self, meter=new_meter)
#new_account_meter =AccountMeter.objects.filter(account=self, meter=new_meter)
#
#print "new_account_meter: ", new_account_meter
#print "type: ", type(new_account_meter)
#if new_account_meter.count() > 0:
# new_account_meter = new_account_meter.get()
#else:
# new_account_meter = AccountMeter(account=self, meter=new_meter)
print "account meter: " + str(new_account_meter.meter.meter_uid)
new_account_meter.status = 'active'
new_account_meter.save()
#self.meter = new_meter
return new_account_meter
def add_meter_read(self, current_reading, read_date=None, period=None):
'''
Add a meter read for the specified account, read date, and billing period (Billing Schedule)
'''
from datetime import time
if period is None:
period = Config.objects.get(name='active_period').value
if read_date is None:
read_date = Config.objects.get(name='business_date').value
bs = BillingSchedule.objects.get(pk=period)
meter = self.get_meter()
print "self.get_current_reading: ", self.get_current_reading()
previous_reading = self.get_current_reading()
print "--- previous_reading: ", previous_reading
print "--- decimal previous reading: ", Decimal(previous_reading)
usage = Decimal(current_reading) - Decimal(previous_reading)
meterread, created = MeterRead.objects.get_or_create(meter = meter, account = self , billing_schedule = bs,
read_date=read_date,
defaults={
'read_time':time(),
'current_reading':current_reading,
'previous_reading': previous_reading,
'usage':usage
})
if created:
print "--- New Meter Read: ", meterread.id
#This is a new read, save all the values
meterread.previous_reading = previous_reading
meterread.usage = usage
meterread.current_reading = current_reading
meterread.read_time = time()
else:
print "-- Old Meter Read: ", meterread.id
print "-- Updating with current_reading: ", current_reading
# This is an old read, simply update the current reading, and get the new usage
meterread.current_reading = current_reading
meterread.read_time = time()
meterread.usage = Decimal(meterread.current_reading) - Decimal(meterread.previous_reading)
meterread.update()
print "meterread", meterread, created
print "meterread.id: ", meterread.id
print "meterread.previous_reading: ", meterread.previous_reading
print "meterread.current_reading: ", meterread.current_reading
print "meterread.usage: ", meterread.usage
print "- - - - - - "
@property
def usage(self):
usage, meter = self.get_meter_reads_usage()
return usage
def get_meter_read_usage(self, meter=None, period=None):
'''
Get the usage the latest read of an account meter for a particular period.
By the default it gets the usage of an active meter.
'''
# get the active meter
if meter is None:
meter = self.get_meter()
else:
meter = Meter.objects.get(pk=meter)
# get the active period
if period is None:
period = Config.objects.get(name='active_period').value
bs = BillingSchedule.objects.get(pk=period)
reads = MeterRead.objects.filter(billing_schedule=bs,account=self, meter=meter)
usage = Decimal("0.00")
if reads.count() > 1:
for read in reads:
usage = usage + read.usage
return usage
elif reads.count() == 1:
return reads.get().usage
else:
return usage
def get_meter_reads_usage(self, period=None):
'''
Retrieve all meter reads for an Account for a specific period (BillingSchedule).
Return latest MeterRead
'''
if period is None:
period = Config.objects.get(name='active_period').value
bs = BillingSchedule.objects.get(pk=period)
reads = MeterRead.objects.filter(billing_schedule=bs,account=self)
usage = Decimal("0.0")
for read in reads:
usage = usage + Decimal(read.usage)
reads_id = None
if reads.count() > 0:
reads_id = reads.latest('id')
return usage, reads_id
def add_payment(self, amount, payment_type='cash', check_number='', payment_date=None, remarks=''):
'''
Adds a payment to an account
'''
print "adding payment to account id: ", self.id
if payment_date is None:
payment_date = Config.objects.get(name='business_date').value
auto_post = Config.objects.get(name='auto_post').value
print "amount: ", Decimal(amount)
print "check_number: ", check_number
print "type: ", payment_type
print "remarks: ", remarks
payment = Payment(account=self, amount=Decimal(amount), payment_date=payment_date, check_number=check_number,
type=payment_type, remarks=remarks)
payment.save()
print "payment_date: ", payment_date
print "payment_id: ", payment.id
if auto_post =='True' and payment_type == 'cash':
"auto posting payment.."
pp = PostedPayment(payment=payment)
pp.save()
return payment
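    # Editor's note: when the 'auto_post' Config row is 'True', a cash payment added above
    # is posted immediately -- PostedPayment.save() (defined below) then records the
    # matching FinancialTransaction and reduces the account balance.  For example:
    #     acct.add_payment('500.00', payment_type='cash', remarks='OR #1234')
    # (the amount and receipt number are made-up values).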
def add_adjustment(self, amount, adjustment_type='credit', adjustment_date=None, description=''):
'''
Adds an adjustment to an account
'''
print "adding adjustment to account id: ", self.id
if adjustment_date is None:
adjustment_date = Config.objects.get(name='business_date').value
auto_post = Config.objects.get(name='auto_post').value
print "amount: ", Decimal(amount)
print "type: ", adjustment_type
print "description: ", description
adjustment = Adjustment(account=self, amount=Decimal(amount), adjustment_date=adjustment_date,
type=adjustment_type, description=description)
adjustment.save()
print "adjustmet_date: ", adjustment_date
print "adjustment_id: ", adjustment.id
return adjustment
def add_note(self, note, user=''):
'''
Adds a note to an account
'''
print "adding note to account id: ", self.id
note = AccountNote(account=self, note=note, username=user)
note.save()
print "note_id: ", note.id
return note
@property
def reading(self):
'''
Property to get the most recent reading for the Account
'''
latest_reading = self.get_current_reading()
return latest_reading
def get_current_reading(self, period=None):
'''
Returns the current_reading value from the account's latest meter read
'''
if period is None:
period = Config.objects.get(name='active_period').value
bs = BillingSchedule.objects.get(pk=period)
meter = self.meter
reads = MeterRead.objects.filter(meter=meter,account=self)
if not reads: #if the current period does not return a read, get latest read for account only
reads = MeterRead.objects.filter(account=self)
latest_read = reads.latest('id')
print "latest_read.current_reading: ", latest_read.current_reading
return latest_read.current_reading
def get_previous_reading(self, period=None):
'''
Returns the previous_reading value from the account's latest meter read
'''
if period is None:
period = Config.objects.get(name='active_period').value
bs = BillingSchedule.objects.get(pk=period)
reads = MeterRead.objects.filter(billing_schedule=bs,account=self)
latest_read = reads.latest('id')
return latest_read.previous_reading
def get_bill(self, period=None):
'''
Returns the bill for a given period. Default is active period.
'''
if period is None:
period = Config.objects.get(name='active_period').value
bs = BillingSchedule.objects.get(pk=period)
bill = Bill.objects.filter(billing_schedule=bs,account=self)
customer_name = self.customer.last_name + ", " + self.customer.first_name
address = self.address.address1
if bill.count() == 1:
return bill.get() #return bill
elif bill.count() > 1:
raise AccountException("More than 1 bill for Account: %s , Address: %s and BillingSchedule: %s", customer_name , address, str(bs))
return None
def update(self, *args, **kwargs):
try:
super(Account, self).save(*args, **kwargs) # Call the "real" save() method.
except Exception, e:
print "account update failed", e
raise e
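# Illustrative monthly workflow for an Account (editor's sketch; all object ids and
# readings are made-up values):
#
#     acct = Account.objects.get(pk=42)
#     acct.add_account_meter(7)                      # attach and activate a Meter by pk
#     acct.add_meter_read('123.500')                 # record the current reading
#     bill, created = acct.generate_bill()           # bill for the active period
#     acct.add_payment('350.00', payment_type='cash')
#     acct.generate_notice()                         # overdue notice if a balance remains
#
# Each step reads the active period and business date from Config, as the methods above do.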
class AccountMeter(CoreModel):
"""
AccountMeter maps an account to any number of Meters.
But only one meter can be active at a time
"""
account = models.ForeignKey('Account', db_column='account_id')
meter = models.ForeignKey('Meter', db_column='meter_id')
status = models.TextField(blank=True)
remarks = models.TextField(blank=True)
class Meta:
db_table = 'account_meter'
unique_together = ('account', 'meter')
def __unicode__(self):
return u'-'#.join([unicode(self.account),unicode(self.meter)])
class AccountType(CoreModel):
description = models.TextField(db_column='description', unique=True)
rate = models.ForeignKey('Rate', db_column='rate_id', unique=True)
class Meta:
db_table = 'account_type'
def __unicode__(self):
return self.description
def save(self, *args, **kwargs):
try:
            self.rate = Rate.objects.get(description=self.description)  # Rate has a 'description' field, not 'rate'
super(AccountType, self).save(*args, **kwargs) # Call the "real" save() method.
except Exception, e:
print e
return
class Address(CoreModel):
address1 = models.TextField("Complete address")
address2 = models.TextField("Block number", blank=True)
address3 = models.TextField("Lot number", blank=True)
address4 = models.TextField("Area", blank=True)
zip_code = models.BigIntegerField(blank=True)
class Meta:
verbose_name_plural = "addresses"
db_table = 'address'
unique_together = ('address1', 'address2', 'address3', 'address4', 'zip_code')
def __unicode__(self):
return ",".join([self.address1 , self.address2, self.address3, self.address4])
class AccountNote(CoreModel):
account = models.ForeignKey('Account', db_column='account_id')
note = models.TextField()
username = models.TextField()
class Meta:
db_table = 'account_note'
class Adjustment(CoreModel):
financial_transaction = models.OneToOneField('FinancialTransaction', db_column='financial_transaction_id')
description = models.TextField(blank=True)
type = models.TextField(blank=False) #credit, debit
amount = models.DecimalField(decimal_places=2, max_digits=11, db_column='amount')
adjustment_date = models.DateField(db_column='adjustment_date')
account = models.ForeignKey('Account', db_column='account_id')
class Meta:
db_table = 'adjustment'
@transaction.commit_manually
def save(self, *args, **kwargs):
if not self.adjustment_date:
self.adjustment_date = now().date()
try:
previous_transaction = self.account.financialtransaction_set.latest('id')
previous_balance = previous_transaction.balance
except FinancialTransaction.DoesNotExist, e:
previous_balance = Decimal('0.0')
if self.type in ['debit', 'reconnection_fee']:
print 'self type is debit: ', self.type
balance = previous_balance + self.amount
else:
print 'self type is not debit: ', self.type
balance = previous_balance - self.amount
try:
print 'before financial transaction', self.account.id, self.amount, self.type, balance, self.adjustment_date
self.financial_transaction = FinancialTransaction.objects.create(
account_id=self.account.id,
amount = self.amount,
type = self.type,
balance = balance,
transaction_date= self.adjustment_date)
print 'self financial_transaction', self.financial_transaction
super(Adjustment, self).save(*args, **kwargs)
print 'after super adjustment'
except Exception, e:
print 'exception', e
transaction.rollback()
raise e
else:
transaction.commit()
class BillingSchedule(CoreModel):
reading_start_date = models.DateField(db_column='reading_start_date')
reading_end_date = models.DateField(db_column='reading_end_date')
start_date = models.DateField(db_column='start_date')
end_date = models.DateField(db_column='end_date')
    status = models.TextField(blank=True, default='inactive')
class Meta:
db_table = 'billing_schedule'
unique_together = ('start_date', 'end_date')
def __unicode__(self):
return unicode(self.start_date.strftime("%b %d, %Y")) + u" - " + unicode(self.end_date.strftime("%b %d, %Y"))
def due_date(self):
from datetime import timedelta
days_due = int(Config.objects.get(name='days_due').value)
due_date = self.start_date + timedelta(days_due)
return due_date
@property
def previous(self):
return self.get_previous_by_start_date()
@property
def next(self):
return self.get_next_by_start_date()
class Bill(CoreModel):
"""
A bill is unique based on schedule, account
Must provide the following:
account
billing_schedule
"""
financial_transaction = models.OneToOneField('FinancialTransaction', db_column='financial_transaction_id')
billing_schedule = models.ForeignKey('BillingSchedule', db_column='billing_schedule_id')
account = models.ForeignKey('Account', db_column='account_id')
meter_read = models.ForeignKey('MeterRead', db_column='meter_read_id')
bill_date = models.DateField(db_column='bill_date')
due_date = models.DateField(db_column='due_date')
previous_balance = models.DecimalField(decimal_places=2, max_digits=11, db_column='previous_balance')
current_charge = models.DecimalField(decimal_places=2, max_digits=11, db_column='current_charge', editable=False)
penalty_amount = models.DecimalField(decimal_places=2, max_digits=11, db_column='penalty_amount')
amount_due = models.DecimalField(decimal_places=2, max_digits=11, db_column='amount_due')
tracking_number = models.TextField(blank=True)
audit_log = AuditLog()
class Meta:
db_table = 'bill'
unique_together = ('billing_schedule', 'account')
def __unicode__(self):
return u'-'.join([unicode(self.account), unicode(self.billing_schedule), unicode(self.amount_due)])
def update(self, *args, **kwargs):
try:
super(Bill, self).save(*args, **kwargs) # Call the "real" save() method.
except Exception, e:
print "bill update failed", e
raise e
@transaction.commit_manually
def save(self, business_date=None, *args, **kwargs):
"""
Calculate current_charge
Get previous_balance
Get penalty_amount (sum of penalty transactions during penalty_period)
"""
from datetime import datetime, timedelta
from dateutil.relativedelta import relativedelta
try:
previous_transaction = self.account.financialtransaction_set.latest('id')
self.previous_balance = previous_transaction.balance
except FinancialTransaction.DoesNotExist, e:
self.previous_balance = Decimal('0.0')
"""
try:
self.reconnection_fees = self.account.reconnection_fees
except:
self.reconnection_fees = Decimal('0.0')
"""
try:
# get business date and tracking number
if business_date == None:
business_date = Config.objects.get(name='business_date').value
tnconfig = Config.objects.get(name='tracking_number')
zeroes = int(Config.objects.get(name='trn_zeroes').value)
self.save_read_charges()
self.save_overdue_penalty()
self.current_charge = sum([i.amount for i in self.meter_read.readcharge_set.all()])
self.penalty_amount = sum([i.amount for i in self.billing_schedule.penalty_set.filter(account_id=self.account.id).all()])
self.amount_due = self.current_charge + self.penalty_amount + self.previous_balance# + self.reconnection_fees
#self.bill_date = self.billing_schedule.end_date
self.bill_date = datetime.strptime(business_date,'%Y-%m-%d')
#self.due_date = self.bill_date + timedelta(days=19)
first_day, last_day = get_month_day_range(self.bill_date)
print "last_day: ", last_day
print "first_day: ", first_day
self.due_date = last_day + timedelta(days=-1)
self.financial_transaction = FinancialTransaction.objects.create(
account_id=self.account.id,
amount = self.current_charge + self.penalty_amount,
balance = self.amount_due,
type = self._meta.db_table,
transaction_date = self.bill_date)
# compose bill tracking number
self.tracking_number = str(business_date).replace('-','') + "-" + str(tnconfig.value).zfill(zeroes)
tnconfig.value = str(int(tnconfig.value) + 1) # increase config tracking number
tnconfig.save()
except Exception, e:
print "Bill Rollback", e
transaction.rollback()
raise e
else:
super(Bill, self).save(*args, **kwargs) # Call the "real" save() method.
transaction.commit()
def usage_to_amount(self, usage=Decimal('0.00')):
"""
Convert/calculate consumption to amount based on defined rate charges
"""
rate_charges = self.account.account_type.rate.ratecharge_set.all()
amount = Decimal('0.0')
#assume incremental
for rate_charge in rate_charges:
if rate_charge.read_value_end < usage:
#print (rate_charge.read_value_end - rate_charge.read_value_start), charge.amount
if rate_charge.type == 'Constant':
amount += rate_charge.amount
else:
amount += (rate_charge.read_value_end - rate_charge.read_value_start) * rate_charge.amount
else:
#print usage - rate_charge.read_value_start, rate_charge.amount
if rate_charge.type == 'Constant':
amount += rate_charge.amount
else:
amount += (usage - rate_charge.read_value_start) * rate_charge.amount
break
return amount
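    # Worked example for usage_to_amount (editor's sketch; the rate brackets are
    # hypothetical and assumed to come back in ascending order):
    #   bracket 1: 0-10,  type 'Constant', amount 150   (minimum charge)
    #   bracket 2: 10-20, per unit,        amount 12
    #   bracket 3: 20-30, per unit,        amount 15
    # For usage = 24:
    #   bracket 1: read_value_end 10 < 24  -> amount += 150
    #   bracket 2: read_value_end 20 < 24  -> amount += (20 - 10) * 12 = 120
    #   bracket 3: read_value_end 30 >= 24 -> amount += (24 - 20) * 15 = 60, then break
    # Total: 330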
def save_overdue_penalty(self):
print "overdue save"
try:
print "got here"
if self.billing_schedule.penalty_set.filter(
account_id=self.account.id,
type='overdue').exists():
print "not empty, exiting", self.billing_schedule.penalty_set.filter(
account_id=self.account.id,
type='overdue').all()
return
except Penalty.DoesNotExist, e:
print "Penalty does not exist for this account"
pass
#check if there is a previous balance
try:
print "WARNING: PENALTY is calculated for now by checking latest balance"
previous_transaction = self.account.financialtransaction_set.latest('id')
previous_balance = previous_transaction.balance
if previous_balance > Decimal('0.0'):
Penalty.objects.create(
billing_schedule = self.billing_schedule,
account_id = self.account.id,
type = 'overdue',
amount = previous_balance * Decimal('0.05'),
penalty_date=now().date())
except FinancialTransaction.DoesNotExist, e:
pass
def save_read_charges(self):
"""
Insert ReadCharge to store breakdown of rates charges for a particular usage
"""
rate_charges = self.account.account_type.rate.ratecharge_set.all()
amount = Decimal('0.0')
#usage = self.meter_read.usage
usage, meterread = self.account.get_meter_reads_usage(period=self.billing_schedule.pk)
#self.meter_read_id = int(meterread.pk)
read_charges = []
for rate_charge in rate_charges:
if rate_charge.read_value_end < usage:
if rate_charge.type == 'Constant':
read_charges.append(
ReadCharge(meter_read_id = self.meter_read_id,
rate_charge_id = rate_charge.id,
quantity = (rate_charge.read_value_end -
rate_charge.read_value_start),
amount = rate_charge.amount))
else:
read_charges.append(
ReadCharge(meter_read_id = self.meter_read_id,
rate_charge_id = rate_charge.id,
quantity = (rate_charge.read_value_end -
rate_charge.read_value_start),
amount = (rate_charge.read_value_end -
rate_charge.read_value_start) *
rate_charge.amount))
else:
#print usage - rate_charge.read_value_start, rate_charge.amount
if rate_charge.type == 'Constant':
read_charges.append(
ReadCharge(meter_read_id = self.meter_read_id,
rate_charge_id = rate_charge.id,
quantity = max(Decimal('0.0'), (usage - rate_charge.read_value_start)),
amount = rate_charge.amount))
else:
read_charges.append(
ReadCharge(meter_read_id = self.meter_read_id,
rate_charge_id = rate_charge.id,
quantity = max(Decimal('0.0'), (usage - rate_charge.read_value_start)),
amount = max(Decimal('0.0'), (usage - rate_charge.read_value_start)) *
rate_charge.amount))
ReadCharge.objects.bulk_create(read_charges)
def create_deleted_copy(self,deletor='admin'):
deleted = BillDeleted()
for field in self._meta.fields:
setattr(deleted,field.name, getattr(self,field.name))
deleted.deleted_by = deletor
deleted.save()
class BillDeleted(CoreModel):
"""
This model stores archived copies of deleted Bills.
"""
financial_transaction = models.OneToOneField('FinancialTransaction', db_column='financial_transaction_id')
billing_schedule = models.ForeignKey('BillingSchedule', db_column='billing_schedule_id')
account = models.ForeignKey('Account', db_column='account_id')
meter_read = models.ForeignKey('MeterRead', db_column='meter_read_id')
bill_date = models.DateField(db_column='bill_date')
due_date = models.DateField(db_column='due_date')
previous_balance = models.DecimalField(decimal_places=2, max_digits=11, db_column='previous_balance')
current_charge = models.DecimalField(decimal_places=2, max_digits=11, db_column='current_charge', editable=False)
penalty_amount = models.DecimalField(decimal_places=2, max_digits=11, db_column='penalty_amount')
amount_due = models.DecimalField(decimal_places=2, max_digits=11, db_column='amount_due')
tracking_number = models.TextField(blank=True)
deleted_by = models.TextField(db_column='deleted_by', blank=False, editable=False)
deleted_on = models.DateTimeField(default=now, editable=False)
class Meta:
verbose_name_plural = "deleted bills"
db_table = 'bill_deleted'
# unique_together = ('billing_schedule', 'account')
def __unicode__(self):
return u'-'.join([unicode(self.account), unicode(self.billing_schedule), unicode(self.amount_due)])
def update(self, *args, **kwargs):
try:
super(BillDeleted, self).save(*args, **kwargs) # Call the "real" save() method.
except Exception, e:
print "deleted bill update failed", e
raise e
class Penalty(CoreModel):
billing_schedule = models.ForeignKey('BillingSchedule', db_column='billing_schedule_id')
account = models.ForeignKey('Account', db_column='account_id')
type = models.TextField(db_column='type', editable=False)
description = models.TextField(blank=True)
amount = models.DecimalField(decimal_places=2, max_digits=11, db_column='amount')
penalty_date = models.DateField(db_column='penalty_date')
status = models.TextField(blank=True)
class Meta:
verbose_name_plural = "penalties"
db_table = 'penalty'
class Customer(CoreModel):
last_name = models.TextField(db_column='last_name')
first_name = models.TextField(db_column='first_name')
middle_name = models.TextField(db_column='middle_name')
email_address = models.TextField(db_column='email_address', blank=True)
phone1 = models.TextField(blank=True)
phone2 = models.TextField(blank=True)
phone3 = models.TextField(blank=True)
class Meta:
db_table = 'customer'
ordering = ['last_name']
def __unicode__(self):
return self.last_name + u', ' + self.first_name + u' ' + self.middle_name
class FinancialTransaction(CoreModel):
"""
Debit (Charge) and Credit (Payment) for Account should always be recorded here
"""
type = models.TextField(db_column='type', editable=False)
account = models.ForeignKey('Account', db_column='account_id', editable=False)
amount = models.DecimalField(max_digits=11, decimal_places=2, editable=False)
balance = models.DecimalField(max_digits=11, decimal_places=2, editable=False)
transaction_date = models.DateField(blank=False)
audit_log = AuditLog()
class Meta:
db_table = 'financial_transaction'
ordering = ['account']
def __unicode__(self):
return '-'.join([unicode(self.account), self.type, unicode(self.amount)])
def is_credit(self):
if self.type in ['posted_payment', 'refund', 'credit','bill_deletion']:
return True
class MeterReadException(Exception):
pass
class MeterRead(CoreModel):
meter = models.ForeignKey('Meter', db_column='meter_id')
account = models.ForeignKey('Account', db_column='account_id')
billing_schedule = models.ForeignKey('BillingSchedule', db_column='billing_schedule_id')
read_date = models.DateField(db_column='read_date')
read_time = models.TimeField(db_column='read_time')
previous_reading = models.DecimalField(decimal_places=3, max_digits=11, db_column='previous_reading', editable=False)
current_reading = models.DecimalField(decimal_places=3, max_digits=11, db_column='current_reading')
usage = models.DecimalField(decimal_places=2, max_digits=11, editable=False)
status = models.TextField(default='new')
remarks = models.TextField(default='')
audit_log = AuditLog()
class Meta:
db_table = 'meter_read'
unique_together = ('account', 'meter', 'billing_schedule', 'read_date')
def __unicode__(self):
return u'-'.join([unicode(self.meter), unicode(self.read_date), unicode(self.usage)])
def save(self, *args, **kwargs):
#TODO:
#Check if there is already a meter_read for a particular meter in the billing schedule where read date falls.
#Add status of for the read
#billing_schedule = BillingSchedule.objects.get(reading_start_date__lte = self.read_date, start_date__gt = self.read_date)
print "billing_schedule: ", self.billing_schedule
billing_schedule = self.billing_schedule
try:
if self.meter.meterread_set.exists():
last_meter_read = self.meter.meterread_set.latest('previous_reading')
bill = self.account.get_bill(period=billing_schedule.pk)
if bill:
#raise MeterReadException('Meter read %d already exists for billing schedule %d' %(last_meter_read.id, billing_schedule.id))
raise MeterReadException('Bill %d already exists for billing schedule %s. Cannot accept more reads.. ' %(bill.id, str(billing_schedule)))
else:
self.previous_reading = self.meter.meterread_set.latest('previous_reading').current_reading
else:
if not self.previous_reading:
self.previous_reading = Decimal('0.0')
self.usage = Decimal(self.current_reading) - Decimal(self.previous_reading)
if self.usage < 0:
print "Negative usage is not permitted ----- " + str(self.usage) + " -- meter id: " + str(self.meter)
self.usage = Decimal('0.0')
#self.previous_reading = self.current_reading
self.current_reading = self.previous_reading
#raise MeterReadException('Negative usage not permitted')
super(MeterRead, self).save(*args, **kwargs) # Call the "real" save() method.
except Exception, e:
print "meter read save failed", e
raise e
def update(self, *args, **kwargs):
try:
super(MeterRead, self).save(*args, **kwargs) # Call the "real" save() method.
except Exception, e:
print "meter read update failed", e
raise e
class Meter(CoreModel):
meter_uid = models.TextField(db_column='meter_uid', unique=True)
type = models.TextField(blank=True)
date_installed = models.DateField(db_column='date_installed')
status = models.TextField(blank=True)
class Meta:
db_table = 'meter'
def __unicode__(self):
return unicode(self.meter_uid) + u'-' + unicode(self.id) + self.type
def activate(self, *args, **kwargs):
"""
Activate the Meter by initializing the first MeterRead
"""
try:
if self.meterread_set.exists():
pass
except MeterRead.DoesNotExist, e:
print "no meterreads yet, activating together with the account"
#'Activate' the meter by inserting a meterread entry with 0 previous and current reading
mr = MeterRead(meter=self, current_reading=Decimal('0.0'),
read_date = now().date(), read_time = now().time(),
)
self.status = 'active'
mr.save()
self.save()
@property
def latest_read(self):
        return self.meterread_set.latest('previous_reading').current_reading
class PostedPayment(CoreModel):
financial_transaction = models.OneToOneField('FinancialTransaction', db_column='financial_transaction_id')
payment = models.OneToOneField('Payment', db_column='payment_id')
posted_date = models.DateField(db_column='posted_date')
class Meta:
db_table = 'posted_payment'
@transaction.commit_manually
def save(self, *args, **kwargs):
if not self.posted_date:
self.posted_date = now().date()
try:
previous_transaction = self.payment.account.financialtransaction_set.latest('id')
previous_balance = previous_transaction.balance
except FinancialTransaction.DoesNotExist, e:
previous_balance = Decimal('0.0')
try:
self.financial_transaction = FinancialTransaction.objects.create(
account_id=self.payment.account.id,
amount = self.payment.amount,
balance = previous_balance - self.payment.amount,
type = self._meta.db_table,
transaction_date= self.posted_date)
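            # Illustrative sign convention (example values only, not from real data): a
            # previous balance of 500.00 and a payment of 200.00 leave a running balance
            # of 300.00, i.e. posted payments reduce the account's transaction balance.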
self.payment.status = 'posted'
self.payment.save()
super(PostedPayment, self).save(*args, **kwargs)
except Exception, e:
transaction.rollback()
raise e
else:
transaction.commit()
class Payment(CoreModel):
"""
To handle penalties, must check if there are unpaid balances
"""
account = models.ForeignKey('Account', db_column='account_id')
amount = models.DecimalField(decimal_places=2, max_digits=11, db_column='amount')
remarks = models.TextField(blank=True)
payment_date = models.DateField(db_column='payment_date')
type = models.TextField(blank=True)
status = models.TextField(blank=True, default="new")
check_number = models.TextField(blank=True,default="")
class Meta:
db_table = 'payment'
def save(self, *args, **kwargs):
if not self.payment_date:
self.payment_date = now().date()
super(Payment, self).save(*args, **kwargs)
def __unicode__(self):
return u'-'.join([unicode(self.account), unicode(self.payment_date),
unicode(self.amount)])
class RateCharge(CoreModel):
"""
Charge matrix for a particular Rate type.
Depending on used resource (water, electricity), different charges apply
"""
rate = models.ForeignKey('Rate', db_column='rate_id')
type = models.TextField()
sequence_number = models.IntegerField(db_column='sequence_number')
read_value_start = models.DecimalField(decimal_places=2, max_digits=11, db_column='read_value_start')
read_value_end = models.DecimalField(decimal_places=2, max_digits=11, db_column='read_value_end')
amount = models.DecimalField(decimal_places=2, max_digits=11, db_column='amount')
class Meta:
db_table = 'rate_charge'
ordering = ['type', 'read_value_start']
def __unicode__(self):
return u'-'.join([unicode(self.rate), self.type,
unicode(self.read_value_start), unicode(self.read_value_end),
unicode(self.amount)])
class Rate(CoreModel):
description = models.TextField()
class Meta:
db_table = 'rate'
def __unicode__(self):
return unicode(self.description)
class ReadCharge(CoreModel):
"""
Breakdown of the amount based on the usage in meter_read
"""
rate_charge = models.ForeignKey('RateCharge', db_column='rate_charge_id')
meter_read = models.ForeignKey('MeterRead', db_column='meter_read_id')
quantity = models.DecimalField(decimal_places=2, max_digits=11, db_column='quantity')
amount = models.DecimalField(decimal_places=2, max_digits=11, db_column='amount')
class Meta:
db_table = 'read_charge'
def __unicode__(self):
return u'-'.join([unicode(self.rate_charge), unicode(self.meter_read),
unicode(self.amount)])
class Notice(CoreModel):
billing_schedule = models.ForeignKey('BillingSchedule', db_column='billing_schedule_id')
account = models.ForeignKey('Account', db_column='account_id')
amount = models.DecimalField(decimal_places=2, max_digits=11, db_column='amount')
notice_date = models.DateField(db_column='notice_date')
due_date = models.DateField(db_column='due_date')
reconnection_fee = models.DecimalField(decimal_places=2, max_digits=11, db_column='reconnection_fee')
class Meta:
db_table = 'notice'
class FileRepo(CoreModel):
file_name = models.TextField()
file_path = models.TextField()
file_type = models.TextField()
file_description = models.TextField()
generation_date = models.DateField(db_column='generation_date')
business_date = models.DateField(db_column='business_date')
reading_period =models.TextField()
class Meta:
db_table = 'filerepo'
class Task(CoreModel):
name = models.TextField() # name of task e.g. "Generating Bills"
type = models.TextField() # type of task "Bills, Notices"
task_id = models.BigIntegerField(default=0)
jobs_total = models.IntegerField(db_column='jobs_total', default=0) # expected number of records to be processed by this task
jobs_done = models.IntegerField(db_column='jobs_done', default=0) # actual number of records processed
status = models.TextField() # 'pending', 'in progress', 'completed', 'failed'
result = models.TextField() # records processed, and records in error here
description = models.TextField() # task description (any)
business_date = models.DateField(db_column='business_date') # business_date executed
reading_period =models.TextField() # period executed
deleted = models.BooleanField(default=False) # records here should not be deleted, just marked
class Meta:
db_table = 'task'
def get_month_day_range(date):
"""
For a date 'date' returns the start and end date for the month of 'date'.
Month with 31 days:
>>> date = datetime.date(2011, 7, 27)
>>> get_month_day_range(date)
(datetime.date(2011, 7, 1), datetime.date(2011, 7, 31))
Month with 28 days:
>>> date = datetime.date(2011, 2, 15)
>>> get_month_day_range(date)
(datetime.date(2011, 2, 1), datetime.date(2011, 2, 28))
"""
from dateutil.relativedelta import relativedelta
last_day = date + relativedelta(day=1, months=+1, days=-1)
first_day = date + relativedelta(day=1)
return first_day, last_day
|
tombs/Water-Billing-System
|
waterbilling/core/models.py
|
Python
|
agpl-3.0
| 53,833 | 0.009529 |
from pbxproj import PBXGenericObject
class XCConfigurationList(PBXGenericObject):
def _get_comment(self):
info = self._get_section()
return f'Build configuration list for {info[0]} "{info[1]}"'
def _get_section(self):
objects = self.get_parent()
target_id = self.get_id()
for obj in objects.get_objects_in_section('PBXNativeTarget', 'PBXAggregateTarget'):
if target_id in obj.buildConfigurationList:
return obj.isa, obj.name
projects = filter(lambda o: target_id in o.buildConfigurationList, objects.get_objects_in_section('PBXProject'))
project = projects.__next__()
target = objects[project.targets[0]]
name = target.name if hasattr(target, 'name') else target.productName
return project.isa, name
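    # For illustration (hypothetical project): a configuration list owned by a
    # PBXNativeTarget named "App" would get the comment
    # 'Build configuration list for PBXNativeTarget "App"'.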
|
kronenthaler/mod-pbxproj
|
pbxproj/pbxsections/XCConfigurationList.py
|
Python
|
mit
| 821 | 0.002436 |
"""The ClimaCell integration."""
from __future__ import annotations
from datetime import timedelta
import logging
from math import ceil
from typing import Any
from pyclimacell import ClimaCellV3, ClimaCellV4
from pyclimacell.const import CURRENT, DAILY, FORECASTS, HOURLY, NOWCAST
from pyclimacell.exceptions import (
CantConnectException,
InvalidAPIKeyException,
RateLimitedException,
UnknownException,
)
from homeassistant.components.sensor import DOMAIN as SENSOR_DOMAIN
from homeassistant.components.weather import DOMAIN as WEATHER_DOMAIN
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import (
CONF_API_KEY,
CONF_API_VERSION,
CONF_LATITUDE,
CONF_LONGITUDE,
CONF_NAME,
)
from homeassistant.core import HomeAssistant
from homeassistant.helpers.aiohttp_client import async_get_clientsession
from homeassistant.helpers.entity import DeviceInfo
from homeassistant.helpers.update_coordinator import (
CoordinatorEntity,
DataUpdateCoordinator,
UpdateFailed,
)
from .const import (
ATTRIBUTION,
CC_ATTR_CLOUD_COVER,
CC_ATTR_CONDITION,
CC_ATTR_HUMIDITY,
CC_ATTR_OZONE,
CC_ATTR_PRECIPITATION,
CC_ATTR_PRECIPITATION_PROBABILITY,
CC_ATTR_PRECIPITATION_TYPE,
CC_ATTR_PRESSURE,
CC_ATTR_TEMPERATURE,
CC_ATTR_TEMPERATURE_HIGH,
CC_ATTR_TEMPERATURE_LOW,
CC_ATTR_VISIBILITY,
CC_ATTR_WIND_DIRECTION,
CC_ATTR_WIND_GUST,
CC_ATTR_WIND_SPEED,
CC_SENSOR_TYPES,
CC_V3_ATTR_CLOUD_COVER,
CC_V3_ATTR_CONDITION,
CC_V3_ATTR_HUMIDITY,
CC_V3_ATTR_OZONE,
CC_V3_ATTR_PRECIPITATION,
CC_V3_ATTR_PRECIPITATION_DAILY,
CC_V3_ATTR_PRECIPITATION_PROBABILITY,
CC_V3_ATTR_PRECIPITATION_TYPE,
CC_V3_ATTR_PRESSURE,
CC_V3_ATTR_TEMPERATURE,
CC_V3_ATTR_VISIBILITY,
CC_V3_ATTR_WIND_DIRECTION,
CC_V3_ATTR_WIND_GUST,
CC_V3_ATTR_WIND_SPEED,
CC_V3_SENSOR_TYPES,
CONF_TIMESTEP,
DEFAULT_TIMESTEP,
DOMAIN,
MAX_REQUESTS_PER_DAY,
)
_LOGGER = logging.getLogger(__name__)
PLATFORMS = [SENSOR_DOMAIN, WEATHER_DOMAIN]
def _set_update_interval(hass: HomeAssistant, current_entry: ConfigEntry) -> timedelta:
"""Recalculate update_interval based on existing ClimaCell instances and update them."""
api_calls = 4 if current_entry.data[CONF_API_VERSION] == 3 else 2
# We check how many ClimaCell configured instances are using the same API key and
    # calculate an interval that does not exceed the allowed number of requests. Divide
    # 90% of MAX_REQUESTS_PER_DAY by the number of API calls per update (four for the
    # v3 API, two for v4) so a buffer of API calls is left at the end of the day.
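    # Worked example with hypothetical figures: two instances sharing a key on the v4
    # API (2 calls per update) and a daily cap of 500 requests would give
    # ceil((24 * 60 * 2 * 2) / (500 * 0.9)) = ceil(12.8) = 13 minutes.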
other_instance_entry_ids = [
entry.entry_id
for entry in hass.config_entries.async_entries(DOMAIN)
if entry.entry_id != current_entry.entry_id
and entry.data[CONF_API_KEY] == current_entry.data[CONF_API_KEY]
]
interval = timedelta(
minutes=(
ceil(
(24 * 60 * (len(other_instance_entry_ids) + 1) * api_calls)
/ (MAX_REQUESTS_PER_DAY * 0.9)
)
)
)
for entry_id in other_instance_entry_ids:
if entry_id in hass.data[DOMAIN]:
hass.data[DOMAIN][entry_id].update_interval = interval
return interval
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Set up ClimaCell API from a config entry."""
hass.data.setdefault(DOMAIN, {})
params = {}
# If config entry options not set up, set them up
if not entry.options:
params["options"] = {
CONF_TIMESTEP: DEFAULT_TIMESTEP,
}
else:
# Use valid timestep if it's invalid
timestep = entry.options[CONF_TIMESTEP]
if timestep not in (1, 5, 15, 30):
if timestep <= 2:
timestep = 1
elif timestep <= 7:
timestep = 5
elif timestep <= 20:
timestep = 15
else:
timestep = 30
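        # Illustrative values: a stored timestep of 12 snaps to 15 and 25 snaps to 30;
        # anything outside 1/5/15/30 is bucketed into one of those supported values.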
new_options = entry.options.copy()
new_options[CONF_TIMESTEP] = timestep
params["options"] = new_options
# Add API version if not found
if CONF_API_VERSION not in entry.data:
new_data = entry.data.copy()
new_data[CONF_API_VERSION] = 3
params["data"] = new_data
if params:
hass.config_entries.async_update_entry(entry, **params)
api_class = ClimaCellV3 if entry.data[CONF_API_VERSION] == 3 else ClimaCellV4
api = api_class(
entry.data[CONF_API_KEY],
entry.data.get(CONF_LATITUDE, hass.config.latitude),
entry.data.get(CONF_LONGITUDE, hass.config.longitude),
session=async_get_clientsession(hass),
)
coordinator = ClimaCellDataUpdateCoordinator(
hass,
entry,
api,
_set_update_interval(hass, entry),
)
await coordinator.async_config_entry_first_refresh()
hass.data[DOMAIN][entry.entry_id] = coordinator
hass.config_entries.async_setup_platforms(entry, PLATFORMS)
return True
async def async_unload_entry(hass: HomeAssistant, config_entry: ConfigEntry) -> bool:
"""Unload a config entry."""
unload_ok = await hass.config_entries.async_unload_platforms(
config_entry, PLATFORMS
)
hass.data[DOMAIN].pop(config_entry.entry_id)
if not hass.data[DOMAIN]:
hass.data.pop(DOMAIN)
return unload_ok
class ClimaCellDataUpdateCoordinator(DataUpdateCoordinator):
"""Define an object to hold ClimaCell data."""
def __init__(
self,
hass: HomeAssistant,
config_entry: ConfigEntry,
api: ClimaCellV3 | ClimaCellV4,
update_interval: timedelta,
) -> None:
"""Initialize."""
self._config_entry = config_entry
self._api_version = config_entry.data[CONF_API_VERSION]
self._api = api
self.name = config_entry.data[CONF_NAME]
self.data = {CURRENT: {}, FORECASTS: {}}
super().__init__(
hass,
_LOGGER,
name=config_entry.data[CONF_NAME],
update_interval=update_interval,
)
async def _async_update_data(self) -> dict[str, Any]:
"""Update data via library."""
data = {FORECASTS: {}}
try:
if self._api_version == 3:
data[CURRENT] = await self._api.realtime(
[
CC_V3_ATTR_TEMPERATURE,
CC_V3_ATTR_HUMIDITY,
CC_V3_ATTR_PRESSURE,
CC_V3_ATTR_WIND_SPEED,
CC_V3_ATTR_WIND_DIRECTION,
CC_V3_ATTR_CONDITION,
CC_V3_ATTR_VISIBILITY,
CC_V3_ATTR_OZONE,
CC_V3_ATTR_WIND_GUST,
CC_V3_ATTR_CLOUD_COVER,
CC_V3_ATTR_PRECIPITATION_TYPE,
*(sensor_type.key for sensor_type in CC_V3_SENSOR_TYPES),
]
)
data[FORECASTS][HOURLY] = await self._api.forecast_hourly(
[
CC_V3_ATTR_TEMPERATURE,
CC_V3_ATTR_WIND_SPEED,
CC_V3_ATTR_WIND_DIRECTION,
CC_V3_ATTR_CONDITION,
CC_V3_ATTR_PRECIPITATION,
CC_V3_ATTR_PRECIPITATION_PROBABILITY,
],
None,
timedelta(hours=24),
)
data[FORECASTS][DAILY] = await self._api.forecast_daily(
[
CC_V3_ATTR_TEMPERATURE,
CC_V3_ATTR_WIND_SPEED,
CC_V3_ATTR_WIND_DIRECTION,
CC_V3_ATTR_CONDITION,
CC_V3_ATTR_PRECIPITATION_DAILY,
CC_V3_ATTR_PRECIPITATION_PROBABILITY,
],
None,
timedelta(days=14),
)
data[FORECASTS][NOWCAST] = await self._api.forecast_nowcast(
[
CC_V3_ATTR_TEMPERATURE,
CC_V3_ATTR_WIND_SPEED,
CC_V3_ATTR_WIND_DIRECTION,
CC_V3_ATTR_CONDITION,
CC_V3_ATTR_PRECIPITATION,
],
None,
timedelta(
minutes=min(300, self._config_entry.options[CONF_TIMESTEP] * 30)
),
self._config_entry.options[CONF_TIMESTEP],
)
else:
return await self._api.realtime_and_all_forecasts(
[
CC_ATTR_TEMPERATURE,
CC_ATTR_HUMIDITY,
CC_ATTR_PRESSURE,
CC_ATTR_WIND_SPEED,
CC_ATTR_WIND_DIRECTION,
CC_ATTR_CONDITION,
CC_ATTR_VISIBILITY,
CC_ATTR_OZONE,
CC_ATTR_WIND_GUST,
CC_ATTR_CLOUD_COVER,
CC_ATTR_PRECIPITATION_TYPE,
*(sensor_type.key for sensor_type in CC_SENSOR_TYPES),
],
[
CC_ATTR_TEMPERATURE_LOW,
CC_ATTR_TEMPERATURE_HIGH,
CC_ATTR_WIND_SPEED,
CC_ATTR_WIND_DIRECTION,
CC_ATTR_CONDITION,
CC_ATTR_PRECIPITATION,
CC_ATTR_PRECIPITATION_PROBABILITY,
],
)
except (
CantConnectException,
InvalidAPIKeyException,
RateLimitedException,
UnknownException,
) as error:
raise UpdateFailed from error
return data
class ClimaCellEntity(CoordinatorEntity):
"""Base ClimaCell Entity."""
def __init__(
self,
config_entry: ConfigEntry,
coordinator: ClimaCellDataUpdateCoordinator,
api_version: int,
) -> None:
"""Initialize ClimaCell Entity."""
super().__init__(coordinator)
self.api_version = api_version
self._config_entry = config_entry
@staticmethod
def _get_cc_value(
weather_dict: dict[str, Any], key: str
) -> int | float | str | None:
"""
Return property from weather_dict.
Used for V3 API.
"""
items = weather_dict.get(key, {})
# Handle cases where value returned is a list.
# Optimistically find the best value to return.
if isinstance(items, list):
if len(items) == 1:
return items[0].get("value")
return next(
(item.get("value") for item in items if "max" in item),
next(
(item.get("value") for item in items if "min" in item),
items[0].get("value", None),
),
)
return items.get("value")
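    # Illustration with a made-up V3 payload: for weather_dict = {"temp": [{"min": 1,
    # "value": 10}, {"max": 1, "value": 20}]}, _get_cc_value(weather_dict, "temp")
    # returns 20, because entries carrying a "max" key are preferred.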
def _get_current_property(self, property_name: str) -> int | str | float | None:
"""
Get property from current conditions.
Used for V4 API.
"""
return self.coordinator.data.get(CURRENT, {}).get(property_name)
@property
def attribution(self):
"""Return the attribution."""
return ATTRIBUTION
@property
def device_info(self) -> DeviceInfo:
"""Return device registry information."""
return {
"identifiers": {(DOMAIN, self._config_entry.data[CONF_API_KEY])},
"name": "ClimaCell",
"manufacturer": "ClimaCell",
"sw_version": f"v{self.api_version}",
"entry_type": "service",
}
|
sander76/home-assistant
|
homeassistant/components/climacell/__init__.py
|
Python
|
apache-2.0
| 11,985 | 0.000834 |
"""
Course Advanced Settings page
"""
from bok_choy.promise import EmptyPromise
from .course_page import CoursePage
from .utils import press_the_notification_button, type_in_codemirror, get_codemirror_value
KEY_CSS = '.key h3.title'
UNDO_BUTTON_SELECTOR = ".action-item .action-undo"
MANUAL_BUTTON_SELECTOR = ".action-item .action-cancel"
MODAL_SELECTOR = ".validation-error-modal-content"
ERROR_ITEM_NAME_SELECTOR = ".error-item-title strong"
ERROR_ITEM_CONTENT_SELECTOR = ".error-item-message"
SETTINGS_NAME_SELECTOR = ".is-not-editable"
class AdvancedSettingsPage(CoursePage):
"""
Course Advanced Settings page.
"""
url_path = "settings/advanced"
def is_browser_on_page(self):
def _is_finished_loading():
return len(self.q(css='.course-advanced-policy-list-item')) > 0
EmptyPromise(_is_finished_loading, 'Finished rendering the advanced policy items.').fulfill()
return self.q(css='body.advanced').present
def wait_for_modal_load(self):
"""
Wait for validation response from the server, and make sure that
the validation error modal pops up.
This method should only be called when it is guaranteed that there're
validation errors in the settings changes.
"""
self.wait_for_ajax()
self.wait_for_element_presence(MODAL_SELECTOR, 'Validation Modal is present')
def refresh_and_wait_for_load(self):
"""
Refresh the page and wait for all resources to load.
"""
self.browser.refresh()
self.wait_for_page()
def coordinates_for_scrolling(self, coordinates_for):
"""
Get the x and y coordinates of elements
"""
cordinates_dict = self.browser.find_element_by_css_selector(coordinates_for)
location = cordinates_dict.location
for key, val in location.iteritems():
if key == 'x':
x_axis = val
elif key == 'y':
y_axis = val
return x_axis, y_axis
def undo_changes_via_modal(self):
"""
Trigger clicking event of the undo changes button in the modal.
Wait for the undoing process to load via ajax call.
Before that Scroll so the button is clickable on all browsers
"""
self.browser.execute_script("window.scrollTo" + str(self.coordinates_for_scrolling(UNDO_BUTTON_SELECTOR)))
self.q(css=UNDO_BUTTON_SELECTOR).click()
self.wait_for_ajax()
def trigger_manual_changes(self):
"""
Trigger click event of the manual changes button in the modal.
No need to wait for any ajax.
Before that Scroll so the button is clickable on all browsers
"""
self.browser.execute_script("window.scrollTo" + str(self.coordinates_for_scrolling(MANUAL_BUTTON_SELECTOR)))
self.q(css=MANUAL_BUTTON_SELECTOR).click()
def is_validation_modal_present(self):
"""
Checks if the validation modal is present.
"""
return self.q(css=MODAL_SELECTOR).present
def get_error_item_names(self):
"""
Returns a list of display names of all invalid settings.
"""
return self.q(css=ERROR_ITEM_NAME_SELECTOR).text
def get_error_item_messages(self):
"""
Returns a list of error messages of all invalid settings.
"""
return self.q(css=ERROR_ITEM_CONTENT_SELECTOR).text
def _get_index_of(self, expected_key):
for i, element in enumerate(self.q(css=KEY_CSS)):
# Sometimes get stale reference if I hold on to the array of elements
key = self.q(css=KEY_CSS).nth(i).text[0]
if key == expected_key:
return i
return -1
def save(self):
press_the_notification_button(self, "Save")
def cancel(self):
press_the_notification_button(self, "Cancel")
def set(self, key, new_value):
index = self._get_index_of(key)
type_in_codemirror(self, index, new_value)
self.save()
def get(self, key):
index = self._get_index_of(key)
return get_codemirror_value(self, index)
def set_values(self, key_value_map):
"""
Make multiple settings changes and save them.
"""
for key, value in key_value_map.iteritems():
index = self._get_index_of(key)
type_in_codemirror(self, index, value)
self.save()
def get_values(self, key_list):
"""
Get a key-value dictionary of all keys in the given list.
"""
result_map = {}
for key in key_list:
index = self._get_index_of(key)
val = get_codemirror_value(self, index)
result_map[key] = val
return result_map
@property
def displayed_settings_names(self):
"""
Returns all settings displayed on the advanced settings page/screen/modal/whatever
We call it 'name', but it's really whatever is embedded in the 'id' element for each field
"""
query = self.q(css=SETTINGS_NAME_SELECTOR)
return query.attrs('id')
@property
def expected_settings_names(self):
"""
Returns a list of settings expected to be displayed on the Advanced Settings screen
Should match the list of settings found in cms/djangoapps/models/settings/course_metadata.py
If a new setting is added to the metadata list, this test will fail and you must update it.
Basically this guards against accidental exposure of a field on the Advanced Settings screen
"""
return [
'advanced_modules',
'allow_anonymous',
'allow_anonymous_to_peers',
'allow_public_wiki_access',
'cert_html_view_overrides',
'cert_name_long',
'cert_name_short',
'certificates_display_behavior',
'course_image',
'cosmetic_display_price',
'advertised_start',
'announcement',
'display_name',
'info_sidebar_name',
'is_new',
'ispublic',
'issue_badges',
'max_student_enrollments_allowed',
'no_grade',
'display_coursenumber',
'display_organization',
'catalog_visibility',
'chrome',
'days_early_for_beta',
'default_tab',
'disable_progress_graph',
'discussion_blackouts',
'discussion_sort_alpha',
'discussion_topics',
'due',
'due_date_display_format',
'edxnotes',
'use_latex_compiler',
'video_speed_optimizations',
'enrollment_domain',
'html_textbooks',
'invitation_only',
'lti_passports',
'matlab_api_key',
'max_attempts',
'mobile_available',
'rerandomize',
'remote_gradebook',
'annotation_token_secret',
'showanswer',
'show_calculator',
'show_chat',
'show_reset_button',
'static_asset_path',
'text_customization',
'annotation_storage_url',
'social_sharing_url',
'video_bumper',
'cert_html_view_enabled',
'enable_proctored_exams',
'enable_timed_exams',
]
|
xingyepei/edx-platform
|
common/test/acceptance/pages/studio/settings_advanced.py
|
Python
|
agpl-3.0
| 7,464 | 0.001742 |
from RaspiBot import Methods, sleep
# represents btn color and action on press in a state
class Btn:
def __init__(self, red, green, nextid = None):
self.red = red
self.green = green
self.next = nextid
# represents one menu state with message, button and own action
class State:
def __init__(self, title, param, btns, func):
self.title = title
self.param = param
self.func = func
self.btns = btns
def run(self):
# write messages
Methods.clearLCD()
Methods.writeLCD(self.title, 0, 0)
Methods.writeLCD(self.param, 0, 1)
# set button colors
for i in range(len(self.btns)):
Methods.setRedLED(i + 1, self.btns[i].red)
Methods.setGreenLED(i + 1, self.btns[i].green)
# run action
return self.func(self)
# represents whole menu
class StateMachine:
state = {}
states = 1
# returns ids to create new states
def getStates(self, num): return range(self.states, self.states + num)
# define state of specific id
def setState(self, id, *StateArgs):
self.state[id] = State(*StateArgs)
self.states += 1
return self.states
# run machine
def run(self, id):
while id != None: id = self.state[id].run()
# navigate through menu
def select(state):
while True:
if Methods.isBtnPressed(1):
Methods.waitForBtnRelease(1)
return state.btns[0].next
if Methods.isBtnPressed(2):
Methods.waitForBtnRelease(2)
return state.btns[1].next
if Methods.isBtnPressed(3):
Methods.waitForBtnRelease(3)
return state.btns[2].next
sleep(0.1)
# measure sharp values
def cal_sharps(state):
i = 0
sharp = 1
while True:
# measure left sharp
if Methods.isBtnPressed(1):
sharp = 1
Methods.writeLCD("left ", 5, 0)
Methods.waitForBtnRelease(1)
# measure right sharp
if Methods.isBtnPressed(2):
sharp = 2
Methods.writeLCD("right", 5, 0)
Methods.waitForBtnRelease(2)
# exit
if Methods.isBtnPressed(3):
Methods.waitForBtnRelease(3)
return state.btns[2].next
if i % 8 == 0:
Methods.writeLCD("%i" % Methods.getSharp(sharp, "raw"), 12, 0)
sleep(0.1)
i += 1
# measure wheel encoders after driving for a specific time
def cal_radenc(state):
time = 1
while True:
# increase time
if Methods.isBtnPressed(1):
time = (time + 1) % 10
Methods.writeLCD("%i" % time, 5, 1)
Methods.waitForBtnRelease(1)
# start driving
if Methods.isBtnPressed(2):
# reset display
Methods.writeLCD("l: ---- r: ---- ", 0, 0)
Methods.waitForBtnRelease(2)
# drive
Methods.resetEncoders()
Methods.setMotors(50, 50)
sleep(time)
# get encoder values
Methods.stopMotors()
Methods.writeLCD("l:%5i r:%5i" % tuple(Methods.getEncoders("raw")), 0, 0)
# exit
if Methods.isBtnPressed(3):
Methods.waitForBtnRelease(3)
return state.btns[2].next
sleep(0.1)
# create new state machine
Calibrate = StateMachine()
# S:status C:calibrate
(S_EXIT, S_START, S_C_IRS, S_C_RNC
) = Calibrate.getStates(4)
# start menu
Calibrate.setState(S_START, "calibration tool", "sharp radenc x",
[Btn(0, 100, S_C_IRS), Btn(0, 100, S_C_RNC), Btn(100, 0, S_EXIT)], select
)
# calibrate sharps
Calibrate.setState(S_C_IRS, " sharp", "left right x",
[Btn(0, 100), Btn(0, 100), Btn(100, 0, S_START)], cal_sharps
)
# calibrate encoders
Calibrate.setState(S_C_RNC, "l: r: ", "time 1s start x",
[Btn(100, 100), Btn(0, 100), Btn(100, 0, S_START)], cal_radenc
)
# exit menu
Calibrate.setState(S_EXIT, "", "",
[Btn(0, 0), Btn(0, 0), Btn(0, 0)], lambda _: Methods.cleanup()
)
# run machine at start
Calibrate.run(S_START)
|
alex-Symbroson/BotScript
|
BotScript/res/calibrate.py
|
Python
|
mit
| 4,128 | 0.004845 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2016-09-21 15:18
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('article', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='article',
name='content',
field=models.TextField(blank=True),
),
migrations.AlterField(
model_name='article',
name='desc',
field=models.TextField(blank=True),
),
]
|
wenxiaomao1023/wenxiaomao
|
article/migrations/0002_auto_20160921_1518.py
|
Python
|
mit
| 587 | 0 |
"""
@author: Nikhith !!
"""
from pycricbuzz import Cricbuzz
import json
import sys
""" Writing a CLI for Live score """
try:
cric_obj = Cricbuzz() # cric_obj contains object instance of Cricbuzz Class
matches = cric_obj.matches()
except:
print "Connection dobhindhi bey!"
sys.exit(0)
# matches func is returning List of dictionaries
""" Key items in match dict : 1) status -- ex) Starts on Jun 15 at 09:30 GMT
2) mnum -- ex) 2nd Semi-Final (A2 VS B1)
3) mchdesc-- ex) BAN vs IND
4) srs -- ex) ICC Champions Trophy, 2017
5) mchstate- ex) preview / abandon / Result / complete
6) type -- ex) ODI
7) id -- ex) 4 / 6 (anything random given)
"""
"""CLI must contain commands for
-- current matches
-- selecting match by match id
-- getCommentary
"""
def upcomingmatches():
"""Prints upcoming matches list
"""
count = 1
for match in matches:
if match['mchstate'] == "preview":
print str(count)+". "+str(match['mchdesc'])+ " - "+ str(match['srs'])+"- - "+str(match['status'])
count = count + 1
def currentlive():
"""Prints Current LIVE MATCHES"""
count = 1
for match in matches:
#print str(match['mchdesc']) + " match id: " + str(match['mchstate'])
if (match['mchstate'] == "innings break" ) :
print str(match['mchdesc'])+" match id: "+str(match['id'])
count = count + 1
if (match['mchstate'] == "inprogress" ) :
print str(match['mchdesc'])+" match id: "+str(match['id'])
count = count + 1
if match['mchstate'] == "delay":
print str(match['mchdesc'])+" -> match has been delayed due to rain..! Enjoy the drizzle..!!"
    if count == 1:
        print "\nNO LIVE MATCHES RIGHT NOW!\n"
        print "UPCOMING MATCHES TODAY!"
        upcomingmatches()
        sys.exit(0)
    else:
        id = input("Enter corresponding match id : ")
        gotolive(id)
    return id
def calculate_runrate(runs, overs):
balls = str(overs)
arr = balls.split('.')
if len(arr) == 2:
rr = float(int(arr[0])*6)+int(arr[1])
else:
rr = float(int(arr[0])*6)
return (float(runs)/rr)*6
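# Worked example (made-up score): 45 runs after "7.3" overs -> 7*6 + 3 = 45 balls,
# so calculate_runrate(45, 7.3) returns (45/45.0)*6 = 6.0 runs per over.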
def gotolive(matchid):
batobj = cric_obj.livescore(matchid)['batting']
bowlobj = cric_obj.livescore(matchid)['bowling']
print "\n "+str(batobj['team'])+" vs "+str(bowlobj['team'])+"\n"
print " "+str(cric_obj.livescore(matchid)['matchinfo']['status'])+"\n"
if (bowlobj['score'] == []):
print "1st INNINGS: "+str(batobj['team'])+" => "+str(batobj['score'][0]['runs'])+"/"+str(batobj['score'][0]['wickets'])+" ("+str(batobj['score'][0]['overs'])+" Overs)"
print "Batting:"
try:
print " " + str(batobj['batsman'][0]['name']) + " : " + str(batobj['batsman'][0]['runs']) + " (" + str(batobj['batsman'][0]['balls']) + ")"
print " " + str(batobj['batsman'][1]['name']) + " : " + str(batobj['batsman'][1]['runs']) + " (" + str(batobj['batsman'][1]['balls']) + ")"
except:
print "Wicket!!!!"
print "Bowling:"
print " " + str(bowlobj['bowler'][0]['name']) + " : " + str(bowlobj['bowler'][0]['runs']) + " /" + str(bowlobj['bowler'][0]['wickets']) + " (" + str(bowlobj['bowler'][0]['overs']) + ")"
print " " + str(bowlobj['bowler'][1]['name']) + " : " + str(bowlobj['bowler'][1]['runs']) + " /" + str(bowlobj['bowler'][1]['wickets']) + " (" + str(bowlobj['bowler'][1]['overs']) + ")"
print "Runrate:"
print ' {:1.2f}'.format(calculate_runrate(str(batobj['score'][0]['runs']),str(batobj['score'][0]['overs'])))
else:
print "1st INNINGS: "+str(bowlobj['team'])+" => "+str(bowlobj['score'][0]['runs'])+"/"+str(bowlobj['score'][0]['wickets'])+" ("+str(bowlobj['score'][0]['overs'])+" Overs)"
print "2nd INNINGS: "+str(batobj['team'])+" => "+str(batobj['score'][0]['runs'])+"/"+str(batobj['score'][0]['wickets'])+" ("+str(batobj['score'][0]['overs'])+" Overs)"
print "Batting:"
try:
print " "+str(batobj['batsman'][0]['name'])+" : "+str(batobj['batsman'][0]['runs'])+" ("+str(batobj['batsman'][0]['balls'])+")"
print " " + str(batobj['batsman'][1]['name']) + " : " + str(batobj['batsman'][1]['runs']) + " (" + str(batobj['batsman'][1]['balls']) + ")"
except:
print "Wicket!!"
print "Bowling:"
print " " + str(bowlobj['bowler'][0]['name']) + " : " + str(bowlobj['bowler'][0]['runs'])+" /"+str(bowlobj['bowler'][0]['wickets']) + " (" + str(bowlobj['bowler'][0]['overs']) + ")"
print " " + str(bowlobj['bowler'][1]['name']) + " : " + str(bowlobj['bowler'][1]['runs']) + " /" + str(bowlobj['bowler'][1]['wickets']) + " (" + str(bowlobj['bowler'][1]['overs']) + ")"
print "Summary:"
print " " + str(cric_obj.livescore(matchid)['matchinfo']['status'])
def last12Balls():
pass
def commentary(matchid):
print "\nCommentary: "
try:
for i in range(6):
print " "+str(cric_obj.commentary(matchid)['commentary'][i])
print "************************************************************************************************"
except:
print "No running commentary.. now..!!"
if __name__ == '__main__':
matchid=currentlive()
commentary(matchid)
|
nikkitricky/nikbuzz
|
score.py
|
Python
|
mit
| 5,706 | 0.006484 |
# Create your views here.
from rest_framework import viewsets
from offers.models import Offer, OfferHistory, OfferReview
from offers.serializers import (
OfferSerializer,
OfferHistorySerializer,
OfferReviewSerializer
)
from shops.serializers import ShopSerializer
from rest_framework import status
from rest_framework.response import Response
from math import radians, cos, sin, asin, sqrt
from accounts.models import User
def haversine(lon1, lat1, lon2, lat2):
"""
Calculate the great circle distance between two points
on the earth (specified in decimal degrees)
"""
# convert decimal degrees to radians
lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2])
# haversine formula
dlon = lon2 - lon1
dlat = lat2 - lat1
a = sin(dlat/2)**2 + cos(lat1) * cos(lat2) * sin(dlon/2)**2
c = 2 * asin(sqrt(a))
km = 6367 * c
return km
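# Quick sanity check with illustrative coordinates: haversine(0.0, 0.0, 1.0, 0.0)
# is roughly 111 km, i.e. one degree of longitude along the equator.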
class OfferViewSet(viewsets.ModelViewSet):
"""
API endpoint that allows users to be viewed or edited.
"""
queryset = Offer.objects.all()
serializer_class = OfferSerializer
def get_queryset(self):
queryset = Offer.objects.all()
email = self.request.query_params.get('email', None)
# user = User.objects.get(email=email)
if email:
queryset = queryset.filter(shop__user__email=email)
return queryset
class OfferHistoryViewSet(viewsets.ModelViewSet):
"""
API endpoint that allows users to be viewed or edited.
"""
queryset = OfferHistory.objects.all()
serializer_class = OfferHistorySerializer
class OfferReviewViewSet(viewsets.ModelViewSet):
"""
API endpoint that allows users to be viewed or edited.
"""
queryset = OfferReview.objects.all()
serializer_class = OfferReviewSerializer
def get_queryset(self):
queryset = OfferReview.objects.all()
offer_id = self.request.query_params.get('offer', None)
if offer_id:
queryset = queryset.filter(offer__id=offer_id)
return queryset
class NearestOffer(viewsets.ViewSet):
queryset = Offer.objects.all()
# serializer_class = OfferSerializer
def list(self, request):
params = request.query_params
offers = []
if params:
ulat = float(params['lat'])
ulon = float(params['lon'])
for offer in Offer.objects.select_related('shop').all():
olat = float(offer.shop.latitude)
olon = float(offer.shop.longitude)
                distance = haversine(ulon, ulat, olon, olat)
                offer_data = OfferSerializer(offer).data
                offer_data['distance'] = float(distance)
offer_data['shop'] = ShopSerializer(offer.shop).data
offers.append(offer_data)
return Response(
offers,
status=status.HTTP_200_OK)
class OfferData(viewsets.ViewSet):
queryset = Offer.objects.all()
# serializer_class = OfferSerializer
def retrieve(self, request, pk=None):
if pk:
offer = Offer.objects.get(pk=pk)
total = offer.offer_reviews.all().count()
if not total == 0:
positive = offer.offer_reviews.filter(sentiment="positive").count()
negative = offer.offer_reviews.filter(sentiment="negative").count()
neutral = offer.offer_reviews.filter(sentiment="neutral").count()
response = {
"positive": (float(positive) / total) * 100,
"negative": (float(negative) / total) * 100,
"neutral": (float(neutral) / total) * 100,
}
else:
response = {
"positive": float(0),
"negative": float(0),
"neutral": float(0),
}
return Response(
response,
status=status.HTTP_200_OK)
|
cliffton/localsecrets
|
offers/views.py
|
Python
|
mit
| 3,993 | 0.001002 |
def represents_int(value):
try:
int(value)
return True
except ValueError:
return False
def bytes_to_gib(byte_value, round_digits=2):
return round(byte_value / 1024 / 1024 / float(1024), round_digits)
def count_to_millions(count_value, round_digits=3):
return round(count_value / float(1000000), round_digits)
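# Examples with illustrative inputs: bytes_to_gib(3221225472) returns 3.0 and
# count_to_millions(1500000) returns 1.5.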
|
skomendera/PyMyTools
|
providers/value.py
|
Python
|
mit
| 359 | 0 |
from .image import Image
from .product_category import ProductCategory
from .supplier import Supplier, PaymentMethod
from .product import Product
from .product import ProductImage
from .enum_values import EnumValues
from .related_values import RelatedValues
from .customer import Customer
from .expense import Expense
from .incoming import Incoming
from .shipping import Shipping, ShippingLine
from .receiving import Receiving, ReceivingLine
from .inventory_transaction import InventoryTransaction, InventoryTransactionLine
from .purchase_order import PurchaseOrder, PurchaseOrderLine
from .sales_order import SalesOrder, SalesOrderLine
from .user import User
from .role import Role, roles_users
from .organization import Organization
from .inventory_in_out_link import InventoryInOutLink
from .aspects import update_menemonic
from .product_inventory import ProductInventory
|
betterlife/psi
|
psi/app/models/__init__.py
|
Python
|
mit
| 875 | 0.001143 |
# Copyright 2020 The dm_control Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Bowl arena with bumps."""
from dm_control import composer
from dm_control.locomotion.arenas import assets as locomotion_arenas_assets
from dm_control.mujoco.wrapper import mjbindings
import numpy as np
from scipy import ndimage
mjlib = mjbindings.mjlib
_TOP_CAMERA_DISTANCE = 100
_TOP_CAMERA_Y_PADDING_FACTOR = 1.1
# Constants related to terrain generation.
_TERRAIN_SMOOTHNESS = .5 # 0.0: maximally bumpy; 1.0: completely smooth.
_TERRAIN_BUMP_SCALE = .2 # Spatial scale of terrain bumps (in meters).
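# With the hfield size used below (6 m half-extent, i.e. 12 m across), a 0.2 m bump
# scale yields a 60x60 grid of random bumps that is zoomed up to the 201x201
# heightfield resolution in initialize_episode.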
class Bowl(composer.Arena):
"""A bowl arena with sinusoidal bumps."""
def _build(self, size=(10, 10), aesthetic='default', name='bowl'):
super()._build(name=name)
self._hfield = self._mjcf_root.asset.add(
'hfield',
name='terrain',
nrow=201,
ncol=201,
size=(6, 6, 0.5, 0.1))
if aesthetic != 'default':
ground_info = locomotion_arenas_assets.get_ground_texture_info(aesthetic)
sky_info = locomotion_arenas_assets.get_sky_texture_info(aesthetic)
texturedir = locomotion_arenas_assets.get_texturedir(aesthetic)
self._mjcf_root.compiler.texturedir = texturedir
self._texture = self._mjcf_root.asset.add(
'texture', name='aesthetic_texture', file=ground_info.file,
type=ground_info.type)
self._material = self._mjcf_root.asset.add(
'material', name='aesthetic_material', texture=self._texture,
texuniform='true')
self._skybox = self._mjcf_root.asset.add(
'texture', name='aesthetic_skybox', file=sky_info.file,
type='skybox', gridsize=sky_info.gridsize,
gridlayout=sky_info.gridlayout)
self._terrain_geom = self._mjcf_root.worldbody.add(
'geom',
name='terrain',
type='hfield',
pos=(0, 0, -0.01),
hfield='terrain',
material=self._material)
self._ground_geom = self._mjcf_root.worldbody.add(
'geom',
type='plane',
name='groundplane',
size=list(size) + [0.5],
material=self._material)
else:
self._terrain_geom = self._mjcf_root.worldbody.add(
'geom',
name='terrain',
type='hfield',
rgba=(0.2, 0.3, 0.4, 1),
pos=(0, 0, -0.01),
hfield='terrain')
self._ground_geom = self._mjcf_root.worldbody.add(
'geom',
type='plane',
name='groundplane',
rgba=(0.2, 0.3, 0.4, 1),
size=list(size) + [0.5])
self._mjcf_root.visual.headlight.set_attributes(
ambient=[.4, .4, .4], diffuse=[.8, .8, .8], specular=[.1, .1, .1])
self._regenerate = True
def regenerate(self, random_state):
# regeneration of the bowl requires physics, so postponed to initialization.
self._regenerate = True
def initialize_episode(self, physics, random_state):
if self._regenerate:
self._regenerate = False
# Get heightfield resolution, assert that it is square.
res = physics.bind(self._hfield).nrow
assert res == physics.bind(self._hfield).ncol
# Sinusoidal bowl shape.
row_grid, col_grid = np.ogrid[-1:1:res*1j, -1:1:res*1j]
radius = np.clip(np.sqrt(col_grid**2 + row_grid**2), .1, 1)
bowl_shape = .5 - np.cos(2*np.pi*radius)/2
# Random smooth bumps.
terrain_size = 2 * physics.bind(self._hfield).size[0]
bump_res = int(terrain_size / _TERRAIN_BUMP_SCALE)
bumps = random_state.uniform(_TERRAIN_SMOOTHNESS, 1, (bump_res, bump_res))
smooth_bumps = ndimage.zoom(bumps, res / float(bump_res))
# Terrain is elementwise product.
terrain = bowl_shape * smooth_bumps
start_idx = physics.bind(self._hfield).adr
physics.model.hfield_data[start_idx:start_idx+res**2] = terrain.ravel()
# If we have a rendering context, we need to re-upload the modified
# heightfield data.
if physics.contexts:
with physics.contexts.gl.make_current() as ctx:
ctx.call(mjlib.mjr_uploadHField,
physics.model.ptr,
physics.contexts.mujoco.ptr,
physics.bind(self._hfield).element_id)
@property
def ground_geoms(self):
return (self._terrain_geom, self._ground_geom)
|
deepmind/dm_control
|
dm_control/locomotion/arenas/bowl.py
|
Python
|
apache-2.0
| 4,927 | 0.008119 |
"""The tests for the Template Binary sensor platform."""
from datetime import timedelta
import logging
from unittest.mock import patch
import pytest
from homeassistant import setup
from homeassistant.components import binary_sensor
from homeassistant.const import (
ATTR_DEVICE_CLASS,
EVENT_HOMEASSISTANT_START,
STATE_OFF,
STATE_ON,
STATE_UNAVAILABLE,
)
from homeassistant.core import Context, CoreState
from homeassistant.helpers import entity_registry
import homeassistant.util.dt as dt_util
from tests.common import async_fire_time_changed
ON = "on"
OFF = "off"
@pytest.mark.parametrize("count,domain", [(1, binary_sensor.DOMAIN)])
@pytest.mark.parametrize(
"config",
[
{
"binary_sensor": {
"platform": "template",
"sensors": {
"test": {
"friendly_name": "virtual thingy",
"value_template": "{{ True }}",
"device_class": "motion",
}
},
},
},
],
)
async def test_setup_legacy(hass, start_ha):
"""Test the setup."""
state = hass.states.get("binary_sensor.test")
assert state is not None
assert state.name == "virtual thingy"
assert state.state == ON
assert state.attributes["device_class"] == "motion"
@pytest.mark.parametrize("count,domain", [(0, binary_sensor.DOMAIN)])
@pytest.mark.parametrize(
"config",
[
{"binary_sensor": {"platform": "template"}},
{"binary_sensor": {"platform": "template", "sensors": {"foo bar": {}}}},
{
"binary_sensor": {
"platform": "template",
"sensors": {
"test": {
"value_template": "{{ foo }}",
"device_class": "foobarnotreal",
}
},
}
},
{
"binary_sensor": {
"platform": "template",
"sensors": {"test": {"device_class": "motion"}},
}
},
],
)
async def test_setup_invalid_sensors(hass, count, start_ha):
"""Test setup with no sensors."""
assert len(hass.states.async_entity_ids()) == count
@pytest.mark.parametrize("count,domain", [(1, binary_sensor.DOMAIN)])
@pytest.mark.parametrize(
"config",
[
{
"binary_sensor": {
"platform": "template",
"sensors": {
"test_template_sensor": {
"value_template": "{{ states.sensor.xyz.state }}",
"icon_template": "{% if "
"states.binary_sensor.test_state.state == "
"'Works' %}"
"mdi:check"
"{% endif %}",
},
},
},
},
],
)
async def test_icon_template(hass, start_ha):
"""Test icon template."""
state = hass.states.get("binary_sensor.test_template_sensor")
assert state.attributes.get("icon") == ""
hass.states.async_set("binary_sensor.test_state", "Works")
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.test_template_sensor")
assert state.attributes["icon"] == "mdi:check"
@pytest.mark.parametrize("count,domain", [(1, binary_sensor.DOMAIN)])
@pytest.mark.parametrize(
"config",
[
{
"binary_sensor": {
"platform": "template",
"sensors": {
"test_template_sensor": {
"value_template": "{{ states.sensor.xyz.state }}",
"entity_picture_template": "{% if "
"states.binary_sensor.test_state.state == "
"'Works' %}"
"/local/sensor.png"
"{% endif %}",
},
},
},
},
],
)
async def test_entity_picture_template(hass, start_ha):
"""Test entity_picture template."""
state = hass.states.get("binary_sensor.test_template_sensor")
assert state.attributes.get("entity_picture") == ""
hass.states.async_set("binary_sensor.test_state", "Works")
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.test_template_sensor")
assert state.attributes["entity_picture"] == "/local/sensor.png"
@pytest.mark.parametrize("count,domain", [(1, binary_sensor.DOMAIN)])
@pytest.mark.parametrize(
"config",
[
{
"binary_sensor": {
"platform": "template",
"sensors": {
"test_template_sensor": {
"value_template": "{{ states.sensor.xyz.state }}",
"attribute_templates": {
"test_attribute": "It {{ states.sensor.test_state.state }}."
},
},
},
},
},
],
)
async def test_attribute_templates(hass, start_ha):
"""Test attribute_templates template."""
state = hass.states.get("binary_sensor.test_template_sensor")
assert state.attributes.get("test_attribute") == "It ."
hass.states.async_set("sensor.test_state", "Works2")
await hass.async_block_till_done()
hass.states.async_set("sensor.test_state", "Works")
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.test_template_sensor")
assert state.attributes["test_attribute"] == "It Works."
@pytest.fixture
async def setup_mock():
"""Do setup of sensor mock."""
with patch(
"homeassistant.components.template.binary_sensor."
"BinarySensorTemplate._update_state"
) as _update_state:
yield _update_state
@pytest.mark.parametrize("count,domain", [(1, binary_sensor.DOMAIN)])
@pytest.mark.parametrize(
"config",
[
{
"binary_sensor": {
"platform": "template",
"sensors": {
"match_all_template_sensor": {
"value_template": (
"{% for state in states %}"
"{% if state.entity_id == 'sensor.humidity' %}"
"{{ state.entity_id }}={{ state.state }}"
"{% endif %}"
"{% endfor %}"
),
},
},
}
},
],
)
async def test_match_all(hass, setup_mock, start_ha):
"""Test template that is rerendered on any state lifecycle."""
init_calls = len(setup_mock.mock_calls)
hass.states.async_set("sensor.any_state", "update")
await hass.async_block_till_done()
assert len(setup_mock.mock_calls) == init_calls
@pytest.mark.parametrize("count,domain", [(1, binary_sensor.DOMAIN)])
@pytest.mark.parametrize(
"config",
[
{
"binary_sensor": {
"platform": "template",
"sensors": {
"test": {
"friendly_name": "virtual thingy",
"value_template": "{{ states.sensor.test_state.state == 'on' }}",
"device_class": "motion",
},
},
},
},
],
)
async def test_event(hass, start_ha):
"""Test the event."""
state = hass.states.get("binary_sensor.test")
assert state.state == OFF
hass.states.async_set("sensor.test_state", ON)
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.test")
assert state.state == ON
@pytest.mark.parametrize("count,domain", [(1, binary_sensor.DOMAIN)])
@pytest.mark.parametrize(
"config",
[
{
"binary_sensor": {
"platform": "template",
"sensors": {
"test_on": {
"friendly_name": "virtual thingy",
"value_template": "{{ states.sensor.test_state.state == 'on' }}",
"device_class": "motion",
"delay_on": 5,
},
"test_off": {
"friendly_name": "virtual thingy",
"value_template": "{{ states.sensor.test_state.state == 'on' }}",
"device_class": "motion",
"delay_off": 5,
},
},
},
},
{
"binary_sensor": {
"platform": "template",
"sensors": {
"test_on": {
"friendly_name": "virtual thingy",
"value_template": "{{ states.sensor.test_state.state == 'on' }}",
"device_class": "motion",
"delay_on": '{{ ({ "seconds": 10 / 2 }) }}',
},
"test_off": {
"friendly_name": "virtual thingy",
"value_template": "{{ states.sensor.test_state.state == 'on' }}",
"device_class": "motion",
"delay_off": '{{ ({ "seconds": 10 / 2 }) }}',
},
},
},
},
{
"binary_sensor": {
"platform": "template",
"sensors": {
"test_on": {
"friendly_name": "virtual thingy",
"value_template": "{{ states.sensor.test_state.state == 'on' }}",
"device_class": "motion",
"delay_on": '{{ ({ "seconds": states("input_number.delay")|int }) }}',
},
"test_off": {
"friendly_name": "virtual thingy",
"value_template": "{{ states.sensor.test_state.state == 'on' }}",
"device_class": "motion",
"delay_off": '{{ ({ "seconds": states("input_number.delay")|int }) }}',
},
},
},
},
],
)
async def test_template_delay_on_off(hass, start_ha):
"""Test binary sensor template delay on."""
assert hass.states.get("binary_sensor.test_on").state == OFF
assert hass.states.get("binary_sensor.test_off").state == OFF
hass.states.async_set("input_number.delay", 5)
hass.states.async_set("sensor.test_state", ON)
await hass.async_block_till_done()
assert hass.states.get("binary_sensor.test_on").state == OFF
assert hass.states.get("binary_sensor.test_off").state == ON
future = dt_util.utcnow() + timedelta(seconds=5)
async_fire_time_changed(hass, future)
await hass.async_block_till_done()
assert hass.states.get("binary_sensor.test_on").state == ON
assert hass.states.get("binary_sensor.test_off").state == ON
# check with time changes
hass.states.async_set("sensor.test_state", OFF)
await hass.async_block_till_done()
assert hass.states.get("binary_sensor.test_on").state == OFF
assert hass.states.get("binary_sensor.test_off").state == ON
hass.states.async_set("sensor.test_state", ON)
await hass.async_block_till_done()
assert hass.states.get("binary_sensor.test_on").state == OFF
assert hass.states.get("binary_sensor.test_off").state == ON
hass.states.async_set("sensor.test_state", OFF)
await hass.async_block_till_done()
assert hass.states.get("binary_sensor.test_on").state == OFF
assert hass.states.get("binary_sensor.test_off").state == ON
future = dt_util.utcnow() + timedelta(seconds=5)
async_fire_time_changed(hass, future)
await hass.async_block_till_done()
assert hass.states.get("binary_sensor.test_on").state == OFF
assert hass.states.get("binary_sensor.test_off").state == OFF
@pytest.mark.parametrize("count,domain", [(1, binary_sensor.DOMAIN)])
@pytest.mark.parametrize(
"config",
[
{
"binary_sensor": {
"platform": "template",
"sensors": {
"test": {
"friendly_name": "virtual thingy",
"value_template": "true",
"device_class": "motion",
"delay_off": 5,
},
},
},
},
],
)
async def test_available_without_availability_template(hass, start_ha):
"""Ensure availability is true without an availability_template."""
state = hass.states.get("binary_sensor.test")
assert state.state != STATE_UNAVAILABLE
assert state.attributes[ATTR_DEVICE_CLASS] == "motion"
@pytest.mark.parametrize("count,domain", [(1, binary_sensor.DOMAIN)])
@pytest.mark.parametrize(
"config",
[
{
"binary_sensor": {
"platform": "template",
"sensors": {
"test": {
"friendly_name": "virtual thingy",
"value_template": "true",
"device_class": "motion",
"delay_off": 5,
"availability_template": "{{ is_state('sensor.test_state','on') }}",
},
},
},
},
],
)
async def test_availability_template(hass, start_ha):
"""Test availability template."""
hass.states.async_set("sensor.test_state", STATE_OFF)
await hass.async_block_till_done()
assert hass.states.get("binary_sensor.test").state == STATE_UNAVAILABLE
hass.states.async_set("sensor.test_state", STATE_ON)
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.test")
assert state.state != STATE_UNAVAILABLE
assert state.attributes[ATTR_DEVICE_CLASS] == "motion"
@pytest.mark.parametrize("count,domain", [(1, binary_sensor.DOMAIN)])
@pytest.mark.parametrize(
"config",
[
{
"binary_sensor": {
"platform": "template",
"sensors": {
"invalid_template": {
"value_template": "{{ states.binary_sensor.test_sensor }}",
"attribute_templates": {
"test_attribute": "{{ states.binary_sensor.unknown.attributes.picture }}"
},
}
},
},
},
],
)
async def test_invalid_attribute_template(hass, start_ha, caplog_setup_text):
"""Test that errors are logged if rendering template fails."""
hass.states.async_set("binary_sensor.test_sensor", "true")
assert len(hass.states.async_all()) == 2
assert ("test_attribute") in caplog_setup_text
assert ("TemplateError") in caplog_setup_text
@pytest.mark.parametrize("count,domain", [(1, binary_sensor.DOMAIN)])
@pytest.mark.parametrize(
"config",
[
{
"binary_sensor": {
"platform": "template",
"sensors": {
"my_sensor": {
"value_template": "{{ states.binary_sensor.test_sensor }}",
"availability_template": "{{ x - 12 }}",
},
},
},
},
],
)
async def test_invalid_availability_template_keeps_component_available(
hass, start_ha, caplog_setup_text
):
"""Test that an invalid availability keeps the device available."""
assert hass.states.get("binary_sensor.my_sensor").state != STATE_UNAVAILABLE
assert "UndefinedError: 'x' is undefined" in caplog_setup_text
async def test_no_update_template_match_all(hass, caplog):
"""Test that we do not update sensors that match on all."""
hass.state = CoreState.not_running
await setup.async_setup_component(
hass,
binary_sensor.DOMAIN,
{
"binary_sensor": {
"platform": "template",
"sensors": {
"all_state": {"value_template": '{{ "true" }}'},
"all_icon": {
"value_template": "{{ states.binary_sensor.test_sensor.state }}",
"icon_template": "{{ 1 + 1 }}",
},
"all_entity_picture": {
"value_template": "{{ states.binary_sensor.test_sensor.state }}",
"entity_picture_template": "{{ 1 + 1 }}",
},
"all_attribute": {
"value_template": "{{ states.binary_sensor.test_sensor.state }}",
"attribute_templates": {"test_attribute": "{{ 1 + 1 }}"},
},
},
}
},
)
await hass.async_block_till_done()
hass.states.async_set("binary_sensor.test_sensor", "true")
assert len(hass.states.async_all()) == 5
assert hass.states.get("binary_sensor.all_state").state == OFF
assert hass.states.get("binary_sensor.all_icon").state == OFF
assert hass.states.get("binary_sensor.all_entity_picture").state == OFF
assert hass.states.get("binary_sensor.all_attribute").state == OFF
hass.bus.async_fire(EVENT_HOMEASSISTANT_START)
await hass.async_block_till_done()
assert hass.states.get("binary_sensor.all_state").state == ON
assert hass.states.get("binary_sensor.all_icon").state == ON
assert hass.states.get("binary_sensor.all_entity_picture").state == ON
assert hass.states.get("binary_sensor.all_attribute").state == ON
hass.states.async_set("binary_sensor.test_sensor", "false")
await hass.async_block_till_done()
assert hass.states.get("binary_sensor.all_state").state == ON
# Will now process because we have one valid template
assert hass.states.get("binary_sensor.all_icon").state == OFF
assert hass.states.get("binary_sensor.all_entity_picture").state == OFF
assert hass.states.get("binary_sensor.all_attribute").state == OFF
await hass.helpers.entity_component.async_update_entity("binary_sensor.all_state")
await hass.helpers.entity_component.async_update_entity("binary_sensor.all_icon")
await hass.helpers.entity_component.async_update_entity(
"binary_sensor.all_entity_picture"
)
await hass.helpers.entity_component.async_update_entity(
"binary_sensor.all_attribute"
)
assert hass.states.get("binary_sensor.all_state").state == ON
assert hass.states.get("binary_sensor.all_icon").state == OFF
assert hass.states.get("binary_sensor.all_entity_picture").state == OFF
assert hass.states.get("binary_sensor.all_attribute").state == OFF
@pytest.mark.parametrize("count,domain", [(1, "template")])
@pytest.mark.parametrize(
"config",
[
{
"template": {
"unique_id": "group-id",
"binary_sensor": {
"name": "top-level",
"unique_id": "sensor-id",
"state": ON,
},
},
"binary_sensor": {
"platform": "template",
"sensors": {
"test_template_cover_01": {
"unique_id": "not-so-unique-anymore",
"value_template": "{{ true }}",
},
"test_template_cover_02": {
"unique_id": "not-so-unique-anymore",
"value_template": "{{ false }}",
},
},
},
},
],
)
async def test_unique_id(hass, start_ha):
"""Test unique_id option only creates one binary sensor per id."""
assert len(hass.states.async_all()) == 2
ent_reg = entity_registry.async_get(hass)
assert len(ent_reg.entities) == 2
assert (
ent_reg.async_get_entity_id("binary_sensor", "template", "group-id-sensor-id")
is not None
)
assert (
ent_reg.async_get_entity_id(
"binary_sensor", "template", "not-so-unique-anymore"
)
is not None
)
@pytest.mark.parametrize("count,domain", [(1, binary_sensor.DOMAIN)])
@pytest.mark.parametrize(
"config",
[
{
"binary_sensor": {
"platform": "template",
"sensors": {
"test": {
"friendly_name": "virtual thingy",
"value_template": "True",
"icon_template": "{{ states.sensor.test_state.state }}",
"device_class": "motion",
"delay_on": 5,
},
},
},
},
],
)
async def test_template_validation_error(hass, caplog, start_ha):
"""Test binary sensor template delay on."""
caplog.set_level(logging.ERROR)
state = hass.states.get("binary_sensor.test")
assert state.attributes.get("icon") == ""
hass.states.async_set("sensor.test_state", "mdi:check")
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.test")
assert state.attributes.get("icon") == "mdi:check"
hass.states.async_set("sensor.test_state", "invalid_icon")
await hass.async_block_till_done()
assert len(caplog.records) == 1
assert caplog.records[0].message.startswith(
"Error validating template result 'invalid_icon' from template"
)
state = hass.states.get("binary_sensor.test")
assert state.attributes.get("icon") is None
@pytest.mark.parametrize("count,domain", [(2, "template")])
@pytest.mark.parametrize(
"config",
[
{
"template": [
{"invalid": "config"},
# Config after invalid should still be set up
{
"unique_id": "listening-test-event",
"trigger": {"platform": "event", "event_type": "test_event"},
"binary_sensors": {
"hello": {
"friendly_name": "Hello Name",
"unique_id": "hello_name-id",
"device_class": "battery",
"value_template": "{{ trigger.event.data.beer == 2 }}",
"entity_picture_template": "{{ '/local/dogs.png' }}",
"icon_template": "{{ 'mdi:pirate' }}",
"attribute_templates": {
"plus_one": "{{ trigger.event.data.beer + 1 }}"
},
},
},
"binary_sensor": [
{
"name": "via list",
"unique_id": "via_list-id",
"device_class": "battery",
"state": "{{ trigger.event.data.beer == 2 }}",
"picture": "{{ '/local/dogs.png' }}",
"icon": "{{ 'mdi:pirate' }}",
"attributes": {
"plus_one": "{{ trigger.event.data.beer + 1 }}",
"another": "{{ trigger.event.data.uno_mas or 1 }}",
},
}
],
},
{
"trigger": [],
"binary_sensors": {
"bare_minimum": {
"value_template": "{{ trigger.event.data.beer == 1 }}"
},
},
},
],
},
],
)
async def test_trigger_entity(hass, start_ha):
"""Test trigger entity works."""
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.hello_name")
assert state is not None
assert state.state == OFF
state = hass.states.get("binary_sensor.bare_minimum")
assert state is not None
assert state.state == OFF
context = Context()
hass.bus.async_fire("test_event", {"beer": 2}, context=context)
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.hello_name")
assert state.state == ON
assert state.attributes.get("device_class") == "battery"
assert state.attributes.get("icon") == "mdi:pirate"
assert state.attributes.get("entity_picture") == "/local/dogs.png"
assert state.attributes.get("plus_one") == 3
assert state.context is context
ent_reg = entity_registry.async_get(hass)
assert len(ent_reg.entities) == 2
assert (
ent_reg.entities["binary_sensor.hello_name"].unique_id
== "listening-test-event-hello_name-id"
)
assert (
ent_reg.entities["binary_sensor.via_list"].unique_id
== "listening-test-event-via_list-id"
)
state = hass.states.get("binary_sensor.via_list")
assert state.state == ON
assert state.attributes.get("device_class") == "battery"
assert state.attributes.get("icon") == "mdi:pirate"
assert state.attributes.get("entity_picture") == "/local/dogs.png"
assert state.attributes.get("plus_one") == 3
assert state.attributes.get("another") == 1
assert state.context is context
# Even if state itself didn't change, attributes might have changed
hass.bus.async_fire("test_event", {"beer": 2, "uno_mas": "si"})
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.via_list")
assert state.state == ON
assert state.attributes.get("another") == "si"
@pytest.mark.parametrize("count,domain", [(1, "template")])
@pytest.mark.parametrize(
"config",
[
{
"template": {
"trigger": {"platform": "event", "event_type": "test_event"},
"binary_sensor": {
"name": "test",
"state": "{{ trigger.event.data.beer == 2 }}",
"device_class": "motion",
"delay_on": '{{ ({ "seconds": 6 / 2 }) }}',
"auto_off": '{{ ({ "seconds": 1 + 1 }) }}',
},
},
},
],
)
async def test_template_with_trigger_templated_delay_on(hass, start_ha):
"""Test binary sensor template with template delay on."""
state = hass.states.get("binary_sensor.test")
assert state.state == OFF
context = Context()
hass.bus.async_fire("test_event", {"beer": 2}, context=context)
await hass.async_block_till_done()
future = dt_util.utcnow() + timedelta(seconds=3)
async_fire_time_changed(hass, future)
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.test")
assert state.state == ON
# Now wait for the auto-off
future = dt_util.utcnow() + timedelta(seconds=2)
async_fire_time_changed(hass, future)
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.test")
assert state.state == OFF
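
# Note on the test above: the templated delay_on renders to {"seconds": 3}
# (6 / 2) and auto_off to {"seconds": 2} (1 + 1), which is why the test
# advances time by 3 seconds to observe ON and by another 2 seconds to
# observe OFF.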
|
Danielhiversen/home-assistant
|
tests/components/template/test_binary_sensor.py
|
Python
|
apache-2.0
| 27,103 | 0.001107 |
import unittest
import datetime
import pykmlib
class PyKmlibAdsTest(unittest.TestCase):
def test_smoke(self):
classificator_file_str = ''
with open('./data/classificator.txt', 'r') as classificator_file:
classificator_file_str = classificator_file.read()
types_file_str = ''
with open('./data/types.txt', 'r') as types_file:
types_file_str = types_file.read()
pykmlib.load_classificator_types(classificator_file_str, types_file_str)
category = pykmlib.CategoryData()
category.name['default'] = 'Test category'
category.name['ru'] = 'Тестовая категория'
category.description['default'] = 'Test description'
category.description['ru'] = 'Тестовое описание'
category.annotation['default'] = 'Test annotation'
category.annotation['en'] = 'Test annotation'
category.image_url = 'https://localhost/123.png'
category.visible = True
category.author_name = 'Maps.Me'
category.author_id = '12345'
category.rating = 8.9
category.reviews_number = 567
category.last_modified = int(datetime.datetime.now().timestamp())
category.access_rules = pykmlib.AccessRules.PUBLIC
category.tags.set_list(['mountains', 'ski', 'snowboard'])
category.toponyms.set_list(['12345', '54321'])
category.languages.set_list(['en', 'ru', 'de'])
category.properties.set_dict({'property1':'value1', 'property2':'value2'})
bookmark = pykmlib.BookmarkData()
bookmark.name['default'] = 'Test bookmark'
bookmark.name['ru'] = 'Тестовая метка'
bookmark.description['default'] = 'Test bookmark description'
bookmark.description['ru'] = 'Тестовое описание метки'
bookmark.feature_types.set_list([
pykmlib.classificator_type_to_index('historic-castle'),
pykmlib.classificator_type_to_index('historic-memorial')])
bookmark.custom_name['default'] = 'Мое любимое место'
bookmark.custom_name['en'] = 'My favorite place'
bookmark.color.predefined_color = pykmlib.PredefinedColor.BLUE
bookmark.color.rgba = 0
bookmark.icon = pykmlib.BookmarkIcon.HOTEL
bookmark.viewport_scale = 15
bookmark.timestamp = int(datetime.datetime.now().timestamp())
bookmark.point = pykmlib.LatLon(45.9242, 56.8679)
bookmark.visible = True
bookmark.nearest_toponym = '12345'
bookmark.properties.set_dict({'bm_property1':'value1', 'bm_property2':'value2'})
bookmark.bound_tracks.set_list([0])
layer1 = pykmlib.TrackLayer()
layer1.line_width = 6.0
layer1.color.rgba = 0xff0000ff
layer2 = pykmlib.TrackLayer()
layer2.line_width = 7.0
layer2.color.rgba = 0x00ff00ff
track = pykmlib.TrackData()
track.local_id = 1
track.name['default'] = 'Test track'
track.name['ru'] = 'Тестовый трек'
track.description['default'] = 'Test track description'
track.description['ru'] = 'Тестовое описание трека'
track.timestamp = int(datetime.datetime.now().timestamp())
track.layers.set_list([layer1, layer2])
track.points.set_list([
pykmlib.LatLon(45.9242, 56.8679),
pykmlib.LatLon(45.2244, 56.2786),
pykmlib.LatLon(45.1964, 56.9832)])
track.visible = True
track.nearest_toponyms.set_list(['12345', '54321', '98765'])
track.properties.set_dict({'tr_property1':'value1', 'tr_property2':'value2'})
file_data = pykmlib.FileData()
file_data.server_id = 'AAAA-BBBB-CCCC-DDDD'
file_data.category = category
file_data.bookmarks.append(bookmark)
file_data.tracks.append(track)
s = pykmlib.export_kml(file_data)
imported_file_data = pykmlib.import_kml(s)
self.assertEqual(file_data, imported_file_data)
if __name__ == "__main__":
unittest.main()
|
rokuz/omim
|
kml/pykmlib/bindings_test.py
|
Python
|
apache-2.0
| 4,094 | 0.004272 |
m = [2]
for i in range(int(input())-1):
m.append(int(3*m[i]/2))
print(sum(m))
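
# Worked example of the recurrence above (HackerRank "Viral Advertising"),
# kept as a comment so the script's behaviour is unchanged: on day 1,
# 5 people see the ad and floor(5/2) = 2 like it; on day 2, 3*2 = 6 see it
# and 3 like it; on day 3, 3*3 = 9 see it and 4 like it.  m collects the
# per-day likes [2, 3, 4], so for n = 3 the printed cumulative total is 9.
# int(3*m[i]/2) equals floor(3*m[i]/2) here because the values are non-negative.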
|
vipmunot/HackerRank
|
Algorithms/Viral Advertising.py
|
Python
|
mit
| 81 | 0.012346 |
# (c) Nelen & Schuurmans. GPL licensed, see LICENSE.rst.
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from __future__ import print_function
import logging
import requests
logger = logging.getLogger(__name__)
def collect_filters(url):
"""Return filters from FEWS, cleaned and ready for storing as json."""
from_fews = _download(url)
result = []
for filter_dict in from_fews:
result.append(_process_filter_dict(filter_dict))
return result
def collect_parameters(url):
from_fews = _download(url)
# TODO
return from_fews
def collect_locations(url):
from_fews = _download(url)
# TODO
return from_fews
def _download(url):
r = requests.get(url)
r.raise_for_status() # Only raises an error when not succesful.
return r.json()
def _process_filter_dict(filter_dict):
# {'filter': {name, childfilters, etc}
content = filter_dict['filter']
name = content['name']
description = content['description']
if name == description:
# Description is only interesting if it is different from the name.
# Often it is the same, so we've got to filter it out.
description = ''
children = [_process_filter_dict(child_filter_dict)
for child_filter_dict in content.get('childFilters', [])]
result = {'id': content['id'],
'name': name,
'description': description,
'children': children}
return result
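
# Illustrative input/output for _process_filter_dict (the payload below is
# hypothetical, not real FEWS data):
#
#     {'filter': {'id': 'f1', 'name': 'Rivers', 'description': 'Rivers',
#                 'childFilters': [{'filter': {'id': 'f2', 'name': 'North',
#                                              'description': 'Northern rivers'}}]}}
#
# becomes
#
#     {'id': 'f1', 'name': 'Rivers', 'description': '',
#      'children': [{'id': 'f2', 'name': 'North',
#                    'description': 'Northern rivers', 'children': []}]}
#
# The duplicate description on 'f1' is blanked out and child filters are
# processed recursively.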
|
lizardsystem/lizard-fewsapi
|
lizard_fewsapi/collect.py
|
Python
|
gpl-3.0
| 1,482 | 0 |
import random

if dest.lower()=='footballbot': dest=origin
par=' '.join(params).lower()
if (len(par) < 10 and par.count('is') == 0 and par.count('?') == 0 and par.count('will') == 0
        and par.count('should') == 0 and par.count('could') == 0 and par.count('do') == 0
        and par.count('has') == 0 and par.count('does') == 0 and par.count('when') == 0
        and par.count('why') == 0 and par.count('who') == 0): db['msgqueue'].append([origin+': That\'s not a question!',dest])
else:
if par.count(' or ') == 1:
opt1=par[par.find(' or ')+4:].strip()
if opt1.count(' ') != 0: opt1=opt1[:opt1.find(' ')].strip()
opt2=par[::-1]
opt2=opt2[opt2.find(' ro ')+4:].strip()
if opt2.count(' ') != 0: opt2=opt2[:opt2.find(' ')].strip()
opt1=opt1.replace('?','')
opt2=opt2.replace('?','')
opt2=opt2[::-1]
db['msgqueue'].append([origin+': '+random.choice(db['language']['verbs'])+'ing '+random.choice([opt1,opt2]),dest])
else: db['msgqueue'].append([origin+': '+random.choice(db['language']['eightball']),dest])
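
# Illustrative behaviour (all inputs hypothetical): a question such as
# "should i eat pizza or tacos?" contains exactly one ' or ', so the reply is
# "<origin>: <verb>ing pizza" or "<origin>: <verb>ing tacos", with <verb> drawn
# from db['language']['verbs']; any other question gets a random phrase from
# db['language']['eightball'], and short texts with no question words are
# rejected with "That's not a question!".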
|
epmatsw/FootballBot
|
fxns/8ball.py
|
Python
|
cc0-1.0
| 986 | 0.037525 |
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
import os
import time
import traceback
import urlparse
import random
import csv
from chrome_remote_control import page_test
from chrome_remote_control import util
from chrome_remote_control import wpr_modes
class PageState(object):
def __init__(self):
self.did_login = False
class PageRunner(object):
"""Runs a given test against a given test."""
def __init__(self, page_set):
self.page_set = page_set
def __enter__(self):
return self
def __exit__(self, *args):
self.Close()
def _ReorderPageSet(self, test_shuffle_order_file):
page_set_dict = {}
for page in self.page_set:
page_set_dict[page.url] = page
self.page_set.pages = []
with open(test_shuffle_order_file, 'rb') as csv_file:
csv_reader = csv.reader(csv_file)
csv_header = csv_reader.next()
if 'url' not in csv_header:
raise Exception('Unusable test_shuffle_order_file.')
url_index = csv_header.index('url')
for csv_row in csv_reader:
if csv_row[url_index] in page_set_dict:
self.page_set.pages.append(page_set_dict[csv_row[url_index]])
else:
raise Exception('Unusable test_shuffle_order_file.')
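  # The shuffle-order file parsed above is expected to be a CSV whose header
  # row contains a 'url' column and whose urls all belong to the page set,
  # e.g. (illustrative contents only):
  #
  #     url,label
  #     http://example.com/page1,first
  #     http://example.com/page2,second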
def Run(self, options, possible_browser, test, results):
archive_path = os.path.abspath(os.path.join(self.page_set.base_dir,
self.page_set.archive_path))
if options.wpr_mode == wpr_modes.WPR_OFF:
if os.path.isfile(archive_path):
possible_browser.options.wpr_mode = wpr_modes.WPR_REPLAY
else:
possible_browser.options.wpr_mode = wpr_modes.WPR_OFF
logging.warning("""
The page set archive %s does not exist, benchmarking against live sites!
Results won't be repeatable or comparable.
To fix this, either add svn-internal to your .gclient using
http://goto/read-src-internal, or create a new archive using --record.
""", os.path.relpath(archive_path))
credentials_path = None
if self.page_set.credentials_path:
credentials_path = os.path.join(self.page_set.base_dir,
self.page_set.credentials_path)
if not os.path.exists(credentials_path):
credentials_path = None
with possible_browser.Create() as b:
b.credentials.credentials_path = credentials_path
test.SetUpBrowser(b)
b.credentials.WarnIfMissingCredentials(self.page_set)
if not options.test_shuffle and options.test_shuffle_order_file is not\
None:
raise Exception('--test-shuffle-order-file requires --test-shuffle.')
# Set up a random generator for shuffling the page running order.
test_random = random.Random()
b.SetReplayArchivePath(archive_path)
with b.ConnectToNthTab(0) as tab:
if options.test_shuffle_order_file is None:
for _ in range(int(options.pageset_repeat)):
if options.test_shuffle:
test_random.shuffle(self.page_set)
for page in self.page_set:
for _ in range(int(options.page_repeat)):
self._RunPage(options, page, tab, test, results)
else:
self._ReorderPageSet(options.test_shuffle_order_file)
for page in self.page_set:
self._RunPage(options, page, tab, test, results)
def _RunPage(self, options, page, tab, test, results):
logging.info('Running %s' % page.url)
page_state = PageState()
try:
did_prepare = self.PreparePage(page, tab, page_state, results)
except Exception, ex:
logging.error('Unexpected failure while running %s: %s',
page.url, traceback.format_exc())
self.CleanUpPage(page, tab, page_state)
raise
if not did_prepare:
self.CleanUpPage(page, tab, page_state)
return
try:
test.Run(options, page, tab, results)
except page_test.Failure, ex:
logging.info('%s: %s', ex, page.url)
results.AddFailure(page, ex, traceback.format_exc())
return
except util.TimeoutException, ex:
logging.warning('Timed out while running %s', page.url)
results.AddFailure(page, ex, traceback.format_exc())
return
except Exception, ex:
logging.error('Unexpected failure while running %s: %s',
page.url, traceback.format_exc())
raise
finally:
self.CleanUpPage(page, tab, page_state)
def Close(self):
pass
@staticmethod
def WaitForPageToLoad(expression, tab):
def IsPageLoaded():
return tab.runtime.Evaluate(expression)
# Wait until the form is submitted and the page completes loading.
util.WaitFor(lambda: IsPageLoaded(), 60) # pylint: disable=W0108
def PreparePage(self, page, tab, page_state, results):
parsed_url = urlparse.urlparse(page.url)
if parsed_url[0] == 'file':
path = os.path.join(self.page_set.base_dir,
parsed_url.netloc) # pylint: disable=E1101
dirname, filename = os.path.split(path)
tab.browser.SetHTTPServerDirectory(dirname)
target_side_url = tab.browser.http_server.UrlOf(filename)
else:
target_side_url = page.url
if page.credentials:
page_state.did_login = tab.browser.credentials.LoginNeeded(
tab, page.credentials)
if not page_state.did_login:
msg = 'Could not login to %s on %s' % (page.credentials,
target_side_url)
logging.info(msg)
results.AddFailure(page, msg, "")
return False
tab.page.Navigate(target_side_url)
# Wait for unpredictable redirects.
if page.wait_time_after_navigate:
time.sleep(page.wait_time_after_navigate)
if page.wait_for_javascript_expression is not None:
self.WaitForPageToLoad(page.wait_for_javascript_expression, tab)
tab.WaitForDocumentReadyStateToBeInteractiveOrBetter()
return True
def CleanUpPage(self, page, tab, page_state): # pylint: disable=R0201
if page.credentials and page_state.did_login:
tab.browser.credentials.LoginNoLongerNeeded(tab, page.credentials)
tab.runtime.Evaluate("""chrome && chrome.benchmarking &&
chrome.benchmarking.closeConnections()""")
|
junmin-zhu/chromium-rivertrail
|
tools/chrome_remote_control/chrome_remote_control/page_runner.py
|
Python
|
bsd-3-clause
| 6,385 | 0.01112 |
import cv2
import skimage.data
import instance_occlsegm_lib
def test_resize():
for interpolation in [cv2.INTER_NEAREST, cv2.INTER_LINEAR]:
_test_resize(interpolation)
def _test_resize(interpolation):
img = skimage.data.astronaut()
H_dst, W_dst = 480, 640
ret = instance_occlsegm_lib.image.resize(img, height=H_dst, width=W_dst,
interpolation=interpolation)
assert ret.dtype == img.dtype
assert ret.shape == (H_dst, W_dst, 3)
ret = instance_occlsegm_lib.image.resize(
img, height=H_dst, interpolation=interpolation)
hw_ratio = 1. * img.shape[1] / img.shape[0]
W_expected = int(round(1 / hw_ratio * H_dst))
assert ret.dtype == img.dtype
assert ret.shape == (H_dst, W_expected, 3)
scale = 0.3
ret = instance_occlsegm_lib.image.resize(img, fy=scale, fx=scale,
interpolation=interpolation)
assert ret.dtype == img.dtype
H_expected = int(round(img.shape[0] * 0.3))
W_expected = int(round(img.shape[1] * 0.3))
assert ret.shape == (H_expected, W_expected, 3)
scale = 0.3
ret = instance_occlsegm_lib.image.resize(
img, fy=scale, interpolation=interpolation)
assert ret.dtype == img.dtype
assert ret.shape == (H_expected, W_expected, 3)
|
start-jsk/jsk_apc
|
demos/instance_occlsegm/tests/image_tests/test_resize.py
|
Python
|
bsd-3-clause
| 1,334 | 0 |
BREADABILITY_AVAILABLE = True
try:
from breadability.readable import Article, prep_article, check_siblings
except ImportError:
BREADABILITY_AVAILABLE = False
Article = object
from operator import attrgetter
from werkzeug.utils import cached_property
import re
from lxml.etree import tounicode, tostring
class PageText(Article):
WHITESPACE = {' ':re.compile(r"[\s\r\n]+"),
'':re.compile(r"\.{3,}"),}
CANDIDATE_SEPARATOR = u'\r\n'
def __init__(self, *args, **kwargs):
if not BREADABILITY_AVAILABLE:
raise ImportError('breadability is not available')
super(PageText, self).__init__(*args, **kwargs)
def __unicode__(self):
return self.winner()
def stripped(self, text):
for replacement, whitespace in self.WHITESPACE.items():
text = re.sub(whitespace, replacement, text)
return text
def slice(self, before=1, reverse=True):
if self.candidates:
# cleanup by removing the should_drop we spotted.
[n.drop_tree() for n in self._should_drop
if n.getparent() is not None]
# right now we return the highest scoring candidate content
by_score = sorted([c for c in self.candidates.values()],
key=attrgetter('content_score'), reverse=reverse)
# since we have several candidates, check the winner's siblings
# for extra content
for winner in by_score[:before]:
winner = check_siblings(winner, self.candidates)
# updated_winner.node = prep_article(updated_winner.node)
if winner.node is not None:
yield winner.node
def winner(self, greed=1):
if not self.candidates:
return u''
if isinstance(greed, float):
            if not 0.0 < greed < 1.0:
                raise ValueError('greed coefficient should be an integer or 0 < x < 1.0')
greed = int(round(len(self.candidates)*greed))
return self.CANDIDATE_SEPARATOR.join((self.stripped(tounicode(node,
method='text')) for node in self.slice(before=greed)))
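

# Usage sketch (illustrative only; the HTML string is hypothetical and
# breadability's Article constructor is assumed to accept raw HTML):
#
#     page = PageText('<html>...</html>')
#     page.winner()            # text of the single best candidate (greed=1)
#     page.winner(greed=3)     # join the three highest-scoring candidates
#     page.winner(greed=0.5)   # a float is treated as a fraction of all candidates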
|
denz/swarm-crawler
|
swarm_crawler/text.py
|
Python
|
bsd-3-clause
| 2,172 | 0.005525 |
# -*- coding: utf-8 -*-
import pytest
import mock
from apispec import APISpec, Path
from apispec.exceptions import PluginError, APISpecError
description = (
    'This is a sample Petstore server. You can find out more '
    'about Swagger at <a href=\"http://swagger.wordnik.com\">http://swagger.wordnik.com</a> '
    'or on irc.freenode.net, #swagger. For this sample, you can use the api '
    'key \"special-key\" to test the authorization filters'
)
@pytest.fixture()
def spec():
return APISpec(
title='Swagger Petstore',
version='1.0.0',
info={'description': description},
security=[{'apiKey': []}],
)
class TestMetadata:
def test_swagger_version(self, spec):
assert spec.to_dict()['swagger'] == '2.0'
def test_swagger_metadata(self, spec):
metadata = spec.to_dict()
assert metadata['security'] == [{'apiKey': []}]
assert metadata['info']['title'] == 'Swagger Petstore'
assert metadata['info']['version'] == '1.0.0'
assert metadata['info']['description'] == description
class TestTags:
tag = {
'name': 'MyTag',
'description': 'This tag gathers all API endpoints which are mine.'
}
def test_tag(self, spec):
spec.add_tag(self.tag)
tags_json = spec.to_dict()['tags']
assert self.tag in tags_json
class TestDefinitions:
properties = {
'id': {'type': 'integer', 'format': 'int64'},
'name': {'type': 'string', 'example': 'doggie'},
}
def test_definition(self, spec):
spec.definition('Pet', properties=self.properties)
defs_json = spec.to_dict()['definitions']
assert 'Pet' in defs_json
assert defs_json['Pet']['properties'] == self.properties
def test_definition_description(self, spec):
model_description = 'An animal which lives with humans.'
spec.definition('Pet', properties=self.properties, description=model_description)
defs_json = spec.to_dict()['definitions']
assert defs_json['Pet']['description'] == model_description
def test_definition_stores_enum(self, spec):
enum = ['name', 'photoUrls']
spec.definition(
'Pet',
properties=self.properties,
enum=enum
)
defs_json = spec.to_dict()['definitions']
assert defs_json['Pet']['enum'] == enum
def test_definition_extra_fields(self, spec):
extra_fields = {'discriminator': 'name'}
spec.definition('Pet', properties=self.properties, extra_fields=extra_fields)
defs_json = spec.to_dict()['definitions']
assert defs_json['Pet']['discriminator'] == 'name'
def test_pass_definition_to_plugins(self, spec):
def def_helper(spec, name, **kwargs):
if kwargs.get('definition') is not None:
return {'available': True}
return {'available': False}
spec.register_definition_helper(def_helper)
spec.definition('Pet', properties=self.properties)
defs_json = spec.to_dict()['definitions']
assert defs_json['Pet']['available']
class TestPath:
paths = {
'/pet/{petId}': {
'get': {
'parameters': [
{
'required': True,
'format': 'int64',
'name': 'petId',
'in': 'path',
'type': 'integer',
'description': 'ID of pet that needs to be fetched'
}
],
'responses': {
"200": {
"schema": {'$ref': '#/definitions/Pet'},
'description': 'successful operation'
},
"400": {
"description": "Invalid ID supplied"
},
"404": {
"description": "Pet not found"
}
},
"produces": [
"application/json",
"application/xml"
],
"operationId": "getPetById",
"summary": "Find pet by ID",
'description': ('Returns a pet when ID < 10. '
'ID > 10 or nonintegers will simulate API error conditions'),
'tags': ['pet']
}
}
}
def test_add_path(self, spec):
route_spec = self.paths['/pet/{petId}']['get']
spec.add_path(
path='/pet/{petId}',
operations=dict(
get=dict(
parameters=route_spec['parameters'],
responses=route_spec['responses'],
produces=route_spec['produces'],
operationId=route_spec['operationId'],
summary=route_spec['summary'],
description=route_spec['description'],
tags=route_spec['tags']
)
)
)
p = spec._paths['/pet/{petId}']['get']
assert p['parameters'] == route_spec['parameters']
assert p['responses'] == route_spec['responses']
assert p['operationId'] == route_spec['operationId']
assert p['summary'] == route_spec['summary']
assert p['description'] == route_spec['description']
assert p['tags'] == route_spec['tags']
def test_paths_maintain_order(self, spec):
spec.add_path(path='/path1')
spec.add_path(path='/path2')
spec.add_path(path='/path3')
spec.add_path(path='/path4')
assert list(spec.to_dict()['paths'].keys()) == ['/path1', '/path2', '/path3', '/path4']
def test_add_path_merges_paths(self, spec):
"""Test that adding a second HTTP method to an existing path performs
a merge operation instead of an overwrite"""
path = '/pet/{petId}'
route_spec = self.paths[path]['get']
spec.add_path(
path=path,
operations=dict(
get=route_spec
)
)
spec.add_path(
path=path,
operations=dict(
put=dict(
parameters=route_spec['parameters'],
responses=route_spec['responses'],
produces=route_spec['produces'],
operationId='updatePet',
summary='Updates an existing Pet',
description='Use this method to make changes to Pet `petId`',
tags=route_spec['tags']
)
)
)
p = spec._paths[path]
assert 'get' in p
assert 'put' in p
def test_add_path_ensures_path_parameters_required(self, spec):
path = '/pet/{petId}'
spec.add_path(
path=path,
operations=dict(
put=dict(
parameters=[{
'name': 'petId',
'in': 'path',
}]
)
)
)
assert spec._paths[path]['put']['parameters'][0]['required'] is True
def test_add_path_with_no_path_raises_error(self, spec):
with pytest.raises(APISpecError) as excinfo:
spec.add_path()
assert 'Path template is not specified' in str(excinfo)
def test_add_path_strips_base_path(self, spec):
spec.options['basePath'] = '/v1'
spec.add_path('/v1/pets')
assert '/pets' in spec._paths
assert '/v1/pets' not in spec._paths
def test_add_path_accepts_path(self, spec):
route = '/pet/{petId}'
route_spec = self.paths[route]
path = Path(path=route, operations={'get': route_spec['get']})
spec.add_path(path)
p = spec._paths[path.path]
assert path.path == p.path
assert 'get' in p
def test_add_path_strips_path_base_path(self, spec):
spec.options['basePath'] = '/v1'
path = Path(path='/v1/pets')
spec.add_path(path)
assert '/pets' in spec._paths
assert '/v1/pets' not in spec._paths
def test_add_parameters(self, spec):
route_spec = self.paths['/pet/{petId}']['get']
spec.add_parameter('test_parameter', 'path', **route_spec['parameters'][0])
spec.add_path(
path='/pet/{petId}',
operations=dict(
get=dict(
parameters=['test_parameter'],
)
)
)
metadata = spec.to_dict()
p = spec._paths['/pet/{petId}']['get']
assert p['parameters'][0] == {'$ref': '#/parameters/test_parameter'}
assert route_spec['parameters'][0] == metadata['parameters']['test_parameter']
class TestPlugins:
DUMMY_PLUGIN = 'tests.plugins.dummy_plugin'
@mock.patch(DUMMY_PLUGIN + '.setup', autospec=True)
def test_setup_plugin(self, mock_setup, spec):
spec.setup_plugin(self.DUMMY_PLUGIN)
assert self.DUMMY_PLUGIN in spec.plugins
mock_setup.assert_called_once_with(spec)
spec.setup_plugin(self.DUMMY_PLUGIN)
assert mock_setup.call_count == 1
def test_setup_plugin_doesnt_exist(self, spec):
with pytest.raises(PluginError):
spec.setup_plugin('plugin.doesnt.exist')
def test_setup_plugin_with_no_setup_function_raises_error(self, spec):
plugin_path = 'tests.plugins.dummy_plugin_no_setup'
with pytest.raises(PluginError) as excinfo:
spec.setup_plugin(plugin_path)
msg = excinfo.value.args[0]
assert msg == 'Plugin "{0}" has no setup(spec) function'.format(plugin_path)
def test_register_definition_helper(self, spec):
def my_definition_helper(name, schema, **kwargs):
pass
spec.register_definition_helper(my_definition_helper)
assert my_definition_helper in spec._definition_helpers
def test_register_path_helper(self, spec):
def my_path_helper(**kwargs):
pass
spec.register_path_helper(my_path_helper)
assert my_path_helper in spec._path_helpers
def test_multiple_path_helpers_w_different_signatures(self, spec):
def helper1(spec, spam, **kwargs):
return Path(path='/foo/bar')
def helper2(spec, eggs, **kwargs):
return Path(path='/foo/bar')
spec.register_path_helper(helper1)
spec.register_path_helper(helper2)
spec.add_path(eggs=object())
def test_multiple_definition_helpers_w_different_signatures(self, spec):
def helper1(spec, name, spam, **kwargs):
return mock.MagicMock()
def helper2(spec, name, eggs, **kwargs):
return mock.MagicMock()
spec.register_definition_helper(helper1)
spec.register_definition_helper(helper2)
spec.definition('SpammitySpam', eggs=mock.MagicMock())
class TestDefinitionHelpers:
def test_definition_helpers_are_used(self, spec):
properties = {'properties': {'name': {'type': 'string'}}}
def definition_helper(spec, name, **kwargs):
assert type(spec) == APISpec
return properties
spec.register_definition_helper(definition_helper)
spec.definition('Pet', {})
assert spec._definitions['Pet'] == properties
def test_multiple_definition_helpers(self, spec):
def helper1(spec, name, **kwargs):
return {'properties': {'age': {'type': 'number'}}}
def helper2(spec, name, fmt, **kwargs):
return {'properties': {'age': {'type': 'number', 'format': fmt}}}
spec.register_definition_helper(helper1)
spec.register_definition_helper(helper2)
spec.definition('Pet', fmt='int32')
expected = {'properties': {'age': {'type': 'number', 'format': 'int32'}}}
assert spec._definitions['Pet'] == expected
class TestPathHelpers:
def test_path_helper_is_used(self, spec):
def path_helper(spec, view_func, **kwargs):
return Path(path=view_func['path'], method='get')
spec.register_path_helper(path_helper)
spec.add_path(
view_func={'path': '/pet/{petId}'},
operations=dict(
get=dict(
produces=('application/xml', ),
responses={
"200": {
"schema": {'$ref': '#/definitions/Pet'},
'description': 'successful operation'
}
}
)
)
)
expected = {
'/pet/{petId}': {
'get': {
'produces': ('application/xml', ),
'responses': {
'200': {
'schema': {'$ref': '#/definitions/Pet'},
'description': 'successful operation',
}
}
}
}
}
assert spec._paths == expected
class TestResponseHelpers:
def test_response_helper_is_used(self, spec):
def success_helper(spec, success_description, **kwargs):
return {'description': success_description}
spec.register_response_helper(success_helper, 'get', 200)
spec.add_path('/pet/{petId}', success_description='success!', operations={
'get': {
'responses': {
200: {
'schema': {'$ref': 'Pet'}
}
}
}
})
resp_obj = spec._paths['/pet/{petId}']['get']['responses'][200]
assert resp_obj['schema'] == {'$ref': 'Pet'}
assert resp_obj['description'] == 'success!'
|
gorgias/apispec
|
tests/test_core.py
|
Python
|
mit
| 13,849 | 0.000939 |
#!usr/bin/env python
# -*- coding: utf-8 -*-
"""
@author magic
"""
import urllib2
def download(url, user_agent='wswp', num_retries=2):
print 'Downloading:', url
headers = {'User-Agent': user_agent}
request = urllib2.Request(url, headers=headers)
try:
html = urllib2.urlopen(request).read()
except urllib2.URLError, e:
print 'Download error:', e.reason
html = None
if num_retries > 0:
if hasattr(e, 'code') and 500 <= e.code < 600:
html = download(url, user_agent, num_retries-1)
return html
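

# Illustrative call (URL hypothetical): retry up to twice on HTTP 5xx responses
# while sending a custom User-Agent header.
#
#     html = download('http://example.com', user_agent='my-crawler', num_retries=2)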
if __name__ == '__main__':
pass
# download('http://blog.csdn.net/column/details/datamining.html')
|
csunny/blog_project
|
source/libs/spider/common.py
|
Python
|
mit
| 694 | 0.001441 |
from django.conf.urls import patterns, include, url
from settings import STATIC_ROOT, GRAPHITE_API_PREFIX, CONTENT_DIR
# Uncomment the next two lines to enable the admin:
# from django.contrib import admin
# admin.autodiscover()
urlpatterns = patterns(
'',
# These views are needed for the django-rest-framework debug interface
# to be able to log in and out. The URL path doesn't matter, rest_framework
# finds the views by name.
url(r'^api/rest_framework/', include('rest_framework.urls', namespace='rest_framework')),
url(r'^$', 'calamari_web.views.home'),
url(r'^api/v1/', include('calamari_rest.urls.v1')),
url(r'^api/v2/', include('calamari_rest.urls.v2')),
url(r'^admin/(?P<path>.*)$', 'calamari_web.views.serve_dir_or_index',
{'document_root': '%s/admin/' % STATIC_ROOT}),
url(r'^manage/(?P<path>.*)$', 'calamari_web.views.serve_dir_or_index',
{'document_root': '%s/manage/' % STATIC_ROOT}),
url(r'^login/$', 'django.views.static.serve',
{'document_root': '%s/login/' % STATIC_ROOT, 'path': "index.html"}),
url(r'^login/(?P<path>.*)$', 'django.views.static.serve',
{'document_root': '%s/login/' % STATIC_ROOT}),
url(r'^bootstrap$', 'calamari_web.views.bootstrap', name='bootstrap'),
url(r'^dashboard/(?P<path>.*)$', 'calamari_web.views.dashboard',
{'document_root': '%s/dashboard/' % STATIC_ROOT},
name='dashboard'),
url(r'^render/?', include('graphite.render.urls')),
url(r'^metrics/?', include('graphite.metrics.urls')),
url(r'^%s/dashboard/?' % GRAPHITE_API_PREFIX.lstrip('/'), include('graphite.dashboard.urls')),
# XXX this is a hack to make graphite visible where the 1.x GUI expects it,
url(r'^graphite/render/?', include('graphite.render.urls')),
url(r'^graphite/metrics/?', include('graphite.metrics.urls')),
# XXX this is a hack to make graphite dashboard work in dev mode (full installation
# serves this part with apache)
url('^content/(?P<path>.*)$', 'django.views.static.serve', {'document_root': CONTENT_DIR}),
# XXX this is a hack to serve apt repo in dev mode (Full installation serves this with apache)
url(r'^static/ubuntu/(?P<path>.*)$', 'django.views.static.serve',
{'document_root': '%s/ubuntu/' % STATIC_ROOT}),
)
# Graphite dashboard client code is not CSRF enabled, but we have
# global CSRF protection enabled. Make exceptions for the views
# that the graphite dashboard wants to POST to.
from django.views.decorators.csrf import csrf_exempt
# By default graphite views are visible to anyone who asks:
# we only want to allow logged in users to access graphite
# API.
from django.contrib.auth.decorators import login_required
def patch_views(mod):
for url_pattern in mod.urlpatterns:
cb = url_pattern.callback
url_pattern._callback = csrf_exempt(login_required(cb))
import graphite.metrics.urls
import graphite.dashboard.urls
patch_views(graphite.metrics.urls)
patch_views(graphite.dashboard.urls)
# Explicitly reset to default or graphite hijacks it
handler500 = 'django.views.defaults.bad_request'
|
ceph/calamari-clients
|
utils/urls.py
|
Python
|
mit
| 3,135 | 0.00319 |
from __future__ import unicode_literals
from copy import copy
import difflib
import errno
from functools import wraps
import json
import os
import re
import sys
import select
import socket
import threading
import unittest
from unittest import skipIf # Imported here for backward compatibility
from unittest.util import safe_repr
try:
from urllib.parse import urlsplit, urlunsplit
except ImportError: # Python 2
from urlparse import urlsplit, urlunsplit
from django.conf import settings
from django.contrib.staticfiles.handlers import StaticFilesHandler
from django.core import mail
from django.core.exceptions import ValidationError, ImproperlyConfigured
from django.core.handlers.wsgi import WSGIHandler
from django.core.management import call_command
from django.core.management.color import no_style
from django.core.management.commands import flush
from django.core.servers.basehttp import (WSGIRequestHandler, WSGIServer,
WSGIServerException)
from django.core.urlresolvers import clear_url_caches, set_urlconf
from django.db import connection, connections, DEFAULT_DB_ALIAS, transaction
from django.db.models.loading import cache
from django.forms.fields import CharField
from django.http import QueryDict
from django.test.client import Client
from django.test.html import HTMLParseError, parse_html
from django.test.signals import template_rendered
from django.test.utils import (CaptureQueriesContext, ContextList,
override_settings, compare_xml)
from django.utils import six
from django.utils.encoding import force_text
from django.views.static import serve
__all__ = ('TestCase', 'TransactionTestCase',
'SimpleTestCase', 'skipIfDBFeature', 'skipUnlessDBFeature')
def to_list(value):
"""
Puts value into a list if it's not already one.
Returns an empty list if value is None.
"""
if value is None:
value = []
elif not isinstance(value, list):
value = [value]
return value
real_commit = transaction.commit
real_rollback = transaction.rollback
real_enter_transaction_management = transaction.enter_transaction_management
real_leave_transaction_management = transaction.leave_transaction_management
real_abort = transaction.abort
def nop(*args, **kwargs):
return
def disable_transaction_methods():
transaction.commit = nop
transaction.rollback = nop
transaction.enter_transaction_management = nop
transaction.leave_transaction_management = nop
transaction.abort = nop
def restore_transaction_methods():
transaction.commit = real_commit
transaction.rollback = real_rollback
transaction.enter_transaction_management = real_enter_transaction_management
transaction.leave_transaction_management = real_leave_transaction_management
transaction.abort = real_abort
def assert_and_parse_html(self, html, user_msg, msg):
try:
dom = parse_html(html)
except HTMLParseError as e:
standardMsg = '%s\n%s' % (msg, e.msg)
self.fail(self._formatMessage(user_msg, standardMsg))
return dom
class _AssertNumQueriesContext(CaptureQueriesContext):
def __init__(self, test_case, num, connection):
self.test_case = test_case
self.num = num
super(_AssertNumQueriesContext, self).__init__(connection)
def __exit__(self, exc_type, exc_value, traceback):
super(_AssertNumQueriesContext, self).__exit__(exc_type, exc_value, traceback)
if exc_type is not None:
return
executed = len(self)
self.test_case.assertEqual(
executed, self.num, "%d queries executed, %d expected" % (
executed, self.num
)
)
class _AssertTemplateUsedContext(object):
def __init__(self, test_case, template_name):
self.test_case = test_case
self.template_name = template_name
self.rendered_templates = []
self.rendered_template_names = []
self.context = ContextList()
def on_template_render(self, sender, signal, template, context, **kwargs):
self.rendered_templates.append(template)
self.rendered_template_names.append(template.name)
self.context.append(copy(context))
def test(self):
return self.template_name in self.rendered_template_names
def message(self):
return '%s was not rendered.' % self.template_name
def __enter__(self):
template_rendered.connect(self.on_template_render)
return self
def __exit__(self, exc_type, exc_value, traceback):
template_rendered.disconnect(self.on_template_render)
if exc_type is not None:
return
if not self.test():
message = self.message()
if len(self.rendered_templates) == 0:
message += ' No template was rendered.'
else:
message += ' Following templates were rendered: %s' % (
', '.join(self.rendered_template_names))
self.test_case.fail(message)
class _AssertTemplateNotUsedContext(_AssertTemplateUsedContext):
def test(self):
return self.template_name not in self.rendered_template_names
def message(self):
return '%s was rendered.' % self.template_name
class SimpleTestCase(unittest.TestCase):
# The class we'll use for the test client self.client.
# Can be overridden in derived classes.
client_class = Client
def __call__(self, result=None):
"""
Wrapper around default __call__ method to perform common Django test
set up. This means that user-defined Test Cases aren't required to
include a call to super().setUp().
"""
testMethod = getattr(self, self._testMethodName)
skipped = (getattr(self.__class__, "__unittest_skip__", False) or
getattr(testMethod, "__unittest_skip__", False))
if not skipped:
try:
self._pre_setup()
except (KeyboardInterrupt, SystemExit):
raise
except Exception:
result.addError(self, sys.exc_info())
return
super(SimpleTestCase, self).__call__(result)
if not skipped:
try:
self._post_teardown()
except (KeyboardInterrupt, SystemExit):
raise
except Exception:
result.addError(self, sys.exc_info())
return
def _pre_setup(self):
"""Performs any pre-test setup. This includes:
* Creating a test client.
* If the class has a 'urls' attribute, replace ROOT_URLCONF with it.
* Clearing the mail test outbox.
"""
self.client = self.client_class()
self._urlconf_setup()
mail.outbox = []
def _urlconf_setup(self):
set_urlconf(None)
if hasattr(self, 'urls'):
self._old_root_urlconf = settings.ROOT_URLCONF
settings.ROOT_URLCONF = self.urls
clear_url_caches()
def _post_teardown(self):
"""Performs any post-test things. This includes:
* Putting back the original ROOT_URLCONF if it was changed.
"""
self._urlconf_teardown()
def _urlconf_teardown(self):
set_urlconf(None)
if hasattr(self, '_old_root_urlconf'):
settings.ROOT_URLCONF = self._old_root_urlconf
clear_url_caches()
def settings(self, **kwargs):
"""
A context manager that temporarily sets a setting and reverts
back to the original value when exiting the context.
"""
return override_settings(**kwargs)
def assertRedirects(self, response, expected_url, status_code=302,
target_status_code=200, host=None, msg_prefix=''):
"""Asserts that a response redirected to a specific URL, and that the
redirect URL can be loaded.
Note that assertRedirects won't work for external links since it uses
TestClient to do a request.
"""
if msg_prefix:
msg_prefix += ": "
if hasattr(response, 'redirect_chain'):
# The request was a followed redirect
self.assertTrue(len(response.redirect_chain) > 0,
msg_prefix + "Response didn't redirect as expected: Response"
" code was %d (expected %d)" %
(response.status_code, status_code))
self.assertEqual(response.redirect_chain[0][1], status_code,
msg_prefix + "Initial response didn't redirect as expected:"
" Response code was %d (expected %d)" %
(response.redirect_chain[0][1], status_code))
url, status_code = response.redirect_chain[-1]
self.assertEqual(response.status_code, target_status_code,
msg_prefix + "Response didn't redirect as expected: Final"
" Response code was %d (expected %d)" %
(response.status_code, target_status_code))
else:
# Not a followed redirect
self.assertEqual(response.status_code, status_code,
msg_prefix + "Response didn't redirect as expected: Response"
" code was %d (expected %d)" %
(response.status_code, status_code))
url = response.url
scheme, netloc, path, query, fragment = urlsplit(url)
redirect_response = response.client.get(path, QueryDict(query))
# Get the redirection page, using the same client that was used
# to obtain the original response.
self.assertEqual(redirect_response.status_code, target_status_code,
msg_prefix + "Couldn't retrieve redirection page '%s':"
" response code was %d (expected %d)" %
(path, redirect_response.status_code, target_status_code))
e_scheme, e_netloc, e_path, e_query, e_fragment = urlsplit(
expected_url)
if not (e_scheme or e_netloc):
expected_url = urlunsplit(('http', host or 'testserver', e_path,
e_query, e_fragment))
self.assertEqual(url, expected_url,
msg_prefix + "Response redirected to '%s', expected '%s'" %
(url, expected_url))
def assertContains(self, response, text, count=None, status_code=200,
msg_prefix='', html=False):
"""
Asserts that a response indicates that some content was retrieved
        successfully (i.e., the HTTP status code was as expected), and that
``text`` occurs ``count`` times in the content of the response.
If ``count`` is None, the count doesn't matter - the assertion is true
if the text occurs at least once in the response.
"""
# If the response supports deferred rendering and hasn't been rendered
# yet, then ensure that it does get rendered before proceeding further.
if (hasattr(response, 'render') and callable(response.render)
and not response.is_rendered):
response.render()
if msg_prefix:
msg_prefix += ": "
self.assertEqual(response.status_code, status_code,
msg_prefix + "Couldn't retrieve content: Response code was %d"
" (expected %d)" % (response.status_code, status_code))
if response.streaming:
content = b''.join(response.streaming_content)
else:
content = response.content
if not isinstance(text, bytes) or html:
text = force_text(text, encoding=response._charset)
content = content.decode(response._charset)
text_repr = "'%s'" % text
else:
text_repr = repr(text)
if html:
content = assert_and_parse_html(self, content, None,
"Response's content is not valid HTML:")
text = assert_and_parse_html(self, text, None,
"Second argument is not valid HTML:")
real_count = content.count(text)
if count is not None:
self.assertEqual(real_count, count,
msg_prefix + "Found %d instances of %s in response"
" (expected %d)" % (real_count, text_repr, count))
else:
self.assertTrue(real_count != 0,
msg_prefix + "Couldn't find %s in response" % text_repr)
def assertNotContains(self, response, text, status_code=200,
msg_prefix='', html=False):
"""
Asserts that a response indicates that some content was retrieved
        successfully (i.e., the HTTP status code was as expected), and that
        ``text`` does not occur in the content of the response.
"""
# If the response supports deferred rendering and hasn't been rendered
# yet, then ensure that it does get rendered before proceeding further.
if (hasattr(response, 'render') and callable(response.render)
and not response.is_rendered):
response.render()
if msg_prefix:
msg_prefix += ": "
self.assertEqual(response.status_code, status_code,
msg_prefix + "Couldn't retrieve content: Response code was %d"
" (expected %d)" % (response.status_code, status_code))
content = response.content
if not isinstance(text, bytes) or html:
text = force_text(text, encoding=response._charset)
content = content.decode(response._charset)
text_repr = "'%s'" % text
else:
text_repr = repr(text)
if html:
content = assert_and_parse_html(self, content, None,
'Response\'s content is not valid HTML:')
text = assert_and_parse_html(self, text, None,
'Second argument is not valid HTML:')
self.assertEqual(content.count(text), 0,
msg_prefix + "Response should not contain %s" % text_repr)
def assertFormError(self, response, form, field, errors, msg_prefix=''):
"""
Asserts that a form used to render the response has a specific field
error.
"""
if msg_prefix:
msg_prefix += ": "
# Put context(s) into a list to simplify processing.
contexts = to_list(response.context)
if not contexts:
self.fail(msg_prefix + "Response did not use any contexts to "
"render the response")
# Put error(s) into a list to simplify processing.
errors = to_list(errors)
# Search all contexts for the error.
found_form = False
for i,context in enumerate(contexts):
if form not in context:
continue
found_form = True
for err in errors:
if field:
if field in context[form].errors:
field_errors = context[form].errors[field]
self.assertTrue(err in field_errors,
msg_prefix + "The field '%s' on form '%s' in"
" context %d does not contain the error '%s'"
" (actual errors: %s)" %
(field, form, i, err, repr(field_errors)))
elif field in context[form].fields:
self.fail(msg_prefix + "The field '%s' on form '%s'"
" in context %d contains no errors" %
(field, form, i))
else:
self.fail(msg_prefix + "The form '%s' in context %d"
" does not contain the field '%s'" %
(form, i, field))
else:
non_field_errors = context[form].non_field_errors()
self.assertTrue(err in non_field_errors,
msg_prefix + "The form '%s' in context %d does not"
" contain the non-field error '%s'"
" (actual errors: %s)" %
(form, i, err, non_field_errors))
if not found_form:
self.fail(msg_prefix + "The form '%s' was not used to render the"
" response" % form)
def assertFormsetError(self, response, formset, form_index, field, errors,
msg_prefix=''):
"""
Asserts that a formset used to render the response has a specific error.
For field errors, specify the ``form_index`` and the ``field``.
For non-field errors, specify the ``form_index`` and the ``field`` as
None.
For non-form errors, specify ``form_index`` as None and the ``field``
as None.
"""
# Add punctuation to msg_prefix
if msg_prefix:
msg_prefix += ": "
# Put context(s) into a list to simplify processing.
contexts = to_list(response.context)
if not contexts:
self.fail(msg_prefix + 'Response did not use any contexts to '
'render the response')
# Put error(s) into a list to simplify processing.
errors = to_list(errors)
# Search all contexts for the error.
found_formset = False
for i, context in enumerate(contexts):
if formset not in context:
continue
found_formset = True
for err in errors:
if field is not None:
if field in context[formset].forms[form_index].errors:
field_errors = context[formset].forms[form_index].errors[field]
self.assertTrue(err in field_errors,
msg_prefix + "The field '%s' on formset '%s', "
"form %d in context %d does not contain the "
"error '%s' (actual errors: %s)" %
(field, formset, form_index, i, err,
repr(field_errors)))
elif field in context[formset].forms[form_index].fields:
self.fail(msg_prefix + "The field '%s' "
"on formset '%s', form %d in "
"context %d contains no errors" %
(field, formset, form_index, i))
else:
self.fail(msg_prefix + "The formset '%s', form %d in "
"context %d does not contain the field '%s'" %
(formset, form_index, i, field))
elif form_index is not None:
non_field_errors = context[formset].forms[form_index].non_field_errors()
self.assertFalse(len(non_field_errors) == 0,
msg_prefix + "The formset '%s', form %d in "
"context %d does not contain any non-field "
"errors." % (formset, form_index, i))
self.assertTrue(err in non_field_errors,
msg_prefix + "The formset '%s', form %d "
"in context %d does not contain the "
"non-field error '%s' "
"(actual errors: %s)" %
(formset, form_index, i, err,
repr(non_field_errors)))
else:
non_form_errors = context[formset].non_form_errors()
self.assertFalse(len(non_form_errors) == 0,
msg_prefix + "The formset '%s' in "
"context %d does not contain any "
"non-form errors." % (formset, i))
self.assertTrue(err in non_form_errors,
msg_prefix + "The formset '%s' in context "
"%d does not contain the "
"non-form error '%s' (actual errors: %s)" %
(formset, i, err, repr(non_form_errors)))
if not found_formset:
self.fail(msg_prefix + "The formset '%s' was not used to render "
"the response" % formset)
def assertTemplateUsed(self, response=None, template_name=None, msg_prefix=''):
"""
Asserts that the template with the provided name was used in rendering
the response. Also usable as context manager.
"""
if response is None and template_name is None:
raise TypeError('response and/or template_name argument must be provided')
if msg_prefix:
msg_prefix += ": "
# Use assertTemplateUsed as context manager.
if not hasattr(response, 'templates') or (response is None and template_name):
if response:
template_name = response
response = None
context = _AssertTemplateUsedContext(self, template_name)
return context
template_names = [t.name for t in response.templates]
if not template_names:
self.fail(msg_prefix + "No templates used to render the response")
self.assertTrue(template_name in template_names,
msg_prefix + "Template '%s' was not a template used to render"
" the response. Actual template(s) used: %s" %
(template_name, ', '.join(template_names)))
def assertTemplateNotUsed(self, response=None, template_name=None, msg_prefix=''):
"""
Asserts that the template with the provided name was NOT used in
rendering the response. Also usable as context manager.
"""
if response is None and template_name is None:
raise TypeError('response and/or template_name argument must be provided')
if msg_prefix:
msg_prefix += ": "
# Use assertTemplateUsed as context manager.
if not hasattr(response, 'templates') or (response is None and template_name):
if response:
template_name = response
response = None
context = _AssertTemplateNotUsedContext(self, template_name)
return context
template_names = [t.name for t in response.templates]
self.assertFalse(template_name in template_names,
msg_prefix + "Template '%s' was used unexpectedly in rendering"
" the response" % template_name)
def assertRaisesMessage(self, expected_exception, expected_message,
callable_obj=None, *args, **kwargs):
"""
Asserts that the message in a raised exception matches the passed
value.
Args:
expected_exception: Exception class expected to be raised.
expected_message: expected error message string value.
callable_obj: Function to be called.
args: Extra args.
kwargs: Extra kwargs.
"""
return six.assertRaisesRegex(self, expected_exception,
re.escape(expected_message), callable_obj, *args, **kwargs)
def assertFieldOutput(self, fieldclass, valid, invalid, field_args=None,
field_kwargs=None, empty_value=''):
"""
Asserts that a form field behaves correctly with various inputs.
Args:
fieldclass: the class of the field to be tested.
valid: a dictionary mapping valid inputs to their expected
cleaned values.
invalid: a dictionary mapping invalid inputs to one or more
raised error messages.
field_args: the args passed to instantiate the field
field_kwargs: the kwargs passed to instantiate the field
empty_value: the expected clean output for inputs in empty_values
"""
if field_args is None:
field_args = []
if field_kwargs is None:
field_kwargs = {}
required = fieldclass(*field_args, **field_kwargs)
optional = fieldclass(*field_args,
**dict(field_kwargs, required=False))
# test valid inputs
for input, output in valid.items():
self.assertEqual(required.clean(input), output)
self.assertEqual(optional.clean(input), output)
# test invalid inputs
for input, errors in invalid.items():
with self.assertRaises(ValidationError) as context_manager:
required.clean(input)
self.assertEqual(context_manager.exception.messages, errors)
with self.assertRaises(ValidationError) as context_manager:
optional.clean(input)
self.assertEqual(context_manager.exception.messages, errors)
# test required inputs
error_required = [force_text(required.error_messages['required'])]
for e in required.empty_values:
with self.assertRaises(ValidationError) as context_manager:
required.clean(e)
self.assertEqual(context_manager.exception.messages,
error_required)
self.assertEqual(optional.clean(e), empty_value)
# test that max_length and min_length are always accepted
if issubclass(fieldclass, CharField):
field_kwargs.update({'min_length':2, 'max_length':20})
self.assertTrue(isinstance(fieldclass(*field_args, **field_kwargs),
fieldclass))
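
    # Illustrative call (the field class and error text are placeholders; exact
    # messages vary between Django versions):
    #
    #     self.assertFieldOutput(
    #         EmailField,
    #         valid={'a@example.com': 'a@example.com'},
    #         invalid={'not-an-email': ['Enter a valid email address.']},
    #     )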
def assertHTMLEqual(self, html1, html2, msg=None):
"""
Asserts that two HTML snippets are semantically the same.
Whitespace in most cases is ignored, and attribute ordering is not
significant. The passed-in arguments must be valid HTML.
"""
dom1 = assert_and_parse_html(self, html1, msg,
'First argument is not valid HTML:')
dom2 = assert_and_parse_html(self, html2, msg,
'Second argument is not valid HTML:')
if dom1 != dom2:
standardMsg = '%s != %s' % (
safe_repr(dom1, True), safe_repr(dom2, True))
diff = ('\n' + '\n'.join(difflib.ndiff(
six.text_type(dom1).splitlines(),
six.text_type(dom2).splitlines())))
standardMsg = self._truncateMessage(standardMsg, diff)
self.fail(self._formatMessage(msg, standardMsg))
def assertHTMLNotEqual(self, html1, html2, msg=None):
"""Asserts that two HTML snippets are not semantically equivalent."""
dom1 = assert_and_parse_html(self, html1, msg,
'First argument is not valid HTML:')
dom2 = assert_and_parse_html(self, html2, msg,
'Second argument is not valid HTML:')
if dom1 == dom2:
standardMsg = '%s == %s' % (
safe_repr(dom1, True), safe_repr(dom2, True))
self.fail(self._formatMessage(msg, standardMsg))
def assertInHTML(self, needle, haystack, count=None, msg_prefix=''):
needle = assert_and_parse_html(self, needle, None,
'First argument is not valid HTML:')
haystack = assert_and_parse_html(self, haystack, None,
'Second argument is not valid HTML:')
real_count = haystack.count(needle)
if count is not None:
self.assertEqual(real_count, count,
msg_prefix + "Found %d instances of '%s' in response"
" (expected %d)" % (real_count, needle, count))
else:
self.assertTrue(real_count != 0,
msg_prefix + "Couldn't find '%s' in response" % needle)
def assertJSONEqual(self, raw, expected_data, msg=None):
try:
data = json.loads(raw)
except ValueError:
self.fail("First argument is not valid JSON: %r" % raw)
if isinstance(expected_data, six.string_types):
try:
expected_data = json.loads(expected_data)
except ValueError:
self.fail("Second argument is not valid JSON: %r" % expected_data)
self.assertEqual(data, expected_data, msg=msg)
def assertXMLEqual(self, xml1, xml2, msg=None):
"""
Asserts that two XML snippets are semantically the same.
Whitespace in most cases is ignored, and attribute ordering is not
significant. The passed-in arguments must be valid XML.
"""
try:
result = compare_xml(xml1, xml2)
except Exception as e:
standardMsg = 'First or second argument is not valid XML\n%s' % e
self.fail(self._formatMessage(msg, standardMsg))
else:
if not result:
standardMsg = '%s != %s' % (safe_repr(xml1, True), safe_repr(xml2, True))
self.fail(self._formatMessage(msg, standardMsg))
def assertXMLNotEqual(self, xml1, xml2, msg=None):
"""
Asserts that two XML snippets are not semantically equivalent.
Whitespace in most cases is ignored, and attribute ordering is not
significant. The passed-in arguments must be valid XML.
"""
try:
result = compare_xml(xml1, xml2)
except Exception as e:
standardMsg = 'First or second argument is not valid XML\n%s' % e
self.fail(self._formatMessage(msg, standardMsg))
else:
if result:
standardMsg = '%s == %s' % (safe_repr(xml1, True), safe_repr(xml2, True))
self.fail(self._formatMessage(msg, standardMsg))
class TransactionTestCase(SimpleTestCase):
# Subclasses can ask for resetting of auto increment sequence before each
# test case
reset_sequences = False
# Subclasses can enable only a subset of apps for faster tests
available_apps = None
def _pre_setup(self):
"""Performs any pre-test setup. This includes:
* If the class has an 'available_apps' attribute, restricting the app
cache to these applications, then firing post_syncdb -- it must run
with the correct set of applications for the test case.
* If the class has a 'fixtures' attribute, installing these fixtures.
"""
super(TransactionTestCase, self)._pre_setup()
if self.available_apps is not None:
cache.set_available_apps(self.available_apps)
for db_name in self._databases_names(include_mirrors=False):
flush.Command.emit_post_syncdb(
verbosity=0, interactive=False, database=db_name)
try:
self._fixture_setup()
except Exception:
if self.available_apps is not None:
cache.unset_available_apps()
raise
def _databases_names(self, include_mirrors=True):
# If the test case has a multi_db=True flag, act on all databases,
# including mirrors or not. Otherwise, just on the default DB.
if getattr(self, 'multi_db', False):
return [alias for alias in connections
if include_mirrors or not connections[alias].settings_dict['TEST_MIRROR']]
else:
return [DEFAULT_DB_ALIAS]
def _reset_sequences(self, db_name):
conn = connections[db_name]
if conn.features.supports_sequence_reset:
sql_list = \
conn.ops.sequence_reset_by_name_sql(no_style(),
conn.introspection.sequence_list())
if sql_list:
with transaction.commit_on_success_unless_managed(using=db_name):
cursor = conn.cursor()
for sql in sql_list:
cursor.execute(sql)
def _fixture_setup(self):
for db_name in self._databases_names(include_mirrors=False):
# Reset sequences
if self.reset_sequences:
self._reset_sequences(db_name)
if hasattr(self, 'fixtures'):
# We have to use this slightly awkward syntax due to the fact
# that we're using *args and **kwargs together.
call_command('loaddata', *self.fixtures,
**{'verbosity': 0, 'database': db_name, 'skip_validation': True})
def _post_teardown(self):
"""Performs any post-test things. This includes:
* Flushing the contents of the database, to leave a clean slate. If
the class has an 'available_apps' attribute, post_syncdb isn't fired.
* Force-closing the connection, so the next test gets a clean cursor.
"""
try:
self._fixture_teardown()
super(TransactionTestCase, self)._post_teardown()
# Some DB cursors include SQL statements as part of cursor
# creation. If you have a test that does rollback, the effect of
            # these statements is lost, which can affect the operation of
# tests (e.g., losing a timezone setting causing objects to be
# created with the wrong time). To make sure this doesn't happen,
# get a clean connection at the start of every test.
for conn in connections.all():
conn.close()
finally:
cache.unset_available_apps()
def _fixture_teardown(self):
# Allow TRUNCATE ... CASCADE and don't emit the post_syncdb signal
# when flushing only a subset of the apps
for db_name in self._databases_names(include_mirrors=False):
call_command('flush', verbosity=0, interactive=False,
database=db_name, skip_validation=True,
reset_sequences=False,
allow_cascade=self.available_apps is not None,
inhibit_post_syncdb=self.available_apps is not None)
def assertQuerysetEqual(self, qs, values, transform=repr, ordered=True):
items = six.moves.map(transform, qs)
if not ordered:
return self.assertEqual(set(items), set(values))
values = list(values)
# For example qs.iterator() could be passed as qs, but it does not
# have 'ordered' attribute.
if len(values) > 1 and hasattr(qs, 'ordered') and not qs.ordered:
raise ValueError("Trying to compare non-ordered queryset "
"against more than one ordered values")
return self.assertEqual(list(items), values)
def assertNumQueries(self, num, func=None, *args, **kwargs):
using = kwargs.pop("using", DEFAULT_DB_ALIAS)
conn = connections[using]
context = _AssertNumQueriesContext(self, num, conn)
if func is None:
return context
with context:
func(*args, **kwargs)
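# --- Editor's usage sketch: the class below is an assumption added for
# illustration and is not part of the original module; the raw SQL and the
# compared values are invented. ---
class _ExampleAssertionTests(TransactionTestCase):
    def test_query_count_and_queryset_comparison(self):
        # assertNumQueries() acts as a context manager when no callable is given.
        with self.assertNumQueries(1):
            connections[DEFAULT_DB_ALIAS].cursor().execute("SELECT 1")
        # assertQuerysetEqual() compares after applying `transform` (repr by
        # default); any iterable can stand in for the queryset.
        self.assertQuerysetEqual([1, 2, 3], ['1', '2', '3'])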
def connections_support_transactions():
"""
Returns True if all connections support transactions.
"""
return all(conn.features.supports_transactions
for conn in connections.all())
class TestCase(TransactionTestCase):
"""
Does basically the same as TransactionTestCase, but surrounds every test
with a transaction, monkey-patches the real transaction management routines
    to do nothing, and rolls back the test transaction at the end of the test.
    You have to use TransactionTestCase if you need transaction management
inside a test.
"""
def _fixture_setup(self):
if not connections_support_transactions():
return super(TestCase, self)._fixture_setup()
assert not self.reset_sequences, 'reset_sequences cannot be used on TestCase instances'
self.atomics = {}
for db_name in self._databases_names():
self.atomics[db_name] = transaction.atomic(using=db_name)
self.atomics[db_name].__enter__()
# Remove this when the legacy transaction management goes away.
disable_transaction_methods()
for db_name in self._databases_names(include_mirrors=False):
if hasattr(self, 'fixtures'):
try:
call_command('loaddata', *self.fixtures,
**{
'verbosity': 0,
'commit': False,
'database': db_name,
'skip_validation': True,
})
except Exception:
self._fixture_teardown()
raise
def _fixture_teardown(self):
if not connections_support_transactions():
return super(TestCase, self)._fixture_teardown()
# Remove this when the legacy transaction management goes away.
restore_transaction_methods()
for db_name in reversed(self._databases_names()):
# Hack to force a rollback
connections[db_name].needs_rollback = True
self.atomics[db_name].__exit__(None, None, None)
def _deferredSkip(condition, reason):
def decorator(test_func):
if not (isinstance(test_func, type) and
issubclass(test_func, TestCase)):
@wraps(test_func)
def skip_wrapper(*args, **kwargs):
if condition():
raise unittest.SkipTest(reason)
return test_func(*args, **kwargs)
test_item = skip_wrapper
else:
test_item = test_func
test_item.__unittest_skip_why__ = reason
return test_item
return decorator
def skipIfDBFeature(feature):
"""
Skip a test if a database has the named feature
"""
return _deferredSkip(lambda: getattr(connection.features, feature),
"Database has feature %s" % feature)
def skipUnlessDBFeature(feature):
"""
Skip a test unless a database has the named feature
"""
return _deferredSkip(lambda: not getattr(connection.features, feature),
"Database doesn't support feature %s" % feature)
class QuietWSGIRequestHandler(WSGIRequestHandler):
"""
Just a regular WSGIRequestHandler except it doesn't log to the standard
    output any of the requests received, so as not to clutter the test
    results output.
"""
def log_message(*args):
pass
if sys.version_info >= (3, 3, 0):
_ImprovedEvent = threading.Event
elif sys.version_info >= (2, 7, 0):
_ImprovedEvent = threading._Event
else:
class _ImprovedEvent(threading._Event):
"""
Does the same as `threading.Event` except it overrides the wait() method
with some code borrowed from Python 2.7 to return the set state of the
        event (see: http://hg.python.org/cpython/rev/b5aa8aa78c0f/). This makes
        it possible to know whether the wait() method exited normally or because
        of the timeout. This class can be removed when Django supports only Python >= 2.7.
"""
def wait(self, timeout=None):
self._Event__cond.acquire()
try:
if not self._Event__flag:
self._Event__cond.wait(timeout)
return self._Event__flag
finally:
self._Event__cond.release()
class StoppableWSGIServer(WSGIServer):
"""
The code in this class is borrowed from the `SocketServer.BaseServer` class
in Python 2.6. The important functionality here is that the server is non-
blocking and that it can be shut down at any moment. This is made possible
by the server regularly polling the socket and checking if it has been
asked to stop.
Note for the future: Once Django stops supporting Python 2.6, this class
    can be removed as `WSGIServer` will have the ability to shut down on
demand and will not require the use of the _ImprovedEvent class whose code
is borrowed from Python 2.7.
"""
def __init__(self, *args, **kwargs):
super(StoppableWSGIServer, self).__init__(*args, **kwargs)
self.__is_shut_down = _ImprovedEvent()
self.__serving = False
def serve_forever(self, poll_interval=0.5):
"""
Handle one request at a time until shutdown.
Polls for shutdown every poll_interval seconds.
"""
self.__serving = True
self.__is_shut_down.clear()
while self.__serving:
r, w, e = select.select([self], [], [], poll_interval)
if r:
self._handle_request_noblock()
self.__is_shut_down.set()
def shutdown(self):
"""
Stops the serve_forever loop.
Blocks until the loop has finished. This must be called while
serve_forever() is running in another thread, or it will
deadlock.
"""
self.__serving = False
if not self.__is_shut_down.wait(2):
raise RuntimeError(
"Failed to shutdown the live test server in 2 seconds. The "
"server might be stuck or generating a slow response.")
def handle_request(self):
"""Handle one request, possibly blocking.
"""
fd_sets = select.select([self], [], [], None)
if not fd_sets[0]:
return
self._handle_request_noblock()
def _handle_request_noblock(self):
"""
Handle one request, without blocking.
I assume that select.select has returned that the socket is
readable before this function was called, so there should be
no risk of blocking in get_request().
"""
try:
request, client_address = self.get_request()
except socket.error:
return
if self.verify_request(request, client_address):
try:
self.process_request(request, client_address)
except Exception:
self.handle_error(request, client_address)
self.close_request(request)
class _MediaFilesHandler(StaticFilesHandler):
"""
Handler for serving the media files. This is a private class that is
meant to be used solely as a convenience by LiveServerThread.
"""
def get_base_dir(self):
return settings.MEDIA_ROOT
def get_base_url(self):
return settings.MEDIA_URL
def serve(self, request):
relative_url = request.path[len(self.base_url[2]):]
return serve(request, relative_url, document_root=self.get_base_dir())
class LiveServerThread(threading.Thread):
"""
Thread for running a live http server while the tests are running.
"""
def __init__(self, host, possible_ports, connections_override=None):
self.host = host
self.port = None
self.possible_ports = possible_ports
self.is_ready = threading.Event()
self.error = None
self.connections_override = connections_override
super(LiveServerThread, self).__init__()
def run(self):
"""
Sets up the live server and databases, and then loops over handling
http requests.
"""
if self.connections_override:
# Override this thread's database connections with the ones
# provided by the main thread.
for alias, conn in self.connections_override.items():
connections[alias] = conn
try:
# Create the handler for serving static and media files
handler = StaticFilesHandler(_MediaFilesHandler(WSGIHandler()))
# Go through the list of possible ports, hoping that we can find
# one that is free to use for the WSGI server.
for index, port in enumerate(self.possible_ports):
try:
self.httpd = StoppableWSGIServer(
(self.host, port), QuietWSGIRequestHandler)
except WSGIServerException as e:
if (index + 1 < len(self.possible_ports) and
hasattr(e.args[0], 'errno') and
e.args[0].errno == errno.EADDRINUSE):
# This port is already in use, so we go on and try with
# the next one in the list.
continue
else:
# Either none of the given ports are free or the error
                        # is something other than "Address already in use". So
# we let that error bubble up to the main thread.
raise
else:
# A free port was found.
self.port = port
break
self.httpd.set_app(handler)
self.is_ready.set()
self.httpd.serve_forever()
except Exception as e:
self.error = e
self.is_ready.set()
def join(self, timeout=None):
if hasattr(self, 'httpd'):
# Stop the WSGI server
self.httpd.shutdown()
self.httpd.server_close()
super(LiveServerThread, self).join(timeout)
class LiveServerTestCase(TransactionTestCase):
"""
Does basically the same as TransactionTestCase but also launches a live
http server in a separate thread so that the tests may use another testing
framework, such as Selenium for example, instead of the built-in dummy
client.
Note that it inherits from TransactionTestCase instead of TestCase because
    the threads do not share the same transactions (unless using in-memory
    SQLite) and each thread needs to commit all of its transactions so that the
    other thread can see the changes.
"""
@property
def live_server_url(self):
return 'http://%s:%s' % (
self.server_thread.host, self.server_thread.port)
@classmethod
def setUpClass(cls):
connections_override = {}
for conn in connections.all():
# If using in-memory sqlite databases, pass the connections to
# the server thread.
if (conn.settings_dict['ENGINE'].rsplit('.', 1)[-1] in ('sqlite3', 'spatialite')
and conn.settings_dict['NAME'] == ':memory:'):
# Explicitly enable thread-shareability for this connection
conn.allow_thread_sharing = True
connections_override[conn.alias] = conn
# Launch the live server's thread
specified_address = os.environ.get(
'DJANGO_LIVE_TEST_SERVER_ADDRESS', 'localhost:8081')
# The specified ports may be of the form '8000-8010,8080,9200-9300'
# i.e. a comma-separated list of ports or ranges of ports, so we break
# it down into a detailed list of all possible ports.
possible_ports = []
try:
host, port_ranges = specified_address.split(':')
for port_range in port_ranges.split(','):
# A port range can be of either form: '8000' or '8000-8010'.
extremes = list(map(int, port_range.split('-')))
assert len(extremes) in [1, 2]
if len(extremes) == 1:
# Port range of the form '8000'
possible_ports.append(extremes[0])
else:
# Port range of the form '8000-8010'
for port in range(extremes[0], extremes[1] + 1):
possible_ports.append(port)
except Exception:
msg = 'Invalid address ("%s") for live server.' % specified_address
six.reraise(ImproperlyConfigured, ImproperlyConfigured(msg), sys.exc_info()[2])
cls.server_thread = LiveServerThread(
host, possible_ports, connections_override)
cls.server_thread.daemon = True
cls.server_thread.start()
# Wait for the live server to be ready
cls.server_thread.is_ready.wait()
if cls.server_thread.error:
raise cls.server_thread.error
super(LiveServerTestCase, cls).setUpClass()
@classmethod
def tearDownClass(cls):
# There may not be a 'server_thread' attribute if setUpClass() for some
        # reason has raised an exception.
if hasattr(cls, 'server_thread'):
# Terminate the live server's thread
cls.server_thread.join()
        # Restore sqlite connections' non-shareability
for conn in connections.all():
if (conn.settings_dict['ENGINE'].rsplit('.', 1)[-1] in ('sqlite3', 'spatialite')
and conn.settings_dict['NAME'] == ':memory:'):
conn.allow_thread_sharing = False
super(LiveServerTestCase, cls).tearDownClass()
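# --- Editor's usage sketch (comments only): the subclass below is an assumption
# for illustration, not part of this module; it presumes the third-party
# `selenium` package and an existing '/login/' URL. ---
#
#     from selenium.webdriver.firefox.webdriver import WebDriver
#
#     class MySeleniumTests(LiveServerTestCase):
#         @classmethod
#         def setUpClass(cls):
#             cls.selenium = WebDriver()
#             super(MySeleniumTests, cls).setUpClass()
#         @classmethod
#         def tearDownClass(cls):
#             cls.selenium.quit()
#             super(MySeleniumTests, cls).tearDownClass()
#         def test_login_page_is_served(self):
#             self.selenium.get('%s%s' % (self.live_server_url, '/login/'))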
|
makinacorpus/django
|
django/test/testcases.py
|
Python
|
bsd-3-clause
| 49,061 | 0.002242 |
""" Helper for the CS Resources section
"""
import re
from distutils.version import LooseVersion #pylint: disable=no-name-in-module,import-error
from DIRAC import S_OK, S_ERROR, gConfig
from DIRAC.ConfigurationSystem.Client.Helpers.Path import cfgPath
from DIRAC.Core.Utilities.List import uniqueElements, fromChar
__RCSID__ = "$Id$"
gBaseResourcesSection = "/Resources"
def getSites():
""" Get the list of all the sites defined in the CS
"""
result = gConfig.getSections( cfgPath( gBaseResourcesSection, 'Sites' ) )
if not result['OK']:
return result
grids = result['Value']
sites = []
for grid in grids:
result = gConfig.getSections( cfgPath( gBaseResourcesSection, 'Sites', grid ) )
if not result['OK']:
return result
sites += result['Value']
return S_OK( sites )
def getStorageElementSiteMapping( siteList = None ):
""" Get Storage Element belonging to the given sites
"""
if not siteList:
result = getSites()
if not result['OK']:
return result
siteList = result['Value']
siteDict = {}
for site in siteList:
grid = site.split( '.' )[0]
ses = gConfig.getValue( cfgPath( gBaseResourcesSection, 'Sites', grid, site, 'SE' ), [] )
if ses:
siteDict[site] = ses
return S_OK( siteDict )
def getFTS2ServersForSites( siteList = None ):
""" get FTSServers for sites
:param siteList: list of sites
:type siteList: python:list
"""
siteList = siteList if siteList else None
if not siteList:
siteList = getSites()
if not siteList["OK"]:
return siteList
siteList = siteList["Value"]
ftsServers = dict()
defaultServ = gConfig.getValue( cfgPath( gBaseResourcesSection, 'FTSEndpoints/Default', 'FTSEndpoint' ), '' )
for site in siteList:
serv = gConfig.getValue( cfgPath( gBaseResourcesSection, "FTSEndpoints/FTS2", site ), defaultServ )
if serv:
ftsServers[site] = serv
return S_OK( ftsServers )
def getFTS3Servers():
""" get FTSServers for sites
"""
csPath = cfgPath( gBaseResourcesSection, "FTSEndpoints/FTS3" )
  # We do it in two steps to keep the order
ftsServerNames = gConfig.getOptions( csPath ).get( 'Value', [] )
ftsServers = []
for name in ftsServerNames:
ftsServers.append( gConfig.getValue( cfgPath( csPath, name ) ) )
return S_OK( ftsServers )
def getSiteTier( site ):
"""
Return Tier level of the given Site
"""
result = getSitePath( site )
if not result['OK']:
return result
sitePath = result['Value']
return S_OK( gConfig.getValue( cfgPath( sitePath, 'MoUTierLevel' ), 2 ) )
def getSitePath( site ):
"""
Return path to the Site section on CS
"""
result = getSiteGrid( site )
if not result['OK']:
return result
grid = result['Value']
return S_OK( cfgPath( gBaseResourcesSection, 'Sites', grid, site ) )
def getSiteGrid( site ):
"""
Return Grid component from Site Name
"""
sitetuple = site.split( "." )
if len( sitetuple ) != 3:
return S_ERROR( 'Wrong Site Name format' )
return S_OK( sitetuple[0] )
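# --- Editor's note (added comment; the site names are illustrative) ---
# Site names follow the three-part convention <Grid>.<Name>.<CountryCode>, so
# getSiteGrid( 'LCG.CERN.ch' ) returns S_OK( 'LCG' ), while a name without
# exactly three dot-separated parts yields S_ERROR( 'Wrong Site Name format' ).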
def getStorageElementOptions( seName ):
""" Get the CS StorageElementOptions
"""
storageConfigPath = '/Resources/StorageElements/%s' % seName
result = gConfig.getOptionsDict( storageConfigPath )
if not result['OK']:
return result
options = result['Value']
  # If the SE is a BaseSE or an alias, dereference it
if 'BaseSE' in options or 'Alias' in options:
storageConfigPath = '/Resources/StorageElements/%s' % options.get( 'BaseSE', options.get( 'Alias' ) )
result = gConfig.getOptionsDict( storageConfigPath )
if not result['OK']:
return result
result['Value'].update( options )
options = result['Value']
# Help distinguishing storage type
diskSE = True
tapeSE = False
if 'SEType' in options:
# Type should follow the convention TXDY
seType = options['SEType']
    diskSE = re.search( 'D[1-9]', seType ) is not None
    tapeSE = re.search( 'T[1-9]', seType ) is not None
options['DiskSE'] = diskSE
options['TapeSE'] = tapeSE
return S_OK( options )
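# --- Editor's usage sketch: the helper and the SE name below are assumptions
# added for illustration, not part of the original helper module. ---
def _exampleIsTapeSE( seName = 'CERN-RAW' ):
  """ Shows how the DiskSE/TapeSE flags derived from the TXDY convention above
      are consumed (e.g. SEType = 'T1D0' -> tape only, 'T0D1' -> disk only).
  """
  result = getStorageElementOptions( seName )
  if not result['OK']:
    return result
  return S_OK( result['Value']['TapeSE'] )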
def getQueue( site, ce, queue ):
""" Get parameters of the specified queue
"""
grid = site.split( '.' )[0]
result = gConfig.getOptionsDict( '/Resources/Sites/%s/%s/CEs/%s' % ( grid, site, ce ) )
if not result['OK']:
return result
resultDict = result['Value']
for tagFieldName in ( 'Tag', 'RequiredTag' ):
Tags = []
ceTags = resultDict.get( tagFieldName )
if ceTags:
Tags = fromChar( ceTags )
result = gConfig.getOptionsDict( '/Resources/Sites/%s/%s/CEs/%s/Queues/%s' % ( grid, site, ce, queue ) )
if not result['OK']:
return result
resultDict.update( result['Value'] )
queueTags = resultDict.get( tagFieldName )
if queueTags:
queueTags = fromChar( queueTags )
Tags = list( set( Tags + queueTags ) )
if Tags:
resultDict[tagFieldName] = Tags
resultDict['Queue'] = queue
return S_OK( resultDict )
def getQueues( siteList = None, ceList = None, ceTypeList = None, community = None, mode = None ):
""" Get CE/queue options according to the specified selection
"""
result = gConfig.getSections( '/Resources/Sites' )
if not result['OK']:
return result
resultDict = {}
grids = result['Value']
for grid in grids:
result = gConfig.getSections( '/Resources/Sites/%s' % grid )
if not result['OK']:
continue
sites = result['Value']
for site in sites:
if siteList is not None and not site in siteList:
continue
if community:
comList = gConfig.getValue( '/Resources/Sites/%s/%s/VO' % ( grid, site ), [] )
if comList and not community in comList:
continue
siteCEParameters = {}
result = gConfig.getOptionsDict( '/Resources/Sites/%s/%s/CEs' % ( grid, site ) )
if result['OK']:
siteCEParameters = result['Value']
result = gConfig.getSections( '/Resources/Sites/%s/%s/CEs' % ( grid, site ) )
if not result['OK']:
continue
ces = result['Value']
for ce in ces:
if mode:
ceMode = gConfig.getValue( '/Resources/Sites/%s/%s/CEs/%s/SubmissionMode' % ( grid, site, ce ), 'Direct' )
if not ceMode or ceMode != mode:
continue
if ceTypeList:
ceType = gConfig.getValue( '/Resources/Sites/%s/%s/CEs/%s/CEType' % ( grid, site, ce ), '' )
if not ceType or not ceType in ceTypeList:
continue
if ceList is not None and not ce in ceList:
continue
if community:
comList = gConfig.getValue( '/Resources/Sites/%s/%s/CEs/%s/VO' % ( grid, site, ce ), [] )
if comList and not community in comList:
continue
ceOptionsDict = dict( siteCEParameters )
result = gConfig.getOptionsDict( '/Resources/Sites/%s/%s/CEs/%s' % ( grid, site, ce ) )
if not result['OK']:
continue
ceOptionsDict.update( result['Value'] )
result = gConfig.getSections( '/Resources/Sites/%s/%s/CEs/%s/Queues' % ( grid, site, ce ) )
if not result['OK']:
continue
queues = result['Value']
for queue in queues:
if community:
comList = gConfig.getValue( '/Resources/Sites/%s/%s/CEs/%s/Queues/%s/VO' % ( grid, site, ce, queue ), [] )
if comList and not community in comList:
continue
resultDict.setdefault( site, {} )
resultDict[site].setdefault( ce, ceOptionsDict )
resultDict[site][ce].setdefault( 'Queues', {} )
result = gConfig.getOptionsDict( '/Resources/Sites/%s/%s/CEs/%s/Queues/%s' % ( grid, site, ce, queue ) )
if not result['OK']:
continue
queueOptionsDict = result['Value']
resultDict[site][ce]['Queues'][queue] = queueOptionsDict
return S_OK( resultDict )
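# --- Editor's usage sketch: the site, CE type and submission mode below are
# assumptions for illustration, not values taken from a real configuration. ---
def _examplePrintDirectQueues():
  """ Walks the nested Site -> CE -> Queues dictionary returned by getQueues().
  """
  result = getQueues( siteList = ['LCG.CERN.ch'], ceTypeList = ['HTCondorCE'], mode = 'Direct' )
  if not result['OK']:
    return result
  for site, ceDict in result['Value'].items():
    for ce, ceOptions in ceDict.items():
      for queue in ceOptions.get( 'Queues', {} ):
        print( '%s / %s / %s' % ( site, ce, queue ) )
  return S_OK()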
def getCompatiblePlatforms( originalPlatforms ):
""" Get a list of platforms compatible with the given list
"""
if isinstance( originalPlatforms, basestring ):
platforms = [originalPlatforms]
else:
platforms = list( originalPlatforms )
platforms = list( platform.replace( ' ', '' ) for platform in platforms )
result = gConfig.getOptionsDict( '/Resources/Computing/OSCompatibility' )
if not ( result['OK'] and result['Value'] ):
return S_ERROR( "OS compatibility info not found" )
platformsDict = dict( ( k, v.replace( ' ', '' ).split( ',' ) ) for k, v in result['Value'].iteritems() )
for k, v in platformsDict.iteritems():
if k not in v:
v.append( k )
resultList = list( platforms )
for p in platforms:
tmpList = platformsDict.get( p, [] )
for pp in platformsDict:
if p in platformsDict[pp]:
tmpList.append( pp )
tmpList += platformsDict[pp]
if tmpList:
resultList += tmpList
return S_OK( uniqueElements( resultList ) )
def getDIRACPlatform( OS ):
""" Get standard DIRAC platform(s) compatible with the argument.
  NB: The returned value is a list, ordered in reverse using distutils.version.LooseVersion.
  In practice the "highest" version (which should be the most "desirable" one) is returned first.
"""
result = gConfig.getOptionsDict( '/Resources/Computing/OSCompatibility' )
if not ( result['OK'] and result['Value'] ):
return S_ERROR( "OS compatibility info not found" )
platformsDict = dict( ( k, v.replace( ' ', '' ).split( ',' ) ) for k, v in result['Value'].iteritems() )
for k, v in platformsDict.iteritems():
if k not in v:
v.append( k )
# making an OS -> platforms dict
os2PlatformDict = dict()
for platform, osItems in platformsDict.iteritems():
for osItem in osItems:
if os2PlatformDict.get( osItem ):
os2PlatformDict[osItem].append( platform )
else:
os2PlatformDict[osItem] = [platform]
if OS not in os2PlatformDict:
return S_ERROR( 'No compatible DIRAC platform found for %s' % OS )
platforms = os2PlatformDict[OS]
platforms.sort( key = LooseVersion, reverse = True )
return S_OK( platforms )
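# --- Editor's note: the platform strings below are invented and only
# illustrate the reverse LooseVersion ordering used above. ---
def _examplePlatformOrdering():
  platforms = [ 'Linux_x86_64_glibc-2.12', 'Linux_x86_64_glibc-2.17' ]
  platforms.sort( key = LooseVersion, reverse = True )
  # -> [ 'Linux_x86_64_glibc-2.17', 'Linux_x86_64_glibc-2.12' ]
  return platforms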
def getDIRACPlatforms():
""" just returns list of platforms defined in the CS
"""
result = gConfig.getOptionsDict( '/Resources/Computing/OSCompatibility' )
if not ( result['OK'] and result['Value'] ):
return S_ERROR( "OS compatibility info not found" )
return S_OK( result['Value'].keys() )
def getCatalogPath( catalogName ):
""" Return the configuration path of the description for a a given catalog
"""
return '/Resources/FileCatalogs/%s' % catalogName
def getBackendConfig(backendID):
""" Return a backend configuration for a given backend identifier
  :param backendID: string representing a backend identifier, e.g. stdout, file, f02
"""
return gConfig.getOptionsDict('Resources/LogBackends/%s' % backendID)
|
hgiemza/DIRAC
|
ConfigurationSystem/Client/Helpers/Resources.py
|
Python
|
gpl-3.0
| 10,846 | 0.043426 |
# Define here the models for your scraped items
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/items.html
from scrapy.item import Item, Field
class WorkabroadItem(Item):
# define the fields for your item here like:
# name = Field()
pass
class PostItem(Item):
href = Field()
id = Field()
title = Field()
location = Field()
expiry = Field()
agency = Field()
qualifications = Field()
info = Field()
requirements = Field()
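# --- Editor's usage sketch: the helper and field values below are assumptions
# for illustration and are not part of the scraper project. ---
def _example_post_item():
    """Declared fields make the item behave like a dict inside a spider."""
    item = PostItem()
    item['title'] = 'Sample posting'
    item['location'] = 'Manila'
    item['expiry'] = '2015-01-01'
    return item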
|
staceytay/workabroad-scraper
|
workabroad/items.py
|
Python
|
mit
| 490 | 0.004082 |
#!/bin/env python3.4
# -*- coding: UTF-8 -*-
import simocracy.wiki as wiki
import re
## config ##
#Option to simulate the process
simulation = False
#Log level: write only changed lines ("line"),
# whole changed articles ("article") to stdout,
# or nothing at all ("none")
loglevel = "line"
# Replacement for LD-Host links
replacement = r"{{LD-Host-Replacer}}"
# Placed before every article in which something was replaced
notif = r"{{LD-Host}}"
############
def main():
opener = wiki.login(wiki.username, wiki.password)
for p in wiki.all_pages(opener, resume="speed"):
doIt(p, opener)
#Replaces all occurrences of sub in s with repl.
def replaceAll(sub, repl, s):
while True:
testagainst = s
s = re.sub(sub, repl, s)
if s == testagainst:
return s
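# Editor's usage sketch: the strings below are invented and only illustrate
# that replaceAll keeps substituting until the text stops changing.
def _example_replace_all():
    return replaceAll("aa", "a", "aaaa")  # collapses to "a", unlike a single re.sub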
def doIt(article, opener):
ldhost = re.compile(r'(Thumb=)?\[?\[?\s*(?P<link>(http://)?(www\.)?ld-host\.de/[/\w]*?\.[a-z][a-z][a-z])\s*[^\]]*?\]?\]?')
doubleRepl = re.compile(r'\[?\s*' + re.escape(replacement) + r'\s*' + re.escape(replacement) + r'\s*\]?')
found = False
text = ""
logs = ""
    #Catch special pages
site = None
try:
site = wiki.openArticle(article, opener, redirect=False)
except Exception as e:
if str(e) == "Spezialseite":
return
for line in site:
newLine = line.decode('utf-8')
foundList = []
for el in ldhost.finditer(newLine):
foundList.append(el)
        #nothing found
if foundList == []:
text = text + newLine
continue
else:
found = True
        #replace
for el in foundList:
            #take image boxes into account
if 'Thumb=' in el.groups():
newLine = replaceAll(el.groupdict()['link'], "", newLine)
else:
newLine = replaceAll(el.groupdict()['link'], replacement, newLine)
newLine = replaceAll(doubleRepl, replacement, newLine)
text = text + newLine
#logging
if simulation and loglevel == "line":
logs = logs + "\n- " + line.decode('utf-8') + "+ " + newLine + "\n"
if found:
text = notif + text
print("[[" + article + "]]")
if loglevel == "line":
print(logs)
elif loglevel == "article":
print(text)
else:
raise Exception("config kaputt")
    #Write the changes
if not simulation:
wiki.edit_article(article, text, opener)
print("Done: "+article)
print("========================================================\n")
if __name__ == "__main__":
main()
|
Simocracy/simocraPy
|
simocracy/ldhost.py
|
Python
|
gpl-2.0
| 2,714 | 0.006275 |
# -*- coding: utf-8 -*-
import os
import unittest
import inotify.constants
import inotify.calls
import inotify.adapters
import inotify.test_support
try:
unicode
except NameError:
_HAS_PYTHON2_UNICODE_SUPPORT = False
else:
_HAS_PYTHON2_UNICODE_SUPPORT = True
class TestInotify(unittest.TestCase):
def __init__(self, *args, **kwargs):
self.maxDiff = None
super(TestInotify, self).__init__(*args, **kwargs)
def __read_all_events(self, i):
events = list(i.event_gen(timeout_s=1, yield_nones=False))
return events
@unittest.skipIf(_HAS_PYTHON2_UNICODE_SUPPORT is True, "Not in Python 3")
def test__international_naming_python3(self):
with inotify.test_support.temp_path() as path:
inner_path = os.path.join(path, '新增資料夾')
os.mkdir(inner_path)
i = inotify.adapters.Inotify()
i.add_watch(inner_path)
with open(os.path.join(inner_path, 'filename'), 'w'):
pass
events = self.__read_all_events(i)
expected = [
(inotify.adapters._INOTIFY_EVENT(wd=1, mask=256, cookie=0, len=16), ['IN_CREATE'], inner_path, 'filename'),
(inotify.adapters._INOTIFY_EVENT(wd=1, mask=32, cookie=0, len=16), ['IN_OPEN'], inner_path, 'filename'),
(inotify.adapters._INOTIFY_EVENT(wd=1, mask=8, cookie=0, len=16), ['IN_CLOSE_WRITE'], inner_path, 'filename'),
]
self.assertEquals(events, expected)
@unittest.skipIf(_HAS_PYTHON2_UNICODE_SUPPORT is False, "Not in Python 2")
def test__international_naming_python2(self):
with inotify.test_support.temp_path() as path:
inner_path = os.path.join(unicode(path), u'新增資料夾')
os.mkdir(inner_path)
i = inotify.adapters.Inotify()
i.add_watch(inner_path)
with open(os.path.join(inner_path, u'filename料夾'), 'w'):
pass
events = self.__read_all_events(i)
expected = [
(inotify.adapters._INOTIFY_EVENT(wd=1, mask=256, cookie=0, len=16), ['IN_CREATE'], inner_path, u'filename料夾'),
(inotify.adapters._INOTIFY_EVENT(wd=1, mask=32, cookie=0, len=16), ['IN_OPEN'], inner_path, u'filename料夾'),
(inotify.adapters._INOTIFY_EVENT(wd=1, mask=8, cookie=0, len=16), ['IN_CLOSE_WRITE'], inner_path, u'filename料夾'),
]
self.assertEquals(events, expected)
def test__cycle(self):
with inotify.test_support.temp_path() as path:
path1 = os.path.join(path, 'aa')
os.mkdir(path1)
path2 = os.path.join(path, 'bb')
os.mkdir(path2)
i = inotify.adapters.Inotify()
i.add_watch(path1)
with open('ignored_new_file', 'w'):
pass
with open(os.path.join(path1, 'seen_new_file'), 'w'):
pass
with open(os.path.join(path2, 'ignored_new_file'), 'w'):
pass
os.remove(os.path.join(path1, 'seen_new_file'))
events = self.__read_all_events(i)
expected = [
(
inotify.adapters._INOTIFY_EVENT(wd=1, mask=256, cookie=0, len=16),
['IN_CREATE'],
path1,
'seen_new_file'
),
(
inotify.adapters._INOTIFY_EVENT(wd=1, mask=32, cookie=0, len=16),
['IN_OPEN'],
path1,
'seen_new_file'
),
(
inotify.adapters._INOTIFY_EVENT(wd=1, mask=8, cookie=0, len=16),
['IN_CLOSE_WRITE'],
path1,
'seen_new_file'
),
(
inotify.adapters._INOTIFY_EVENT(wd=1, mask=512, cookie=0, len=16),
['IN_DELETE'],
path1,
'seen_new_file'
)
]
self.assertEquals(events, expected)
# This can't be removed until *after* we've read the events because
# they'll be flushed the moment we remove the watch.
i.remove_watch(path1)
with open(os.path.join(path1, 'ignored_after_removal'), 'w'):
pass
events = self.__read_all_events(i)
self.assertEquals(events, [])
@staticmethod
def _open_write_close(*args):
with open(os.path.join(*args), 'w'):
pass
@staticmethod
def _make_temp_path(*args):
path = os.path.join(*args)
os.mkdir(path)
return path
@staticmethod
def _event_general(wd, mask, type_name, path, filename):
return ((inotify.adapters._INOTIFY_EVENT(wd=wd, mask=mask, cookie=0, len=16)),
[type_name],
path,
filename)
@staticmethod
def _event_create(wd, path, filename):
return TestInotify._event_general(wd, 256, 'IN_CREATE', path, filename)
@staticmethod
def _event_open(wd, path, filename):
return TestInotify._event_general(wd, 32, 'IN_OPEN', path, filename)
@staticmethod
def _event_close_write(wd, path, filename):
return TestInotify._event_general(wd, 8, 'IN_CLOSE_WRITE', path, filename)
def test__watch_list_of_paths(self):
with inotify.test_support.temp_path() as path:
path1 = TestInotify._make_temp_path(path, 'aa')
path2 = TestInotify._make_temp_path(path, 'bb')
i = inotify.adapters.Inotify([path1, path2])
TestInotify._open_write_close('ignored_new_file')
TestInotify._open_write_close(path1, 'seen_new_file')
TestInotify._open_write_close(path2, 'seen_new_file2')
os.remove(os.path.join(path1, 'seen_new_file'))
events = self.__read_all_events(i)
expected = [
TestInotify._event_create(wd=1, path=path1, filename='seen_new_file'),
TestInotify._event_open(wd=1, path=path1, filename='seen_new_file'),
TestInotify._event_close_write(wd=1, path=path1, filename='seen_new_file'),
TestInotify._event_create(wd=2, path=path2, filename='seen_new_file2'),
TestInotify._event_open(wd=2, path=path2, filename='seen_new_file2'),
TestInotify._event_close_write(wd=2, path=path2, filename='seen_new_file2'),
TestInotify._event_general(wd=1, mask=512, type_name='IN_DELETE',
path=path1, filename='seen_new_file')
]
self.assertEquals(events, expected)
def test__error_on_watch_nonexistent_folder(self):
i = inotify.adapters.Inotify()
with self.assertRaises(inotify.calls.InotifyError):
i.add_watch('/dev/null/foo')
def test__get_event_names(self):
all_mask = 0
for bit in inotify.constants.MASK_LOOKUP.keys():
all_mask |= bit
all_names = inotify.constants.MASK_LOOKUP.values()
all_names = list(all_names)
i = inotify.adapters.Inotify()
names = i._get_event_names(all_mask)
self.assertEquals(names, all_names)
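# --- Editor's usage sketch: '/tmp/watched' is a placeholder path; this helper
# is an assumption added for illustration, not part of the test suite. ---
def _example_watch_loop(path='/tmp/watched'):
    """The basic pattern exercised by the tests above: create an adapter, add a
    watch, then iterate over (header, type_names, path, filename) tuples."""
    i = inotify.adapters.Inotify()
    i.add_watch(path)
    for header, type_names, watch_path, filename in i.event_gen(timeout_s=1, yield_nones=False):
        print('%s %s %s' % (watch_path, filename, type_names))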
class TestInotifyTree(unittest.TestCase):
def __init__(self, *args, **kwargs):
self.maxDiff = None
super(TestInotifyTree, self).__init__(*args, **kwargs)
def __read_all_events(self, i):
events = list(i.event_gen(timeout_s=1, yield_nones=False))
return events
def test__cycle(self):
with inotify.test_support.temp_path() as path:
path1 = os.path.join(path, 'aa')
os.mkdir(path1)
path2 = os.path.join(path, 'bb')
os.mkdir(path2)
i = inotify.adapters.InotifyTree(path)
with open('seen_new_file1', 'w'):
pass
with open(os.path.join(path1, 'seen_new_file2'), 'w'):
pass
with open(os.path.join(path2, 'seen_new_file3'), 'w'):
pass
os.remove(os.path.join(path, 'seen_new_file1'))
os.remove(os.path.join(path1, 'seen_new_file2'))
os.remove(os.path.join(path2, 'seen_new_file3'))
os.rmdir(path1)
os.rmdir(path2)
events = self.__read_all_events(i)
expected = [
(inotify.adapters._INOTIFY_EVENT(wd=1, mask=256, cookie=0, len=16), ['IN_CREATE'], path, 'seen_new_file1'),
(inotify.adapters._INOTIFY_EVENT(wd=1, mask=32, cookie=0, len=16), ['IN_OPEN'], path, 'seen_new_file1'),
(inotify.adapters._INOTIFY_EVENT(wd=1, mask=8, cookie=0, len=16), ['IN_CLOSE_WRITE'], path, 'seen_new_file1'),
(inotify.adapters._INOTIFY_EVENT(wd=2, mask=256, cookie=0, len=16), ['IN_CREATE'], path1, 'seen_new_file2'),
(inotify.adapters._INOTIFY_EVENT(wd=2, mask=32, cookie=0, len=16), ['IN_OPEN'], path1, 'seen_new_file2'),
(inotify.adapters._INOTIFY_EVENT(wd=2, mask=8, cookie=0, len=16), ['IN_CLOSE_WRITE'], path1, 'seen_new_file2'),
(inotify.adapters._INOTIFY_EVENT(wd=3, mask=256, cookie=0, len=16), ['IN_CREATE'], path2, 'seen_new_file3'),
(inotify.adapters._INOTIFY_EVENT(wd=3, mask=32, cookie=0, len=16), ['IN_OPEN'], path2, 'seen_new_file3'),
(inotify.adapters._INOTIFY_EVENT(wd=3, mask=8, cookie=0, len=16), ['IN_CLOSE_WRITE'], path2, 'seen_new_file3'),
(inotify.adapters._INOTIFY_EVENT(wd=1, mask=512, cookie=0, len=16), ['IN_DELETE'], path, 'seen_new_file1'),
(inotify.adapters._INOTIFY_EVENT(wd=2, mask=512, cookie=0, len=16), ['IN_DELETE'], path1, 'seen_new_file2'),
(inotify.adapters._INOTIFY_EVENT(wd=3, mask=512, cookie=0, len=16), ['IN_DELETE'], path2, 'seen_new_file3'),
(inotify.adapters._INOTIFY_EVENT(wd=2, mask=1024, cookie=0, len=0), ['IN_DELETE_SELF'], path1, ''),
(inotify.adapters._INOTIFY_EVENT(wd=2, mask=32768, cookie=0, len=0), ['IN_IGNORED'], path1, ''),
(inotify.adapters._INOTIFY_EVENT(wd=1, mask=1073742336, cookie=0, len=16), ['IN_ISDIR', 'IN_DELETE'], path, 'aa'),
(inotify.adapters._INOTIFY_EVENT(wd=3, mask=1024, cookie=0, len=0), ['IN_DELETE_SELF'], path2, ''),
(inotify.adapters._INOTIFY_EVENT(wd=3, mask=32768, cookie=0, len=0), ['IN_IGNORED'], path2, ''),
(inotify.adapters._INOTIFY_EVENT(wd=1, mask=1073742336, cookie=0, len=16), ['IN_ISDIR', 'IN_DELETE'], path, 'bb'),
]
self.assertEquals(events, expected)
def test__renames(self):
# Since we're not reading the events one at a time in a loop and
# removing or renaming folders will flush any queued events, we have to
# group things in order to check things first before such operations.
with inotify.test_support.temp_path() as path:
i = inotify.adapters.InotifyTree(path)
old_path = os.path.join(path, 'old_folder')
new_path = os.path.join(path, 'new_folder')
os.mkdir(old_path)
events1 = self.__read_all_events(i)
expected = [
(inotify.adapters._INOTIFY_EVENT(wd=1, mask=1073742080, cookie=events1[0][0].cookie, len=16), ['IN_ISDIR', 'IN_CREATE'], path, 'old_folder'),
]
self.assertEquals(events1, expected)
os.rename(old_path, new_path)
events2 = self.__read_all_events(i)
expected = [
(inotify.adapters._INOTIFY_EVENT(wd=1, mask=1073741888, cookie=events2[0][0].cookie, len=16), ['IN_MOVED_FROM', 'IN_ISDIR'], path, 'old_folder'),
(inotify.adapters._INOTIFY_EVENT(wd=1, mask=1073741952, cookie=events2[1][0].cookie, len=16), ['IN_MOVED_TO', 'IN_ISDIR'], path, 'new_folder'),
]
self.assertEquals(events2, expected)
with open(os.path.join(new_path, 'old_filename'), 'w'):
pass
os.rename(
os.path.join(new_path, 'old_filename'),
os.path.join(new_path, 'new_filename'))
os.remove(os.path.join('new_folder', 'new_filename'))
os.rmdir('new_folder')
events3 = self.__read_all_events(i)
expected = [
(inotify.adapters._INOTIFY_EVENT(wd=3, mask=256, cookie=0, len=16), ['IN_CREATE'], new_path, 'old_filename'),
(inotify.adapters._INOTIFY_EVENT(wd=3, mask=32, cookie=0, len=16), ['IN_OPEN'], new_path, 'old_filename'),
(inotify.adapters._INOTIFY_EVENT(wd=3, mask=8, cookie=0, len=16), ['IN_CLOSE_WRITE'], new_path, 'old_filename'),
(inotify.adapters._INOTIFY_EVENT(wd=3, mask=64, cookie=events3[3][0].cookie, len=16), ['IN_MOVED_FROM'], new_path, 'old_filename'),
(inotify.adapters._INOTIFY_EVENT(wd=3, mask=128, cookie=events3[4][0].cookie, len=16), ['IN_MOVED_TO'], new_path, 'new_filename'),
(inotify.adapters._INOTIFY_EVENT(wd=3, mask=512, cookie=0, len=16), ['IN_DELETE'], new_path, 'new_filename'),
(inotify.adapters._INOTIFY_EVENT(wd=3, mask=1024, cookie=0, len=0), ['IN_DELETE_SELF'], new_path, ''),
(inotify.adapters._INOTIFY_EVENT(wd=3, mask=32768, cookie=0, len=0), ['IN_IGNORED'], new_path, ''),
(inotify.adapters._INOTIFY_EVENT(wd=1, mask=1073742336, cookie=0, len=16), ['IN_ISDIR', 'IN_DELETE'], path, 'new_folder'),
]
self.assertEquals(events3, expected)
def test__automatic_new_watches_on_new_paths(self):
# Tests that watches are actively established as new folders are
# created.
with inotify.test_support.temp_path() as path:
i = inotify.adapters.InotifyTree(path)
path1 = os.path.join(path, 'folder1')
path2 = os.path.join(path1, 'folder2')
os.mkdir(path1)
events = self.__read_all_events(i)
expected = [
(inotify.adapters._INOTIFY_EVENT(wd=1, mask=1073742080, cookie=0, len=16), ['IN_ISDIR', 'IN_CREATE'], path, 'folder1'),
]
self.assertEquals(events, expected)
os.mkdir(path2)
events = self.__read_all_events(i)
expected = [
(inotify.adapters._INOTIFY_EVENT(wd=2, mask=1073742080, cookie=0, len=16), ['IN_ISDIR', 'IN_CREATE'], path1, 'folder2'),
]
self.assertEquals(events, expected)
with open(os.path.join(path2,'filename'), 'w'):
pass
events = self.__read_all_events(i)
expected = [
(inotify.adapters._INOTIFY_EVENT(wd=3, mask=256, cookie=0, len=16), ['IN_CREATE'], path2, 'filename'),
(inotify.adapters._INOTIFY_EVENT(wd=3, mask=32, cookie=0, len=16), ['IN_OPEN'], path2, 'filename'),
(inotify.adapters._INOTIFY_EVENT(wd=3, mask=8, cookie=0, len=16), ['IN_CLOSE_WRITE'], path2, 'filename'),
]
self.assertEquals(events, expected)
def test__automatic_new_watches_on_existing_paths(self):
# Tests whether the watches are recursively established when we
# initialize.
with inotify.test_support.temp_path() as path:
path1 = os.path.join(path, 'folder1')
path2 = os.path.join(path1, 'folder2')
os.mkdir(path1)
os.mkdir(path2)
i = inotify.adapters.InotifyTree(path)
with open(os.path.join(path2,'filename'), 'w'):
pass
events = self.__read_all_events(i)
expected = [
(inotify.adapters._INOTIFY_EVENT(wd=3, mask=256, cookie=0, len=16), ['IN_CREATE'], path2, 'filename'),
(inotify.adapters._INOTIFY_EVENT(wd=3, mask=32, cookie=0, len=16), ['IN_OPEN'], path2, 'filename'),
(inotify.adapters._INOTIFY_EVENT(wd=3, mask=8, cookie=0, len=16), ['IN_CLOSE_WRITE'], path2, 'filename'),
]
self.assertEquals(events, expected)
class TestInotifyTrees(unittest.TestCase):
def __init__(self, *args, **kwargs):
self.maxDiff = None
super(TestInotifyTrees, self).__init__(*args, **kwargs)
def __read_all_events(self, i):
events = list(i.event_gen(timeout_s=1, yield_nones=False))
return events
def test__cycle(self):
with inotify.test_support.temp_path() as path:
path1 = os.path.join(path, 'aa')
os.mkdir(path1)
path2 = os.path.join(path, 'bb')
os.mkdir(path2)
i = inotify.adapters.InotifyTrees([path1, path2])
with open(os.path.join(path1, 'seen_new_file1'), 'w'):
pass
with open(os.path.join(path2, 'seen_new_file2'), 'w'):
pass
events = self.__read_all_events(i)
expected = [
(inotify.adapters._INOTIFY_EVENT(wd=1, mask=256, cookie=0, len=16), ['IN_CREATE'], path1, 'seen_new_file1'),
(inotify.adapters._INOTIFY_EVENT(wd=1, mask=32, cookie=0, len=16), ['IN_OPEN'], path1, 'seen_new_file1'),
(inotify.adapters._INOTIFY_EVENT(wd=1, mask=8, cookie=0, len=16), ['IN_CLOSE_WRITE'], path1, 'seen_new_file1'),
(inotify.adapters._INOTIFY_EVENT(wd=2, mask=256, cookie=0, len=16), ['IN_CREATE'], path2, 'seen_new_file2'),
(inotify.adapters._INOTIFY_EVENT(wd=2, mask=32, cookie=0, len=16), ['IN_OPEN'], path2, 'seen_new_file2'),
(inotify.adapters._INOTIFY_EVENT(wd=2, mask=8, cookie=0, len=16), ['IN_CLOSE_WRITE'], path2, 'seen_new_file2'),
]
self.assertEquals(events, expected)
|
dsoprea/PyInotify
|
tests/test_inotify.py
|
Python
|
gpl-2.0
| 17,960 | 0.003905 |