| text | repo_name | path | language | license | size | score |
|---|---|---|---|---|---|---|
| stringlengths 6-947k | stringlengths 5-100 | stringlengths 4-231 | stringclasses 1 value | stringclasses 15 values | int64 6-947k | float64 0-0.34 |
#!/usr/bin/env python
# created by chris@drumminhands.com
# modified by varunmehta
# see instructions at http://www.drumminhands.com/2014/06/15/raspberry-pi-photo-booth/
import atexit
import glob
import logging
import math
import os
import subprocess
import sys
import time
import traceback
from time import sleep
import RPi.GPIO as GPIO
import picamera # http://picamera.readthedocs.org/en/release-1.4/install2.html
import pygame
from pygame.locals import QUIT, KEYDOWN, K_ESCAPE
import config # this is the config python file config.py
########################
### Variables Config ###
########################
led_pin = 17 # LED
btn_pin = 2 # pin for the start button
total_pics = 2 # number of pics to be taken
capture_delay = 1 # delay between pics
prep_delay = 3 # number of seconds at step 1 as users prep to have photo taken
restart_delay = 3 # how long to display finished message before beginning a new session
# full frame of v1 camera is 2592x1944. Wide screen max is 2592,1555
# if you run into resource issues, try smaller, like 1920x1152.
# or increase memory http://picamera.readthedocs.io/en/release-1.12/fov.html#hardware-limits
high_res_w = 1190 # width of high res image, if taken
high_res_h = 790 # height of high res image, if taken
#############################
### Variables that Change ###
#############################
# Do not change these variables, as the code will change them anyway
transform_x = config.monitor_w # how wide to scale the jpg when replaying
transform_y = config.monitor_h # how high to scale the jpg when replaying
offset_x = 0 # how far off to left corner to display photos
offset_y = 0 # how far off to left corner to display photos
replay_delay = 1 # how much to wait in-between showing pics on-screen after taking
replay_cycles = 1 # how many times to show each photo on-screen after taking
####################
### Other Config ###
####################
real_path = os.path.dirname(os.path.realpath(__file__))
# GPIO setup
GPIO.setmode(GPIO.BCM)
GPIO.setup(led_pin, GPIO.OUT) # LED
GPIO.setup(btn_pin, GPIO.IN, pull_up_down=GPIO.PUD_UP)
GPIO.output(led_pin, False) # for some reason the pin turns on at the beginning of the program. Why?
# initialize pygame
pygame.init()
pygame.display.set_mode((config.monitor_w, config.monitor_h))
screen = pygame.display.get_surface()
pygame.display.set_caption('Photo Booth Pics')
pygame.mouse.set_visible(False) # hide the mouse cursor
pygame.display.toggle_fullscreen()
# init logging
logging.basicConfig(format='%(asctime)s %(message)s', filename='photobooth.log', level=logging.INFO)
#################
### Functions ###
#################
# clean up running programs as needed when main program exits
def cleanup():
logging.critical('Ended abruptly')
pygame.quit()
GPIO.cleanup()
atexit.register(cleanup)
# A function to handle keyboard/mouse/device input events
def input(events):
for event in events: # Hit the ESC key to quit the slideshow.
if (event.type == QUIT or
(event.type == KEYDOWN and event.key == K_ESCAPE)):
pygame.quit()
# delete files in folder
def clear_pics(channel):
files = glob.glob(config.file_path + '*')
for f in files:
os.remove(f)
# light the lights in series to show completed
logging.warning("Deleted previous pics")
for x in range(0, 3): # blink light
GPIO.output(led_pin, True)
sleep(0.25)
GPIO.output(led_pin, False)
sleep(0.25)
def init_event_folders():
if (not os.path.exists(config.file_path)):
os.mkdir(config.file_path)
os.mkdir(config.file_path + "/final")
logging.info("Initalized event folder")
# set variables to properly display the image on screen at right ratio
def set_dimensions(img_w, img_h):
# Note this only works when booting in desktop mode.
# When running in terminal, the size is not correct (it displays small). Why?
# connect to global vars
global transform_y, transform_x, offset_y, offset_x
# based on output screen resolution, calculate how to display
ratio_h = (config.monitor_w * img_h) / img_w
if (ratio_h < config.monitor_h):
# Use horizontal black bars
# print("horizontal black bars")
transform_y = ratio_h
transform_x = config.monitor_w
offset_y = (config.monitor_h - ratio_h) / 2
offset_x = 0
elif (ratio_h > config.monitor_h):
# Use vertical black bars
# print("vertical black bars")
transform_x = (config.monitor_h * img_w) / img_h
transform_y = config.monitor_h
offset_x = (config.monitor_w - transform_x) / 2
offset_y = 0
else:
# No need for black bars as photo ratio equals screen ratio
# print("no black bars")
transform_x = config.monitor_w
transform_y = config.monitor_h
offset_y = offset_x = 0
# Ceil and floor floats to integers
transform_x = math.ceil(transform_x)
transform_y = math.ceil(transform_y)
offset_x = math.floor(offset_x)
offset_y = math.floor(offset_y)
# uncomment these lines to troubleshoot screen ratios
# print(str(img_w) + " x " + str(img_h))
# print("ratio_h: " + str(ratio_h))
# print("transform_x: " + str(transform_x))
# print("transform_y: " + str(transform_y))
# print("offset_y: " + str(offset_y))
# print("offset_x: " + str(offset_x))
# display one image on screen
def show_image(image_path):
# print(" Displaying... " + image_path)
# clear the screen
screen.fill((0, 0, 0))
# load the image
img = pygame.image.load(image_path)
img = img.convert()
# set pixel dimensions based on image
set_dimensions(img.get_width(), img.get_height())
# rescale the image to fit the current display
img = pygame.transform.scale(img, (transform_x, transform_y))
screen.blit(img, (offset_x, offset_y))
pygame.display.flip()
# display a blank screen
def clear_screen():
screen.fill((0, 0, 0))
pygame.display.flip()
# display a group of images
def display_pics(jpg_group):
for _ in range(0, replay_cycles): # show pics a few times
for i in range(1, total_pics + 1): # show each pic
show_image(config.file_path + jpg_group + "-0" + str(i) + ".jpg")
time.sleep(replay_delay) # pause
# define the photo taking function for when the big button is pressed
def start_photobooth():
input(pygame.event.get()) # press escape to exit pygame. Then press ctrl-c to exit python.
################################# Begin Step 1 #################################
logging.info("Get Ready")
GPIO.output(led_pin, False)
show_image(real_path + "/instructions.png")
sleep(prep_delay)
# clear the screen
clear_screen()
camera = picamera.PiCamera()
camera.vflip = False
camera.hflip = True # flip for preview, showing users a mirror image
camera.rotation = 0 # revisit this depending upon final camera placement
# camera.saturation = -100 # comment out this line if you want color images
# camera.iso = config.camera_iso
camera.resolution = (high_res_w, high_res_h) # set camera resolution to high res
################################# Begin Step 2 #################################
logging.info("Starting to take pics...")
# All images are named with the session timestamp plus a shot number, e.g. 20160310113034-01.jpg
now = time.strftime("%Y%m%d-%H%M%S") # get the current date and time for the start of the filename
montage_img = now + "-" + config.event_name + ".jpg" # montage file name
if config.capture_count_pics:
logging.debug("Decided to go count pics")
try: # take the photos
for i in range(1, total_pics + 1):
show_image(real_path + "/pose" + str(i) + ".png")
time.sleep(capture_delay) # pause in-between shots
clear_screen()
camera.hflip = True # preview a mirror image
camera.start_preview(
resolution=(high_res_w, high_res_h)) # start preview at low res but the right ratio
time.sleep(2) # warm up camera
# GPIO.output(led_pin, True) # turn on the LED
filename = config.file_path + now + '-0' + str(i) + '.jpg'
camera.hflip = False # flip back when taking photo
camera.capture(filename)
logging.info("captured: " + filename)
# GPIO.output(led_pin, False) # turn off the LED
camera.stop_preview()
# show_image(real_path + "/pose" + str(i) + ".png")
time.sleep(capture_delay) # pause in-between shots
clear_screen()
finally:
camera.close()
else:
logging.debug("capture_continuous")
camera.start_preview(
resolution=(high_res_w, high_res_h)) # start preview at low res but the right ratio
time.sleep(2) # warm up camera
try: # take the photos
for i, filename in enumerate(camera.capture_continuous(config.file_path + now + '-' + '{counter:02d}.jpg')):
GPIO.output(led_pin, True) # turn on the LED
logging.info("captured: " + filename)
time.sleep(capture_delay) # pause in-between shots
GPIO.output(led_pin, False) # turn off the LED
if i == total_pics - 1:
break
finally:
camera.stop_preview()
camera.close()
########################### Begin Step 3 #################################
input(pygame.event.get()) # press escape to exit pygame. Then press ctrl-c to exit python.
logging.info("Creating mosaic ")
show_image(real_path + "/processing.png")
# Create a montage of the images
montage = "gm montage -mode concatenate -resize 1190x1770 -borderwidth 5 -bordercolor white " \
+ config.file_path + "/" + now + "-01.jpg " + real_path + "/diwali-middle.jpg " \
+ config.file_path + "/" + now + "-02.jpg -tile 1x3 " \
+ config.file_path + "/final/" + montage_img
print(montage)
processed = subprocess.call(montage, shell=True)
logging.info("gm montage for " + now + "-final.jpg = " + str(processed))
########################### Begin Step 4 #################################
input(pygame.event.get()) # press escape to exit pygame. Then press ctrl-c to exit python.
try:
display_pics(now)
# show preview of finally created image
show_image(config.file_path + "/final/" + montage_img)
time.sleep(2)
except Exception as e:
tb = sys.exc_info()[2]
traceback.print_exception(e.__class__, e, tb)
pygame.quit()
logging.info("Session for " + now + " complete")
show_image(real_path + "/finished2.png")
# upload to dropbox
# subprocess.call(
# "/opt/Dropbox-Uploader/dropbox_uploader.sh -f /home/pi/.dropbox_uploader upload " + config.file_path + "final/" + montage_img + " " + montage_img)
time.sleep(restart_delay)
show_image(real_path + "/intro.png")
GPIO.output(led_pin, True) # turn on the LED
####################
### Main Program ###
####################
## clear the previously stored pics based on config settings
if config.clear_on_startup:
clear_pics(1)
# check if files and folders exist for the event, or create them
init_event_folders()
logging.warning("Starting photo booth...")
for x in range(0, 5): # blink light to show the app is running
GPIO.output(led_pin, True)
sleep(0.25)
GPIO.output(led_pin, False)
sleep(0.25)
show_image(real_path + "/intro.png")
while True:
GPIO.output(led_pin, True) # turn on the light showing users they can push the button
input(pygame.event.get()) # press escape to exit pygame. Then press ctrl-c to exit python.
GPIO.wait_for_edge(btn_pin, GPIO.FALLING)
time.sleep(config.debounce) # debounce
start_photobooth()
logging.warning("----------------------")
| varunmehta/photobooth | photobooth.py | Python | mit | 12,204 | 0.002786 |
from serial_settings import SerialSettings
class AbstractStream(object):
def __init__(self, config, name):
"""
:type name: str
"""
self.config = config
self.name = name
def open(self):
raise NotImplementedError
def close(self):
raise NotImplementedError
def read(self, num_bytes=1):
raise NotImplementedError
def write(self, data):
raise NotImplementedError
def reconfigure(self, config):
raise NotImplementedError
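# Illustrative sketch only: a minimal concrete stream built on pyserial (assumed to be
# installed and imported as `serial`). This class is not part of the original package,
# and the `port`/`baudrate` attributes read from `config` are hypothetical -- the real
# SerialSettings fields may be named differently.
import serial
class PySerialStream(AbstractStream):
    def __init__(self, config, name):
        super(PySerialStream, self).__init__(config, name)
        self._serial = None  # opened lazily in open()
    def open(self):
        # Open the underlying serial port with the configured settings.
        self._serial = serial.Serial(self.config.port, self.config.baudrate)
    def close(self):
        if self._serial is not None:
            self._serial.close()
            self._serial = None
    def read(self, num_bytes=1):
        return self._serial.read(num_bytes)
    def write(self, data):
        return self._serial.write(data)
    def reconfigure(self, config):
        # Swap in new settings and re-open the port if it was already open.
        was_open = self._serial is not None
        self.config = config
        if was_open:
            self.close()
            self.open()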
| ThomasGerstenberg/serial_monitor | stream/__init__.py | Python | bsd-3-clause | 551 | 0 |
#!/usr/bin/env python
"""
Synopsis: %(prog)s [-h|-b|-g|-r|-a|-d] [ picklefile ] dbfile
Read the given picklefile as a series of key/value pairs and write to a new
database. If the database already exists, any contents are deleted. The
optional flags indicate the type of the output database:
-a - open using anydbm
-b - open as bsddb btree file
-d - open as dbm file
-g - open as gdbm file
-h - open as bsddb hash file
-r - open as bsddb recno file
The default is hash. If a pickle file is named it is opened for read
access. If no pickle file is named, the pickle input is read from standard
input.
Note that recno databases can only contain integer keys, so you can't dump a
hash or btree database using db2pickle.py and reconstitute it to a recno
database with %(prog)s unless your keys are integers.
"""
import getopt
try:
import bsddb
except ImportError:
bsddb = None
try:
import dbm
except ImportError:
dbm = None
try:
import gdbm
except ImportError:
gdbm = None
try:
import anydbm
except ImportError:
anydbm = None
import sys
try:
import cPickle as pickle
except ImportError:
import pickle
prog = sys.argv[0]
def usage():
sys.stderr.write(__doc__ % globals())
def main(args):
try:
opts, args = getopt.getopt(args, "hbrdag",
["hash", "btree", "recno", "dbm", "anydbm",
"gdbm"])
except getopt.error:
usage()
return 1
if len(args) == 0 or len(args) > 2:
usage()
return 1
elif len(args) == 1:
pfile = sys.stdin
dbfile = args[0]
else:
try:
pfile = open(args[0], 'rb')
except IOError:
sys.stderr.write("Unable to open %s\n" % args[0])
return 1
dbfile = args[1]
dbopen = None
for opt, arg in opts:
if opt in ("-h", "--hash"):
try:
dbopen = bsddb.hashopen
except AttributeError:
sys.stderr.write("bsddb module unavailable.\n")
return 1
elif opt in ("-b", "--btree"):
try:
dbopen = bsddb.btopen
except AttributeError:
sys.stderr.write("bsddb module unavailable.\n")
return 1
elif opt in ("-r", "--recno"):
try:
dbopen = bsddb.rnopen
except AttributeError:
sys.stderr.write("bsddb module unavailable.\n")
return 1
elif opt in ("-a", "--anydbm"):
try:
dbopen = anydbm.open
except AttributeError:
sys.stderr.write("anydbm module unavailable.\n")
return 1
elif opt in ("-g", "--gdbm"):
try:
dbopen = gdbm.open
except AttributeError:
sys.stderr.write("gdbm module unavailable.\n")
return 1
elif opt in ("-d", "--dbm"):
try:
dbopen = dbm.open
except AttributeError:
sys.stderr.write("dbm module unavailable.\n")
return 1
if dbopen is None:
if bsddb is None:
sys.stderr.write("bsddb module unavailable - ")
sys.stderr.write("must specify dbtype.\n")
return 1
else:
dbopen = bsddb.hashopen
try:
db = dbopen(dbfile, 'c')
except bsddb.error:
sys.stderr.write("Unable to open %s. " % dbfile)
sys.stderr.write("Check for format or version mismatch.\n")
return 1
else:
for k in db.keys():
del db[k]
while 1:
try:
(key, val) = pickle.load(pfile)
except EOFError:
break
db[key] = val
db.close()
pfile.close()
return 0
if __name__ == "__main__":
sys.exit(main(sys.argv[1:]))
| google/google-ctf | third_party/edk2/AppPkg/Applications/Python/Python-2.7.2/Tools/scripts/pickle2db.py | Python | apache-2.0 | 4,089 | 0.000734 |
# Import time (for delay) library (for SmartHome api) and GPIO (for raspberry pi gpio)
from library import SmartHomeApi
import RPi.GPIO as GPIO
import time
from datetime import datetime
# 7 -> LED
# Create the client with pre-existing credentials
api = SmartHomeApi("http://localhost:5000/api/0.1", id=10, api_key="api_eMxSb7n6G10Svojn3PlU5P6srMaDrFxmKAnWvnW6UyzmBG")
GPIO.setmode(GPIO.BOARD)
GPIO.setup(7, GPIO.OUT)
last_status = "UNKNOWN"
while True:
preferences = api.GetUserPrefences(2)['results']
print(preferences)
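# Each entry in `preferences` is assumed to look like {"key": "bedtime", "value": "22:30"};
# this shape is inferred from the lookups below, not from the SmartHome API docs.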
preference = next((item for item in preferences if item["key"] == "bedtime"), None)
if not preference:
print("Could not fin 'bedtime' preference!")
api.AddPreference(2, "bedtime", "00:00")
print("Created bedtime preference! Please set it to the correct value in your dashboard")
else:
bedtime = preference['value']
if not bedtime:
print("Unexpected error occured!")
else:
print(bedtime)
time_str = datetime.now().strftime('%H:%M')
print("time: {}".format(time_str))
bedtime_dt = datetime.strptime(bedtime, "%H:%M")
time_hm = datetime.strptime(time_str, "%H:%M")
if time_hm >= bedtime_dt:
print("Going to bed! Currently: {}, going to bed at {}".format(time_str, bedtime))
GPIO.output(7, GPIO.LOW)
else:
print("Not yet time for bed. Currently: {}, going to bed at {}".format(time_str, bedtime))
GPIO.output(7, GPIO.HIGH)
time.sleep(1)
| How2Compute/SmartHome | cli/demo2.py | Python | mit | 1,643 | 0.008521 |
# coding=UTF-8
"""
tests for overrides
"""
import datetime
import mock
import pytz
from nose.plugins.attrib import attr
from ccx_keys.locator import CCXLocator
from courseware.courses import get_course_by_id
from courseware.field_overrides import OverrideFieldData
from courseware.testutils import FieldOverrideTestMixin
from django.test.utils import override_settings
from lms.djangoapps.courseware.tests.test_field_overrides import inject_field_overrides
from request_cache.middleware import RequestCache
from student.tests.factories import AdminFactory
from xmodule.modulestore.tests.django_utils import (
SharedModuleStoreTestCase,
TEST_DATA_SPLIT_MODULESTORE)
from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory
from lms.djangoapps.ccx.models import CustomCourseForEdX
from lms.djangoapps.ccx.overrides import override_field_for_ccx
from lms.djangoapps.ccx.tests.utils import flatten, iter_blocks
@attr('shard_1')
@override_settings(
XBLOCK_FIELD_DATA_WRAPPERS=['lms.djangoapps.courseware.field_overrides:OverrideModulestoreFieldData.wrap'],
MODULESTORE_FIELD_OVERRIDE_PROVIDERS=['ccx.overrides.CustomCoursesForEdxOverrideProvider'],
)
class TestFieldOverrides(FieldOverrideTestMixin, SharedModuleStoreTestCase):
"""
Make sure field overrides behave in the expected manner.
"""
MODULESTORE = TEST_DATA_SPLIT_MODULESTORE
@classmethod
def setUpClass(cls):
"""
Course is created here and shared by all the class's tests.
"""
super(TestFieldOverrides, cls).setUpClass()
cls.course = CourseFactory.create()
cls.course.enable_ccx = True
# Create a course outline
start = datetime.datetime(2010, 5, 12, 2, 42, tzinfo=pytz.UTC)
due = datetime.datetime(2010, 7, 7, 0, 0, tzinfo=pytz.UTC)
chapters = [ItemFactory.create(start=start, parent=cls.course)
for _ in xrange(2)]
sequentials = flatten([
[ItemFactory.create(parent=chapter) for _ in xrange(2)]
for chapter in chapters])
verticals = flatten([
[ItemFactory.create(due=due, parent=sequential) for _ in xrange(2)]
for sequential in sequentials])
blocks = flatten([ # pylint: disable=unused-variable
[ItemFactory.create(parent=vertical) for _ in xrange(2)]
for vertical in verticals])
def setUp(self):
"""
Set up tests
"""
super(TestFieldOverrides, self).setUp()
self.ccx = ccx = CustomCourseForEdX(
course_id=self.course.id,
display_name='Test CCX',
coach=AdminFactory.create())
ccx.save()
patch = mock.patch('ccx.overrides.get_current_ccx')
self.get_ccx = get_ccx = patch.start()
get_ccx.return_value = ccx
self.addCleanup(patch.stop)
self.addCleanup(RequestCache.clear_request_cache)
inject_field_overrides(iter_blocks(ccx.course), self.course, AdminFactory.create())
self.ccx_key = CCXLocator.from_course_locator(self.course.id, ccx.id)
self.ccx_course = get_course_by_id(self.ccx_key, depth=None)
def cleanup_provider_classes():
"""
After everything is done, clean up by un-doing the change to the
OverrideFieldData object that is done during the wrap method.
"""
OverrideFieldData.provider_classes = None
self.addCleanup(cleanup_provider_classes)
def test_override_start(self):
"""
Test that overriding start date on a chapter works.
"""
ccx_start = datetime.datetime(2014, 12, 25, 00, 00, tzinfo=pytz.UTC)
chapter = self.ccx_course.get_children()[0]
override_field_for_ccx(self.ccx, chapter, 'start', ccx_start)
self.assertEquals(chapter.start, ccx_start)
def test_override_num_queries_new_field(self):
"""
Test that for creating new field executed only create query
"""
ccx_start = datetime.datetime(2014, 12, 25, 00, 00, tzinfo=pytz.UTC)
chapter = self.ccx_course.get_children()[0]
# One outer SAVEPOINT/RELEASE SAVEPOINT pair around everything caused by the
# transaction.atomic decorator wrapping override_field_for_ccx.
# One SELECT and one INSERT.
# One inner SAVEPOINT/RELEASE SAVEPOINT pair around the INSERT caused by the
# transaction.atomic down in Django's get_or_create()/_create_object_from_params().
with self.assertNumQueries(6):
override_field_for_ccx(self.ccx, chapter, 'start', ccx_start)
def test_override_num_queries_update_existing_field(self):
"""
Test that overriding existing field executed create, fetch and update queries.
"""
ccx_start = datetime.datetime(2014, 12, 25, 00, 00, tzinfo=pytz.UTC)
new_ccx_start = datetime.datetime(2015, 12, 25, 00, 00, tzinfo=pytz.UTC)
chapter = self.ccx_course.get_children()[0]
override_field_for_ccx(self.ccx, chapter, 'start', ccx_start)
with self.assertNumQueries(3):
override_field_for_ccx(self.ccx, chapter, 'start', new_ccx_start)
def test_override_num_queries_field_value_not_changed(self):
"""
Test that if value of field does not changed no query execute.
"""
ccx_start = datetime.datetime(2014, 12, 25, 00, 00, tzinfo=pytz.UTC)
chapter = self.ccx_course.get_children()[0]
override_field_for_ccx(self.ccx, chapter, 'start', ccx_start)
with self.assertNumQueries(2): # 2 savepoints
override_field_for_ccx(self.ccx, chapter, 'start', ccx_start)
def test_overriden_field_access_produces_no_extra_queries(self):
"""
Test no extra queries when accessing an overriden field more than once.
"""
ccx_start = datetime.datetime(2014, 12, 25, 00, 00, tzinfo=pytz.UTC)
chapter = self.ccx_course.get_children()[0]
# One outer SAVEPOINT/RELEASE SAVEPOINT pair around everything caused by the
# transaction.atomic decorator wrapping override_field_for_ccx.
# One SELECT and one INSERT.
# One inner SAVEPOINT/RELEASE SAVEPOINT pair around the INSERT caused by the
# transaction.atomic down in Django's get_or_create()/_create_object_from_params().
with self.assertNumQueries(6):
override_field_for_ccx(self.ccx, chapter, 'start', ccx_start)
def test_override_is_inherited(self):
"""
Test that sequentials inherit overridden start date from chapter.
"""
ccx_start = datetime.datetime(2014, 12, 25, 00, 00, tzinfo=pytz.UTC)
chapter = self.ccx_course.get_children()[0]
override_field_for_ccx(self.ccx, chapter, 'start', ccx_start)
self.assertEquals(chapter.get_children()[0].start, ccx_start)
self.assertEquals(chapter.get_children()[1].start, ccx_start)
def test_override_is_inherited_even_if_set_in_mooc(self):
"""
Test that a due date set on a chapter is inherited by grandchildren
(verticals) even if a due date is set explicitly on grandchildren in
the mooc.
"""
ccx_due = datetime.datetime(2015, 1, 1, 00, 00, tzinfo=pytz.UTC)
chapter = self.ccx_course.get_children()[0]
chapter.display_name = 'itsme!'
override_field_for_ccx(self.ccx, chapter, 'due', ccx_due)
vertical = chapter.get_children()[0].get_children()[0]
self.assertEqual(vertical.due, ccx_due)
| shabab12/edx-platform | lms/djangoapps/ccx/tests/test_overrides.py | Python | agpl-3.0 | 7,563 | 0.001587 |
"""
Pynt is a Python client that wraps the Open Beer Database API.
Questions, comments? m@h0ke.com
"""
__author__ = "Matthew Hokanson <m@h0ke.com>"
__version__ = "0.2.0"
from beer import Beer
from brewery import Brewery
from request import Request
from settings import Settings
| h0ke/pynt | pynt/pynt.py | Python | mit | 290 | 0 |
import click
import os
import os.path
import ntpath
import serial
import sys
import prosflasher.ports
import prosflasher.upload
import prosconfig
from proscli.utils import default_cfg, AliasGroup
from proscli.utils import get_version
@click.group(cls=AliasGroup)
def flasher_cli():
pass
@flasher_cli.command(short_help='Upload binaries to the microcontroller.', aliases=['upload'])
@click.option('-sfs/-dfs', '--save-file-system/--delete-file-system', is_flag=True, default=False,
help='Specify whether or not to save the file system when writing to the Cortex. Saving the '
'file system takes more time.')
@click.option('-y', is_flag=True, default=False,
help='Automatically say yes to all confirmations.')
@click.option('-f', '-b', '--file', '--binary', default='default', metavar='FILE',
help='Specifies a binary file, project directory, or project config file.')
@click.option('-p', '--port', default='auto', metavar='PORT', help='Specifies the serial port.')
@click.option('--no-poll', is_flag=True, default=False)
@click.option('-r', '--retry', default=2,
help='Specify the number of times the flasher should retry the flash when it detects a failure'
' (default two times).')
@default_cfg
# @click.option('-m', '--strategy', default='cortex', metavar='STRATEGY',
# help='Specify the microcontroller upload strategy. Not currently used.')
def flash(ctx, save_file_system, y, port, binary, no_poll, retry):
"""Upload binaries to the microcontroller. A serial port and binary file need to be specified.
By default, the port is automatically selected (if you want to be pedantic, 'auto').
Otherwise, a system COM port descriptor needs to be used. In Windows/NT, this takes the form of COM1.
In *nx systems, this takes the form of /dev/tty1 or /dev/acm1 or similar.
\b
Specifying 'all' as the COM port will automatically upload to all available microcontrollers.
By default, the CLI will look around for a proper binary to upload to the microcontroller. If one was not found, or
if you want to change the default binary, you can specify it.
"""
click.echo(' ====:: PROS Flasher v{} ::===='.format(get_version()))
if port == 'auto':
ports = prosflasher.ports.list_com_ports()
if len(ports) == 0:
click.echo('No microcontrollers were found. Please plug in a cortex or manually specify a serial port.\n',
err=True)
click.get_current_context().abort()
sys.exit(1)
port = ports[0].device
if len(ports) > 1 and port is not None and y is False:
port = None
for p in ports:
if click.confirm('Download to ' + p.device, default=True):
port = p.device
break
if port is None:
click.echo('No additional ports found.')
click.get_current_context().abort()
sys.exit(1)
if port == 'all':
port = [p.device for p in prosflasher.ports.list_com_ports()]
if len(port) == 0:
click.echo('No microcontrollers were found. Please plug in a cortex or manually specify a serial port.\n',
err=True)
click.get_current_context().abort()
sys.exit(1)
if y is False:
click.confirm('Download to ' + ', '.join(port), default=True, abort=True, prompt_suffix='?')
else:
port = [port]
if binary == 'default':
binary = os.getcwd()
if ctx.verbosity > 3:
click.echo('Default binary selected, new directory is {}'.format(binary))
binary = find_binary(binary)
if binary is None:
click.echo('No binary was found! Ensure you are in a built PROS project (run make) '
'or specify the file with the -f flag',
err=True)
click.get_current_context().exit()
if ctx.verbosity > 3:
click.echo('Final binary is {}'.format(binary))
click.echo('Flashing ' + binary + ' to ' + ', '.join(port))
for p in port:
tries = 1
code = prosflasher.upload.upload(p, y, binary, no_poll, ctx)
while tries <= retry and (not code or code == -1000):
click.echo('Retrying...')
code = prosflasher.upload.upload(p, y, binary, no_poll, ctx)
tries += 1
def find_binary(path):
"""
Helper function for finding the binary associated with a project
The algorithm is as follows:
- if it is a file, then check if the name of the file is 'pros.config':
- if it is 'pros.config', then find the binary based off the pros.config value (or default 'bin/output.bin')
- otherwise, can only assume it is the binary file to upload
- if it is a directory, start recursively searching up until 'pros.config' is found. max 10 times
- if the pros.config file was found, find binary based off of the pros.config value
- if no pros.config file was found, start recursively searching up (from starting path) until a directory
named bin is found
- if 'bin' was found, return 'bin/output.bin'
:param path: starting path to start the search
:param ctx:
:return:
"""
# logger = logging.getLogger(ctx.log_key)
# logger.debug('Finding binary for {}'.format(path))
if os.path.isfile(path):
if ntpath.basename(path) == 'pros.config':
pros_cfg = prosconfig.ProjectConfig(path)
return os.path.join(path, pros_cfg.output)
return path
elif os.path.isdir(path):
try:
cfg = prosconfig.ProjectConfig(path, raise_on_error=True)
if cfg is not None and os.path.isfile(os.path.join(cfg.directory, cfg.output)):
return os.path.join(cfg.directory, cfg.output)
except prosconfig.ConfigNotFoundException:
search_dir = path
for n in range(10):
dirs = [d for d in os.listdir(search_dir)
if os.path.isdir(os.path.join(path, search_dir, d)) and d == 'bin']
if len(dirs) == 1: # found a bin directory
if os.path.isfile(os.path.join(path, search_dir, 'bin', 'output.bin')):
return os.path.join(path, search_dir, 'bin', 'output.bin')
search_dir = ntpath.split(search_dir)[:-1][0] # move to parent dir
return None
@flasher_cli.command('poll', short_help='Polls a microcontroller for its system info')
@click.option('-y', '--yes', is_flag=True, default=False,
help='Automatically say yes to all confirmations.')
@click.argument('port', default='all')
@default_cfg
def get_sys_info(cfg, yes, port):
if port == 'auto':
ports = prosflasher.ports.list_com_ports()
if len(ports) == 0:
click.echo('No microcontrollers were found. Please plug in a cortex or manually specify a serial port.\n',
err=True)
sys.exit(1)
port = prosflasher.ports.list_com_ports()[0].device
if port is not None and yes is False:
click.confirm('Poll ' + port, default=True, abort=True, prompt_suffix='?')
if port == 'all':
port = [p.device for p in prosflasher.ports.list_com_ports()]
if len(port) == 0:
click.echo('No microcontrollers were found. Please plug in a cortex or manually specify a serial port.\n',
err=True)
sys.exit(1)
else:
port = [port]
for p in port:
sys_info = prosflasher.upload.ask_sys_info(prosflasher.ports.create_serial(p, serial.PARITY_EVEN), cfg)
click.echo(repr(sys_info))
pass
@flasher_cli.command(short_help='List connected microcontrollers')
@default_cfg
def lsusb(cfg):
if len(prosflasher.ports.list_com_ports()) == 0 or prosflasher.ports.list_com_ports() is None:
click.echo('No serial ports found.')
else:
click.echo('Available Ports:')
click.echo(prosflasher.ports.create_port_list(cfg.verbosity > 0))
# @flasher_cli.command(name='dump-cortex', short_help='Dumps user flash contents to a specified file')
# @click.option('-v', '--verbose', is_flag=True)
# @click.argument('file', default=sys.stdout, type=click.File())
# def dump_cortex(file, verbose):
# pass
| purduesigbots/purdueros-cli | proscli/flasher.py | Python | bsd-3-clause | 8,643 | 0.003934 |
"""
Segregation of pymongo functions from the data modeling mechanisms for split modulestore.
"""
import datetime
import logging
import math
import re
import zlib
from contextlib import contextmanager
from time import time
import pymongo
import pytz
import six
from six.moves import cPickle as pickle
from contracts import check, new_contract
from mongodb_proxy import autoretry_read
# Import this just to export it
from pymongo.errors import DuplicateKeyError # pylint: disable=unused-import
from xmodule.exceptions import HeartbeatFailure
from xmodule.modulestore import BlockData
from xmodule.modulestore.split_mongo import BlockKey
from xmodule.mongo_utils import connect_to_mongodb, create_collection_index
try:
from django.core.cache import caches, InvalidCacheBackendError
DJANGO_AVAILABLE = True
except ImportError:
DJANGO_AVAILABLE = False
new_contract('BlockData', BlockData)
log = logging.getLogger(__name__)
def get_cache(alias):
"""
Return cache for an `alias`
Note: The primary purpose of this is to mock the cache in test_split_modulestore.py
"""
return caches[alias]
def round_power_2(value):
"""
Return value rounded up to the nearest power of 2.
"""
if value == 0:
return 0
return math.pow(2, math.ceil(math.log(value, 2)))
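# e.g. round_power_2(3) == 4.0 and round_power_2(1000) == 1024.0 (math.pow returns a float).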
class Tagger(object):
"""
An object used by :class:`QueryTimer` to allow timed code blocks
to add measurements and tags to the timer.
"""
def __init__(self, default_sample_rate):
self.added_tags = []
self.measures = []
self.sample_rate = default_sample_rate
def measure(self, name, size):
"""
Record a measurement of the timed data. This would be something to
indicate the size of the value being timed.
Arguments:
name: The name of the measurement.
size (float): The size of the measurement.
"""
self.measures.append((name, size))
def tag(self, **kwargs):
"""
Add tags to the timer.
Arguments:
**kwargs: Each keyword is treated as a tag name, and the
value of the argument is the tag value.
"""
self.added_tags.extend(list(kwargs.items()))
@property
def tags(self):
"""
Return all tags for this (this includes any tags added with :meth:`tag`,
and also all of the added measurements, bucketed into powers of 2).
"""
return [
'{}:{}'.format(name, round_power_2(size))
for name, size in self.measures
] + [
'{}:{}'.format(name, value)
for name, value in self.added_tags
]
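# Example: after measure('blocks', 150) and tag(from_cache='false'), this property
# yields ['blocks:256.0', 'from_cache:false'] -- measured sizes are bucketed up to the
# nearest power of two by round_power_2 above, while added tags pass through verbatim.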
class QueryTimer(object):
"""
An object that allows timing a block of code while also recording measurements
about that code.
"""
def __init__(self, metric_base, sample_rate=1):
"""
Arguments:
metric_base: The prefix to be used for all queries captured
with this :class:`QueryTimer`.
"""
self._metric_base = metric_base
self._sample_rate = sample_rate
@contextmanager
def timer(self, metric_name, course_context):
"""
Contextmanager which acts as a timer for the metric ``metric_name``,
but which also yields a :class:`Tagger` object that allows the timed block
of code to add tags and quantity measurements. Tags are added verbatim to the
timer output. Measurements are recorded as histogram measurements in their own,
and also as bucketed tags on the timer measurement.
Arguments:
metric_name: The name used to aggregate all of these metrics.
course_context: The course which the query is being made for.
"""
tagger = Tagger(self._sample_rate)
metric_name = "{}.{}".format(self._metric_base, metric_name)
start = time() # lint-amnesty, pylint: disable=unused-variable
try:
yield tagger
finally:
end = time() # lint-amnesty, pylint: disable=unused-variable
tags = tagger.tags
tags.append('course:{}'.format(course_context))
TIMER = QueryTimer(__name__, 0.01)
def structure_from_mongo(structure, course_context=None):
"""
Converts the 'blocks' key from a list [block_data] to a map
{BlockKey: block_data}.
Converts 'root' from [block_type, block_id] to BlockKey.
Converts 'blocks.*.fields.children' from [[block_type, block_id]] to [BlockKey].
N.B. Does not convert any other ReferenceFields (because we don't know which fields they are at this level).
Arguments:
structure: The document structure to convert
course_context (CourseKey): For metrics gathering, the CourseKey
for the course that this data is being processed for.
"""
with TIMER.timer('structure_from_mongo', course_context) as tagger:
tagger.measure('blocks', len(structure['blocks']))
check('seq[2]', structure['root'])
check('list(dict)', structure['blocks'])
for block in structure['blocks']:
if 'children' in block['fields']:
check('list(list[2])', block['fields']['children'])
structure['root'] = BlockKey(*structure['root'])
new_blocks = {}
for block in structure['blocks']:
if 'children' in block['fields']:
block['fields']['children'] = [BlockKey(*child) for child in block['fields']['children']]
new_blocks[BlockKey(block['block_type'], block.pop('block_id'))] = BlockData(**block)
structure['blocks'] = new_blocks
return structure
def structure_to_mongo(structure, course_context=None):
"""
Converts the 'blocks' key from a map {BlockKey: block_data} to
a list [block_data], inserting BlockKey.type as 'block_type'
and BlockKey.id as 'block_id'.
Doesn't convert 'root', since namedtuple's can be inserted
directly into mongo.
"""
with TIMER.timer('structure_to_mongo', course_context) as tagger:
tagger.measure('blocks', len(structure['blocks']))
check('BlockKey', structure['root'])
check('dict(BlockKey: BlockData)', structure['blocks'])
for block in six.itervalues(structure['blocks']):
if 'children' in block.fields:
check('list(BlockKey)', block.fields['children'])
new_structure = dict(structure)
new_structure['blocks'] = []
for block_key, block in six.iteritems(structure['blocks']):
new_block = dict(block.to_storable())
new_block.setdefault('block_type', block_key.type)
new_block['block_id'] = block_key.id
new_structure['blocks'].append(new_block)
return new_structure
class CourseStructureCache(object):
"""
Wrapper around django cache object to cache course structure objects.
The course structures are pickled and compressed when cached.
If the 'course_structure_cache' doesn't exist, then don't do anything for set and get.
"""
def __init__(self):
self.cache = None
if DJANGO_AVAILABLE:
try:
self.cache = get_cache('course_structure_cache')
except InvalidCacheBackendError:
pass
def get(self, key, course_context=None):
"""Pull the compressed, pickled struct data from cache and deserialize."""
if self.cache is None:
return None
with TIMER.timer("CourseStructureCache.get", course_context) as tagger:
try:
compressed_pickled_data = self.cache.get(key)
tagger.tag(from_cache=str(compressed_pickled_data is not None).lower())
if compressed_pickled_data is None:
# Always log cache misses, because they are unexpected
tagger.sample_rate = 1
return None
tagger.measure('compressed_size', len(compressed_pickled_data))
pickled_data = zlib.decompress(compressed_pickled_data)
tagger.measure('uncompressed_size', len(pickled_data))
if six.PY2:
return pickle.loads(pickled_data)
else:
return pickle.loads(pickled_data, encoding='latin-1')
except Exception: # lint-amnesty, pylint: disable=broad-except
# The cached data is corrupt in some way, get rid of it.
log.warning("CourseStructureCache: Bad data in cache for %s", course_context)
self.cache.delete(key)
return None
def set(self, key, structure, course_context=None):
"""Given a structure, will pickle, compress, and write to cache."""
if self.cache is None:
return None
with TIMER.timer("CourseStructureCache.set", course_context) as tagger:
pickled_data = pickle.dumps(structure, 4) # Protocol can't be incremented until cache is cleared
tagger.measure('uncompressed_size', len(pickled_data))
# 1 = Fastest (slightly larger results)
compressed_pickled_data = zlib.compress(pickled_data, 1)
tagger.measure('compressed_size', len(compressed_pickled_data))
# Structures are immutable, so we set a timeout of "never"
self.cache.set(key, compressed_pickled_data, None)
class MongoConnection(object):
"""
Segregation of pymongo functions from the data modeling mechanisms for split modulestore.
"""
def __init__(
self, db, collection, host, port=27017, tz_aware=True, user=None, password=None,
asset_collection=None, retry_wait_time=0.1, **kwargs # lint-amnesty, pylint: disable=unused-argument
):
"""
Create & open the connection, authenticate, and provide pointers to the collections
"""
# Set a write concern of 1, which makes writes complete successfully to the primary
# only before returning. Also makes pymongo report write errors.
kwargs['w'] = 1
self.database = connect_to_mongodb(
db, host,
port=port, tz_aware=tz_aware, user=user, password=password,
retry_wait_time=retry_wait_time, **kwargs
)
self.course_index = self.database[collection + '.active_versions']
self.structures = self.database[collection + '.structures']
self.definitions = self.database[collection + '.definitions']
def heartbeat(self):
"""
Check that the db is reachable.
"""
try:
# The ismaster command is cheap and does not require auth.
self.database.client.admin.command('ismaster')
return True
except pymongo.errors.ConnectionFailure:
raise HeartbeatFailure("Can't connect to {}".format(self.database.name), 'mongo') # lint-amnesty, pylint: disable=raise-missing-from
def get_structure(self, key, course_context=None):
"""
Get the structure from the persistence mechanism whose id is the given key.
This method will use a cached version of the structure if it is available.
"""
with TIMER.timer("get_structure", course_context) as tagger_get_structure:
cache = CourseStructureCache()
structure = cache.get(key, course_context)
tagger_get_structure.tag(from_cache=str(bool(structure)).lower())
if not structure:
# Always log cache misses, because they are unexpected
tagger_get_structure.sample_rate = 1
with TIMER.timer("get_structure.find_one", course_context) as tagger_find_one:
doc = self.structures.find_one({'_id': key})
if doc is None:
log.warning(
"doc was None when attempting to retrieve structure for item with key %s",
six.text_type(key)
)
return None
tagger_find_one.measure("blocks", len(doc['blocks']))
structure = structure_from_mongo(doc, course_context)
tagger_find_one.sample_rate = 1
cache.set(key, structure, course_context)
return structure
@autoretry_read()
def find_structures_by_id(self, ids, course_context=None):
"""
Return all structures that specified in ``ids``.
Arguments:
ids (list): A list of structure ids
"""
with TIMER.timer("find_structures_by_id", course_context) as tagger:
tagger.measure("requested_ids", len(ids))
docs = [
structure_from_mongo(structure, course_context)
for structure in self.structures.find({'_id': {'$in': ids}})
]
tagger.measure("structures", len(docs))
return docs
@autoretry_read()
def find_courselike_blocks_by_id(self, ids, block_type, course_context=None):
"""
Find all structures that specified in `ids`. Among the blocks only return block whose type is `block_type`.
Arguments:
ids (list): A list of structure ids
block_type: type of block to return
"""
with TIMER.timer("find_courselike_blocks_by_id", course_context) as tagger:
tagger.measure("requested_ids", len(ids))
docs = [
structure_from_mongo(structure, course_context)
for structure in self.structures.find(
{'_id': {'$in': ids}},
{'blocks': {'$elemMatch': {'block_type': block_type}}, 'root': 1}
)
]
tagger.measure("structures", len(docs))
return docs
@autoretry_read()
def find_structures_derived_from(self, ids, course_context=None):
"""
Return all structures that were immediately derived from a structure listed in ``ids``.
Arguments:
ids (list): A list of structure ids
"""
with TIMER.timer("find_structures_derived_from", course_context) as tagger:
tagger.measure("base_ids", len(ids))
docs = [
structure_from_mongo(structure, course_context)
for structure in self.structures.find({'previous_version': {'$in': ids}})
]
tagger.measure("structures", len(docs))
return docs
@autoretry_read()
def find_ancestor_structures(self, original_version, block_key, course_context=None):
"""
Find all structures that originated from ``original_version`` that contain ``block_key``.
Arguments:
original_version (str or ObjectID): The id of a structure
block_key (BlockKey): The id of the block in question
"""
with TIMER.timer("find_ancestor_structures", course_context) as tagger:
docs = [
structure_from_mongo(structure, course_context)
for structure in self.structures.find({
'original_version': original_version,
'blocks': {
'$elemMatch': {
'block_id': block_key.id,
'block_type': block_key.type,
'edit_info.update_version': {
'$exists': True,
},
},
},
})
]
tagger.measure("structures", len(docs))
return docs
def insert_structure(self, structure, course_context=None):
"""
Insert a new structure into the database.
"""
with TIMER.timer("insert_structure", course_context) as tagger:
tagger.measure("blocks", len(structure["blocks"]))
self.structures.insert_one(structure_to_mongo(structure, course_context))
def get_course_index(self, key, ignore_case=False):
"""
Get the course_index from the persistence mechanism whose id is the given key
"""
with TIMER.timer("get_course_index", key):
if ignore_case:
query = {
key_attr: re.compile(u'^{}$'.format(re.escape(getattr(key, key_attr))), re.IGNORECASE)
for key_attr in ('org', 'course', 'run')
}
else:
query = {
key_attr: getattr(key, key_attr)
for key_attr in ('org', 'course', 'run')
}
return self.course_index.find_one(query)
def find_matching_course_indexes(
self,
branch=None,
search_targets=None,
org_target=None,
course_context=None,
course_keys=None
):
"""
Find the course_index matching particular conditions.
Arguments:
branch: If specified, this branch must exist in the returned courses
search_targets: If specified, this must be a dictionary specifying field values
that must exist in the search_targets of the returned courses
org_target: If specified, this is an ORG filter so that only course_indexs are
returned for the specified ORG
"""
with TIMER.timer("find_matching_course_indexes", course_context):
query = {}
if course_keys:
courses_queries = self._generate_query_from_course_keys(branch, course_keys)
query['$or'] = courses_queries
else:
if branch is not None:
query['versions.{}'.format(branch)] = {'$exists': True}
if search_targets:
for key, value in six.iteritems(search_targets):
query['search_targets.{}'.format(key)] = value
if org_target:
query['org'] = org_target
return self.course_index.find(query)
def _generate_query_from_course_keys(self, branch, course_keys):
"""
Generate query for courses using course keys
"""
courses_queries = []
query = {}
if branch:
query = {'versions.{}'.format(branch): {'$exists': True}}
for course_key in course_keys:
course_query = {
key_attr: getattr(course_key, key_attr)
for key_attr in ('org', 'course', 'run')
}
course_query.update(query)
courses_queries.append(course_query)
return courses_queries
def insert_course_index(self, course_index, course_context=None):
"""
Create the course_index in the db
"""
with TIMER.timer("insert_course_index", course_context):
course_index['last_update'] = datetime.datetime.now(pytz.utc)
self.course_index.insert_one(course_index)
def update_course_index(self, course_index, from_index=None, course_context=None):
"""
Update the db record for course_index.
Arguments:
from_index: If set, only update an index if it matches the one specified in `from_index`.
"""
with TIMER.timer("update_course_index", course_context):
if from_index:
query = {"_id": from_index["_id"]}
# last_update not only tells us when this course was last updated but also helps
# prevent collisions
if 'last_update' in from_index:
query['last_update'] = from_index['last_update']
else:
query = {
'org': course_index['org'],
'course': course_index['course'],
'run': course_index['run'],
}
course_index['last_update'] = datetime.datetime.now(pytz.utc)
self.course_index.replace_one(query, course_index, upsert=False,)
def delete_course_index(self, course_key):
"""
Delete the course_index from the persistence mechanism whose id is the given course_index
"""
with TIMER.timer("delete_course_index", course_key):
query = {
key_attr: getattr(course_key, key_attr)
for key_attr in ('org', 'course', 'run')
}
return self.course_index.remove(query)
def get_definition(self, key, course_context=None):
"""
Get the definition from the persistence mechanism whose id is the given key
"""
with TIMER.timer("get_definition", course_context) as tagger:
definition = self.definitions.find_one({'_id': key})
tagger.measure("fields", len(definition['fields']))
tagger.tag(block_type=definition['block_type'])
return definition
def get_definitions(self, definitions, course_context=None):
"""
Retrieve all definitions listed in `definitions`.
"""
with TIMER.timer("get_definitions", course_context) as tagger:
tagger.measure('definitions', len(definitions))
definitions = self.definitions.find({'_id': {'$in': definitions}})
return definitions
def insert_definition(self, definition, course_context=None):
"""
Create the definition in the db
"""
with TIMER.timer("insert_definition", course_context) as tagger:
tagger.measure('fields', len(definition['fields']))
tagger.tag(block_type=definition['block_type'])
self.definitions.insert_one(definition)
def ensure_indexes(self):
"""
Ensure that all appropriate indexes are created that are needed by this modulestore, or raise
an exception if unable to.
This method is intended for use by tests and administrative commands, and not
to be run during server startup.
"""
create_collection_index(
self.course_index,
[
('org', pymongo.ASCENDING),
('course', pymongo.ASCENDING),
('run', pymongo.ASCENDING)
],
unique=True,
background=True
)
def close_connections(self):
"""
Closes any open connections to the underlying databases
"""
self.database.client.close()
def _drop_database(self, database=True, collections=True, connections=True):
"""
A destructive operation to drop the underlying database and close all connections.
Intended to be used by test code for cleanup.
If database is True, then this should drop the entire database.
Otherwise, if collections is True, then this should drop all of the collections used
by this modulestore.
Otherwise, the modulestore should remove all data from the collections.
If connections is True, then close the connection to the database as well.
"""
connection = self.database.client
if database:
connection.drop_database(self.database.name)
elif collections:
self.course_index.drop()
self.structures.drop()
self.definitions.drop()
else:
self.course_index.remove({})
self.structures.remove({})
self.definitions.remove({})
if connections:
connection.close()
| stvstnfrd/edx-platform | common/lib/xmodule/xmodule/modulestore/split_mongo/mongo_connection.py | Python | agpl-3.0 | 23,649 | 0.002241 |
from models import *
donation = Donation()
donor = Donor()
donor.first_name = "FirstName"
donor.last_name = "LastName"
print(donor)
| kylebegovich/ICIdo | mainsite/temp.py | Python | mit | 138 | 0.007246 |
import Orange
import logging
import random
from discretization import *
from FeatureSelector import *
from utils import *
from sklearn import svm
from sklearn import cross_validation
from sklearn.metrics import f1_score, precision_recall_fscore_support
from sklearn.feature_extraction import DictVectorizer
import numpy as np
# Vars
testsetPercentage = .2
validationsetPercentage = .3
progress = False
baseline = .9496
# Utilities
logging.basicConfig(filename='main.log',level=logging.DEBUG,format='%(levelname)s\t%(message)s')
def logmessage(message, color):
print color(message)
logging.info(message)
def copyDataset(dataset):
return Orange.data.Table(dataset)
# Compute S Threshold
# =============================================================================
boxmessage("Start", warning)
data = Orange.data.Table("dataset.tab")
data.randomGenerator = Orange.orange.RandomGenerator(random.randint(0, 10))
logmessage("Main Dataset Loaded", success)
# =============================================================================
# Extracts Test Set
boxmessage("Extracting Test Set and Working Set", info)
testSet = None
workingSet = None
if progress:
try:
with open("finaltestset.tab"):
logmessage("Final Test Set found", info)
with open("trainingset.tab"):
logmessage("Working Set found", info)
testSet = Orange.data.Table("finaltestset.tab")
workingSet = Orange.data.Table("trainingset.tab")
except IOError:
logmessage("IOError in loading final and working sets", error)
pass
else:
selection = Orange.orange.MakeRandomIndices2(data, testsetPercentage)
testSet = data.select(selection, 0)
testSet.save("finaltestset.tab")
workingSet = data.select(selection, 1)
workingSet.save("workingset.tab")
print success("Extraction performed")
print info("Test Instances: %s" % len(testSet))
print info("Training + Validation Instances: %s" % len(workingSet))
# =============================================================================
# Starts Iterations
K = 1
S = 0
C = 0
boxmessage("Starting main Loop", info)
#while(performanceIncrease):
# Split
if not progress:
info("Splitting Working Dataset for training and validation (70-30)")
selection = Orange.orange.MakeRandomIndices2(workingSet, validationsetPercentage)
validationSet = workingSet.select(selection, 0)
trainingSet = workingSet.select(selection, 1)
trainingSet.save("trainingset.tab")
validationSet.save("validationset.tab")
else:
validationSet = Orange.data.Table("validationset.tab")
trainingSet = Orange.data.Table("trainingset.tab")
# Discretization
ds = Discretizer(trainingSet, K, logging)
if progress:
try:
with open("discretizer.K.gains"):
print info("Loading Previous Iteration")
ds.load()
except IOError:
logmessage("IOError in loading found gains", error)
pass
else:
ds.findThresholds()
if progress:
try:
with open("discretized.tab"):
trainingSet = Orange.data.Table("discretized.tab")
print info("Discretized Dataset Loaded")
except IOError:
logmessage("IOError in loading discretized training dataset", error)
else:
trainingSet = ds.discretizeDataset(trainingSet)
trainingSet.save("discretized.tab")
# ============================================================================ #
# Feature Selection
fs = FeatureSelector()
if progress:
try:
with open("featureselected.tab"):
trainingSet = Orange.data.Table("featureselected.tab")
print info("Features Selected Dataset Loaded")
except IOError:
fs.computeThreshold(trainingSet)
fs.save()
trainingSet = fs.select(trainingSet)
trainingSet.save("featureselected.tab")
print info("New training dataset is %s" %len(trainingSet))
print info("New training dataset features are %s" % len(trainingSet[0]))
# Model Training
# Convert Train Dataset
# Apply transformation, from labels to you know what I mean
converted_train_data = ([[ d[f].value for f in trainingSet.domain if f != trainingSet.domain.class_var] for d in trainingSet])
converted_train_data = [dict(enumerate(d)) for d in converted_train_data]
vector = DictVectorizer(sparse=False)
converted_train_data = vector.fit_transform(converted_train_data)
converted_train_targets = ([ 0 if d[trainingSet.domain.class_var].value == 'ALL' else 1 for d in trainingSet ])
clf = svm.SVC(kernel='linear')
clf.fit(converted_train_data, converted_train_targets)
logmessage("Model learnt", success)
# Performances
# Apply Discretization and feature selection to validation set
validationSet = ds.discretizeDataset(validationSet)
validationSet = fs.select(validationSet)
logmessage("Validation set length is %s" % len(validationSet), info)
logmessage("Validation feature length is %s" % len(validationSet[0]), info)
# Convert Test Dataset
converted_test_data = ([[ d[f].value for f in validationSet.domain if f != validationSet.domain.class_var] for d in validationSet])
converted_test_data = [dict(enumerate(d)) for d in converted_test_data]
converted_test_data = vector.fit_transform(converted_test_data)
converted_test_targets = ([0 if d[validationSet.domain.class_var].value == 'ALL' else 1 for d in validationSet ])
logmessage("Starting Prediction Task", info)
prediction = clf.predict(converted_test_data)
p, r, f1, support = precision_recall_fscore_support(converted_test_targets, prediction)
f1_avg = np.average(f1)
logmessage("Average F1(Over 2 classes): %s" % f1_avg, info)
if f1_avg > baseline:
logmessage("Performance Increased", success)
logmessage("Using K: %s, S: %s, C: default" % (ds.K, fs.threshold), info)
else:
logmessage("Performance Decreased", error)
# =============================================================================
# Final Test
| Sh1n/AML-ALL-classifier | main.py | Python | gpl-2.0 | 5,647 | 0.015583 |
#
# __init__.py
#
# Copyright (C) 2016 Kano Computing Ltd.
# License: http://www.gnu.org/licenses/gpl-2.0.txt GNU GPL v2
#
# Module for board specific settings
#
import importlib
import re
import pkgutil
from kano.logging import logger
from kano.utils.hardware import RPI_1_CPU_PROFILE, get_board_property, \
get_rpi_model
__author__ = 'Kano Computing Ltd.'
__email__ = 'dev@kano.me'
def get_board_props(board_name=None):
if not board_name:
board_name = get_rpi_model()
cpu_profile = get_board_property(board_name, 'cpu_profile')
if not cpu_profile:
cpu_profile = RPI_1_CPU_PROFILE
board_module = re.sub(r'[-/ ]', '_', cpu_profile).lower()
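# e.g. a hypothetical cpu_profile of "RPI 2/3" becomes the module name "rpi_2_3"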
try:
board = importlib.import_module(
'{}.{}'.format(__name__, board_module)
)
except ImportError:
logger.error('Board not found')
return None
required_props = ['CLOCKING', 'DEFAULT_CONFIG']
for prop in required_props:
if not hasattr(board, prop):
logger.error('No {} data in board config'
.format(prop.replace('_', ' ').lower()))
return None
# TODO: Validate board info
return board
| KanoComputing/kano-settings | kano_settings/system/boards/__init__.py | Python | gpl-2.0 | 1,199 | 0 |
import mock
from pantsmud.driver import hook
from spacegame.core import hook_types
from spacegame.universe.star_system import StarSystem
from spacegame.universe.universe import Universe
from tests.unit.util import UnitTestCase
class StarSystemUnitTestCase(UnitTestCase):
def setUp(self):
UnitTestCase.setUp(self)
self.hook_star_system_reset = mock.MagicMock()
self.hook_star_system_reset.__name__ = 'hook_star_system_reset'
hook.add(hook_types.STAR_SYSTEM_RESET, self.hook_star_system_reset)
self.star_system = StarSystem()
self.star_system.reset_interval = 10
def test_links(self):
u = Universe()
s1 = StarSystem()
u.add_star_system(s1)
s2 = StarSystem()
u.add_star_system(s2)
s1.link_uuids.add(s2.uuid)
self.assertEqual({s2}, s1.links)
self.assertEqual(set(), s2.links)
def test_change_reset_interval_from_negative_updates_reset_timer(self):
self.star_system.reset_interval = -1
self.star_system.reset_timer = -1
self.star_system.reset_interval = 10
self.assertEqual(self.star_system.reset_timer, 10)
def test_change_reset_interval_with_reset_timer_below_one_updates_reset_timer(self):
self.star_system.reset_timer = 0
self.star_system.reset_interval = 5
self.assertEqual(self.star_system.reset_timer, 5)
def test_reduce_reset_interval_below_reset_timer_updates_reset_timer(self):
self.star_system.reset_interval = 10
self.star_system.reset_timer = 10
self.star_system.reset_interval = 5
self.assertEqual(self.star_system.reset_timer, 5)
def test_increase_reset_interval_above_reset_timer_does_not_change_reset_timer(self):
self.star_system.reset_timer = 10
self.star_system.reset_interval = 20
self.assertEqual(self.star_system.reset_timer, 10)
def test_force_reset_resets_reset_timer(self):
self.star_system.force_reset()
self.assertEqual(self.star_system.reset_timer, self.star_system.reset_interval)
def test_force_reset_calls_hook_star_system_reset(self):
self.star_system.force_reset()
self.hook_star_system_reset.assert_called()
def test_force_reset_with_negative_reset_interval_calls_hook_star_system_reset(self):
self.star_system.reset_interval = -1
self.star_system.force_reset()
self.hook_star_system_reset.assert_called()
def test_pulse_with_reset_timer_above_one_does_not_call_hook_star_system_reset(self):
self.star_system.reset_timer = 2
self.star_system.pulse()
self.hook_star_system_reset.assert_not_called()
def test_pulse_with_reset_timer_at_one_calls_hook_star_system_reset(self):
self.star_system.reset_timer = 1
self.star_system.pulse()
self.hook_star_system_reset.assert_called()
def test_pulse_with_reset_timer_below_one_does_not_call_hook_star_system_reset(self):
self.star_system.reset_timer = 0
self.star_system.pulse()
self.hook_star_system_reset.assert_not_called()
def test_pulse_with_reset_timer_above_one_decrements_reset_timer(self):
self.star_system.reset_timer = 2
self.star_system.pulse()
self.assertEqual(self.star_system.reset_timer, 1)
def test_pulse_with_reset_timer_at_one_resets_reset_timer(self):
self.star_system.reset_timer = 1
self.star_system.pulse()
self.assertEqual(self.star_system.reset_timer, self.star_system.reset_interval)
def test_pulse_with_reset_timer_at_zero_decrements_reset_timer(self):
self.star_system.reset_timer = 0
self.star_system.pulse()
self.assertEqual(self.star_system.reset_timer, -1)
def test_pulse_with_reset_timer_below_zero_does_not_change_reset_timer(self):
self.star_system.reset_timer = -1
self.star_system.pulse()
self.assertEqual(self.star_system.reset_timer, -1)
| ecdavis/spacegame | tests/unit/test_star_system.py | Python | apache-2.0 | 3,976 | 0.002012 |
# -*- coding: utf8 -*-
class Mv:
def command(self):
self.config = {
"command": {
"mv": {
"function": self.mvScreams,
"usage": "mv <user>",
"help": "Le clavier y colle!"
}
}}
return self.config
def mvScreams(self, Morphux, infos):
print(infos)
if (len(infos['args']) == 0 and infos['nick'] == "valouche"):
Morphux.sendMessage("Ta mere la chauve", infos['nick'])
elif (len(infos['args']) == 0 and infos['nick'] == "Ne02ptzero"):
Morphux.sendMessage("TU VAS LA CHIER TA CHIASSE?", infos['nick'])
elif (len(infos['args']) == 0):
Morphux.sendMessage("SARACE BOULBA", infos['nick'])
elif (infos['args'][0] == "allow"):
Morphux.sendMessage("ALLOW?", infos['nick'])
elif (infos['args'][0] == "thunes"):
Morphux.sendMessage("Money equals power", infos['nick'])
elif (infos['args'][0] == "theodule"):
Morphux.sendMessage("THEODUUULE", infos['nick'])
elif (infos['args'][0] == "gg"):
Morphux.sendMessage("Le beau jeu, le beau geste, la lucidité !", infos['nick'])
elif (Morphux.userExists(infos['args'][0]) == 0):
Morphux.sendMessage("Respecte toi " + infos['args'][0] + "!", infos['nick'])
| Morphux/IRC-Bot | modules/mv/mv.py | Python | gpl-2.0 | 1,360 | 0.001472 |
#!/usr/bin/env python3
#
# Copyright 2013 The Flutter Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import argparse
import zipfile
import os
import sys
def _zip_dir(path, zip_file, prefix):
path = path.rstrip('/\\')
for root, dirs, files in os.walk(path):
for file in files:
zip_file.write(os.path.join(root, file), os.path.join(
root.replace(path, prefix), file))
def main(args):
zip_file = zipfile.ZipFile(args.output, 'w', zipfile.ZIP_DEFLATED)
for path, archive_name in args.input_pairs:
if os.path.isdir(path):
_zip_dir(path, zip_file, archive_name)
else:
zip_file.write(path, archive_name)
zip_file.close()
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='This script creates zip files.')
parser.add_argument('-o', dest='output', action='store',
help='The name of the output zip file.')
parser.add_argument('-i', dest='input_pairs', nargs=2, action='append',
help='The input file and its destination location in the zip archive.')
sys.exit(main(parser.parse_args()))
| jamesr/sky_engine | build/zip.py | Python | bsd-3-clause | 1,167 | 0.011997 |
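A hedged invocation example for the script above; the paths are made up, and -i is repeated once per (source, archive-name) pair exactly as the argparse configuration accepts.
import subprocess
# Each -i pairs an on-disk file or directory with the name it gets inside the zip.
subprocess.check_call([
    'python3', 'build/zip.py',
    '-o', 'out/artifacts.zip',
    '-i', 'out/host_debug/flutter_tester', 'bin/flutter_tester',
    '-i', 'out/host_debug/gen', 'gen',
])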
class Solution(object):
def checkPossibility(self, nums):
"""
:type nums: List[int]
:rtype: bool
"""
is_modified = False
for i in xrange(len(nums) - 1):
if nums[i] > nums[i+1]:
if is_modified:
return False
else:
if i == 0 or nums[i-1] <= nums[i+1]:
nums[i] = nums[i+1]
else:
nums[i+1] = nums[i]
is_modified = True
return True
| Chasego/cod | leetcode/665-Non-decreasing-Array/NonDecreasingArr.py | Python | mit | 567 | 0.001764 |
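A quick sanity check of the solution above (run under Python 2, since it uses xrange); the sample arrays are arbitrary examples.
s = Solution()
print(s.checkPossibility([4, 2, 3]))     # True  - lowering the 4 fixes the single dip
print(s.checkPossibility([4, 2, 1]))     # False - one modification is not enough
print(s.checkPossibility([3, 4, 2, 3]))  # False - would need two modifications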
from setuptools import setup ; setup()
| karpierz/libpcap | setup.py | Python | bsd-3-clause | 39 | 0.051282 |
#!/usr/bin/env python
"""
Prototype to DOT (Graphviz) converter by Dario Gomez
Table format from django-extensions
"""
from protoExt.utils.utilsBase import Enum, getClassName
from protoExt.utils.utilsConvert import slugify2
class GraphModel():
def __init__(self):
self.tblStyle = False
self.dotSource = 'digraph Sm {'
self.dotSource += 'fontname="Helvetica";fontsize = 8;'
self.GRAPH_LEVEL = Enum(['all', 'essential', 'required' , 'primary', 'title'])
self.GRAPH_FORM = Enum(['orf', 'erf', 'drn'])
if self.tblStyle:
self.dotSource += 'node [shape="plaintext"];\n'
self.tblTitle = '\n{0} [label=<<TABLE BGCOLOR="palegoldenrod" BORDER="0" CELLBORDER="0" CELLSPACING="0" style="width:100px"><TR><TD COLSPAN="2" CELLPADDING="4" ALIGN="CENTER" BGCOLOR="olivedrab4"> <FONT FACE="Helvetica Bold" COLOR="white">{1}</FONT> </TD></TR>'
self.tblField = '\n<TR><TD ALIGN="LEFT" BORDER="0"><FONT FACE="Helvetica {2}">{0}</FONT></TD><TD ALIGN="LEFT"><FONT FACE="Helvetica {2}">{1}</FONT></TD></TR>'
else:
# Animal [label = "{{{1}|+ name : string\l+ age : int\l|+ die() : void\l}"]
self.dotSource += 'rankdir = BT;node [shape=record,width=0,height=0,concentrate=true];\n'
self.tblRecord = '\n{0} [label = "{{{1}|'
self.lnkComposition = '[dir=both,arrowhead=diamond,arrowtail=none]\n'
self.lnkAgregation = '[dir=both,arrowhead=ediamond,arrowtail=none]\n'
self.lnkNoCascade = '[dir=both,arrowhead=diamondtee,arrowtail=none]\n'
self.lnkHeritage = '[dir=both,arrowhead=empty,arrowtail=none]\n'
self.lnkER = '[dir=both,arrowhead=none,arrowtail=invempty]\n'
def getDiagramDefinition(self, diagramSet):
self.diagrams = []
self.entities = []
for pDiag in diagramSet:
gDiagram = {
'code': getClassName(pDiag.code) ,
'label': slugify2( pDiag.code ),
'clusterName': slugify2( getattr(pDiag, 'title', pDiag.code)),
'graphLevel' : getattr(pDiag, 'graphLevel' , self.GRAPH_LEVEL.all),
'graphForm' : getattr(pDiag, 'graphForm' , self.GRAPH_FORM.orf),
'showPrpType': getattr(pDiag, 'showPrpType' , False),
'showBorder' : getattr(pDiag, 'showBorder' , False),
'showFKey' : getattr(pDiag, 'showFKey' , False),
'prefix' : slugify2( getattr(pDiag, 'prefix' , '')),
'entities': []
}
for pDiagEntity in pDiag.diagramentity_set.all():
pEntity = pDiagEntity.entity
enttCode = self.getEntityCode(pEntity.code, gDiagram.get('prefix'))
            # Skip the entity if it has already been drawn in another diagram
if enttCode in self.entities:
continue
self.entities.append(enttCode)
gEntity = {
'code': enttCode,
'fields': [],
'relations': []
}
for pProperty in pEntity.property_set.all():
pptCode = slugify2(pProperty.code, '_')
if pProperty.isForeign:
pLinkTo = self.getEntityCode(pProperty.relationship.refEntity.code, gDiagram.get('prefix'))
gEntity['relations'].append({
'code': pptCode,
'linkTo': pLinkTo,
'primary': pProperty.isPrimary,
'required': pProperty.isRequired,
'essential': pProperty.isEssential,
'foreign': True
})
else:
pType = slugify2(pProperty.baseType , '_')
gEntity['fields'].append({
'code': pptCode,
'type': pType or 'string',
'primary': pProperty.isPrimary,
'required': pProperty.isRequired,
'essential': pProperty.isEssential,
'foreign': False
})
gDiagram['entities'].append(gEntity)
self.diagrams.append(gDiagram)
def generateDotModel(self):
        # Draw the entities
for gDiagram in self.diagrams:
if gDiagram.get('graphLevel') < self.GRAPH_LEVEL.title :
self.dotSource += '\nsubgraph cluster_{0} {{'.format(gDiagram.get('code'))
if not gDiagram.get('showBorder', False) :
self.dotSource += 'style=dotted;'
if len(gDiagram.get('label', '')) > 0:
self.dotSource += 'label="{}";'.format(gDiagram.get('label', ''))
for gEntity in gDiagram['entities']:
self.entity2dot(gDiagram, gEntity)
self.dotSource += '}\n'
        # Draw the links
for gDiagram in self.diagrams:
for gEntity in gDiagram['entities']:
self.link2dot(gEntity, gDiagram.get( 'showFKey'))
self.dotSource += '}'
        # Draw the relations
# for gDiagram in self.diagrams:
# for relation in gEntity['relations']:
# if relation['target'] in nodes:
# relation['needs_node'] = False
return self.dotSource
def link2dot(self, gEntity, showFKey):
for gLink in gEntity['relations']:
pEntity = gEntity.get('code')
pLinkTo = gLink.get('linkTo')
if ( not showFKey ) and ( pLinkTo not in self.entities ):
continue
self.dotSource += '{0} -> {1} '.format(pEntity, pLinkTo) + self.lnkComposition
def entity2dot(self, gDiagram, gEntity):
if self.tblStyle:
enttTable = self.tblTitle.format(gEntity.get('code'), gEntity.get('label', gEntity.get('code')))
else:
enttRecord = self.tblRecord.format(gEntity.get('code'), gEntity.get('label', gEntity.get('code')))
# 0 : colName; 1 : baseType; 2 : Bold / Italic
for gField in gEntity['fields'] + gEntity['relations'] :
if gDiagram.get('showPrpType') :
sPrpType = gField.get('type', ' ')
else : sPrpType = ' '
sPk = ''
fildLv = 0
diagLv = gDiagram.get('graphLevel')
if gField.get('primary') :
fildLv = self.GRAPH_LEVEL.primary
sPk = 'Bold'
elif gField.get('required'):
fildLv = self.GRAPH_LEVEL.required
elif gField.get('essential'):
fildLv = self.GRAPH_LEVEL.essential
            # Only draw the field if its level reaches the diagram's graph level
if fildLv >= diagLv:
sFk = ''
if gField.get('foreign'):
sFk = ' Italic'
if self.tblStyle:
enttTable += self.tblField.format(gField.get('code'), sPrpType, sPk + sFk)
else:
if len(sPk) > 0:
sPk = '*'
if len(sFk) > 0:
sPk += '+'
if len(sPk) > 0:
sPk += ' '
if len(sPrpType) > 1:
sPrpType = ': ' + sPrpType
enttRecord += '{2}{0}{1}\l'.format(gField.get('code'), sPrpType, sPk)
if self.tblStyle:
enttTable += '</TABLE>>]\n'
else:
enttRecord += '}"]\n'
# self.dotSource += enttTable
self.dotSource += enttRecord
def getEntityCode(self, code, prefix):
        # Normalize the entity name
enttCode = code.lower()
prefix = prefix or ''
if len(prefix) and enttCode.startswith(prefix.lower()):
enttCode = enttCode[len(prefix):]
return getClassName(enttCode)
| DarioGT/docker-carra | src/prototype/actions/graphModel.py | Python | mit | 8,442 | 0.014215 |
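A rough driver sketch for the converter above. The import path is inferred from the corpus entry's file path and may differ, and the queryset argument is a placeholder: getDiagramDefinition() only relies on the attributes accessed above (code, title, diagramentity_set, property_set, ...).
from prototype.actions.graphModel import GraphModel   # inferred path, may differ
def diagrams_to_dot(diagram_queryset):
    """Return Graphviz source text for a set of prototype diagrams."""
    gm = GraphModel()
    gm.getDiagramDefinition(diagram_queryset)   # collect entities and links
    return gm.generateDotModel()                # emits the 'digraph Sm { ... }' text
# Example with a hypothetical Diagram model:
# dot_source = diagrams_to_dot(Diagram.objects.all())
# open('model.dot', 'w').write(dot_source)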
#! /usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import logging
import unittest
from bs4 import BeautifulSoup
from compute.code import CodeExtractor
logging.basicConfig(level=logging.INFO, format="%(message)s")
class ExtractCodeTest(unittest.TestCase):
def setUp(self):
self.code_extractor = CodeExtractor()
def _extract_code(self, document):
return self.code_extractor.extract(document)
def _make_document_with_body(self, body):
return BeautifulSoup('\n'.join([
"<html>",
" <body>",
body,
" </body>",
"</html>",
]), 'html.parser')
def test_extract_valid_javascript(self):
        document = self._make_document_with_body("<code>var i = 0;</code>")
snippets = self.code_extractor.extract(document)
self.assertEqual(len(snippets), 1)
self.assertEqual(snippets[0], "var i = 0;")
def test_extract_valid_javascript_with_padding(self):
# In the past, some parsers I have used have had trouble parsing with whitespace
# surrounding the parsed content. This is a sanity test to make sure that the
# backend parser will still detect JavaScript padded with whitespace.
document = self._make_document_with_body("<code>\n\n\t var i = 0;\t \n</code>")
snippets = self.code_extractor.extract(document)
self.assertEqual(snippets[0], "\n\n\t var i = 0;\t \n")
def test_extract_valid_multiline_javascript(self):
document = self._make_document_with_body('\n'.join([
"<code>for (var i = 0; i < 2; i++) {",
" console.log(\"Hello, world!\");",
"}</code>",
]))
snippets = self.code_extractor.extract(document)
self.assertEqual(snippets[0], '\n'.join([
"for (var i = 0; i < 2; i++) {",
" console.log(\"Hello, world!\");",
"}",
]))
def test_extract_multiple_blocks(self):
document = self._make_document_with_body('\n'.join([
"<code>var i = 0;</code>",
"<code>i = i + 1;</code>",
]))
snippets = self.code_extractor.extract(document)
self.assertEqual(len(snippets), 2)
self.assertIn("var i = 0;", snippets)
self.assertIn("i = i + 1;", snippets)
def test_fail_to_detect_text_in_code_block(self):
document = self._make_document_with_body("<code>This is a plain English sentence.</code>")
snippets = self.code_extractor.extract(document)
self.assertEqual(len(snippets), 0)
def test_fail_to_detect_command_line(self):
document = self._make_document_with_body("<code>npm install package</code>")
snippets = self.code_extractor.extract(document)
self.assertEqual(len(snippets), 0)
def test_skip_whitespace_only(self):
document = self._make_document_with_body("<code>\t \n</code>")
snippets = self.code_extractor.extract(document)
self.assertEqual(len(snippets), 0)
# In practice I don't expect the next two scenarios to come up. But the expected behavior of
# the code extractor is to scan children of all nodes that are marked as invalid. This
# test makes sure that functionality is correct.
def test_skip_child_of_code_block_parent(self):
document = self._make_document_with_body('\n'.join([
"<code>",
"var outer = 0;",
"<code>var inner = 1;</code>",
"</code>",
]))
snippets = self.code_extractor.extract(document)
self.assertEqual(len(snippets), 1)
self.assertEqual(snippets[0], '\n'.join([
"",
"var outer = 0;",
"var inner = 1;",
"",
]))
def test_detect_code_block_nested_inside_invalid_code_block(self):
document = self._make_document_with_body('\n'.join([
"<code>",
" This plaintext invalidates this block as a whole.",
" <code>var i = 0; // But this child will be valid</code>",
"</code>",
]))
snippets = self.code_extractor.extract(document)
self.assertEqual(len(snippets), 1)
self.assertEqual(snippets[0], "var i = 0; // But this child will be valid")
| andrewhead/Package-Qualifiers | tests/compute/test_compute_code.py | Python | mit | 4,317 | 0.001853 |
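For reference, the API exercised by these tests reduces to the short sketch below; the HTML string is an arbitrary example and is parsed with html.parser exactly as in _make_document_with_body().
from bs4 import BeautifulSoup
from compute.code import CodeExtractor
html = "<html><body><code>var i = 0;</code><code>npm install package</code></body></html>"
document = BeautifulSoup(html, 'html.parser')
snippets = CodeExtractor().extract(document)
print(snippets)   # per the tests above, only the JavaScript snippet should survive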
# -*- coding: utf-8 -*-
# Generated by Django 1.9.1 on 2016-02-19 00:28
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('core', '0002_auto_20160218_2359'),
]
operations = [
migrations.CreateModel(
name='Contact',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('kind', models.CharField(choices=[('E', 'Email'), ('P', 'Phone')], max_length=1)),
('value', models.CharField(max_length=255)),
('speaker', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.Speaker')),
],
),
]
| Golker/wttd | eventex/core/migrations/0003_contact.py | Python | mit | 822 | 0.00365 |
import unittest
import sys
import os
sys.path.append(os.environ.get("PROJECT_ROOT_DIRECTORY", "."))
from moduledependency.tokeniser import Token, Tokeniser
class TestToken(unittest.TestCase):
	def test_construction(self):
# Test with invalid token type
with self.assertRaises(ValueError):
Token("HAHAHAHAHA")
# Test with valid token types (one with value and one without)
token = Token("identifier", "testVariable")
self.assertEqual(token.type, "identifier")
self.assertEqual(token.value, "testVariable")
token = Token("from")
self.assertEqual(token.type, "from")
self.assertEqual(token.value, "from")
class TestTokeniser(unittest.TestCase):
def setUp(self):
self.tokeniser = Tokeniser()
# Create test data
self.noImportSource = """
def testFunction(x):
\"\"\"This is a docstring but I'm not sure
how far it goes.
\"\"\"
return x * 2
\'\'\'Another multi
line string\'\'\'
'test'
something = [ "hello" ]
"""
self.importSource = """#comment here
import a
from a import b
from c import *
from d import e, f
from g import dummy, *
from . import h
from . import i, j
from .k import l
from .m import *
from .n import o.p
from .q import another_dummy, *
class DummyClass:
def something():
# Hello World!
from sys import path # test
print(path)
def somethingEntirelyDifferent():
import bang
bang.start()
"""
self.noImportTokens = [
Token("identifier", "def"), Token("identifier", "testFunction"),
Token("other", "("), Token("identifier", "x"), Token("other", ")"), Token("other", ":"),
Token("identifier", "return"), Token("identifier", "x"),
Token("*"), Token("other", "2"), Token("identifier", "something"),
Token("other", "="), Token("other", "["), Token("other", "]"),
]
self.importTokens = [
Token("import"), Token("identifier", "a"),
Token("from"), Token("identifier", "a"), Token("import"), Token("identifier", "b"),
Token("from"), Token("identifier", "c"), Token("import"), Token("*"),
Token("from"), Token("identifier", "d"), Token("import"), Token("identifier", "e"), Token(","), Token("identifier", "f"),
Token("from"), Token("identifier", "g"), Token("import"), Token("identifier", "dummy"), Token(","), Token("*"),
Token("from"), Token("."), Token("import"), Token("identifier", "h"),
Token("from"), Token("."), Token("import"), Token("identifier", "i"), Token(","), Token("identifier", "j"),
Token("from"), Token("."), Token("identifier", "k"), Token("import"), Token("identifier", "l"),
Token("from"), Token("."), Token("identifier", "m"), Token("import"), Token("*"),
Token("from"), Token("."), Token("identifier", "n"), Token("import"), Token("identifier", "o"), Token("."), Token("identifier", "p"),
Token("from"), Token("."), Token("identifier", "q"), Token("import"), Token("identifier", "another_dummy"), Token(","), Token("*"),
Token("identifier", "class"), Token("identifier", "DummyClass"), Token("other", ":"),
Token("identifier", "def"), Token("identifier", "something"), Token("other", "("), Token("other", ")"), Token("other", ":"),
Token("from"), Token("identifier", "sys"), Token("import"), Token("identifier", "path"),
Token("identifier", "print"), Token("other", "("), Token("identifier", "path"), Token("other", ")"),
Token("identifier", "def"), Token("identifier", "somethingEntirelyDifferent"), Token("other", "("), Token("other", ")"), Token("other", ":"),
Token("import"), Token("identifier", "bang"),
Token("identifier", "bang"), Token("."), Token("identifier", "start"), Token("other", "("), Token("other", ")")
]
def tearDown(self):
self.tokeniser = None
self.noImportSource = None
self.importSource = None
self.noImportTokens = None
self.importTokens = None
def test_tokenise(self):
# Test with invalid type
with self.assertRaises(TypeError):
self.tokeniser.tokenise(3636)
# Test with empty Python source code
self.assertEqual(self.tokeniser.tokenise(""), [])
# Test with source code that has no imports
self.assertEqual(self.tokeniser.tokenise(self.noImportSource), self.noImportTokens)
# Test with source code that has imports
self.assertEqual(self.tokeniser.tokenise(self.importSource), self.importTokens)
# Test with source that ends STRAIGHT after import
self.assertEqual(self.tokeniser.tokenise("from . import pack"),
[ Token("from"), Token("."), Token("import"), Token("identifier", "pack") ])
def test_skipComment(self):
# First element of tuple is the index to start skipping from
# and the second element is the desired end element
TEST_SOURCE = """#comment at the start
hello = 5 # comment at the end of a thing
# # # # nestetd comment
"""
TEST_INDICES = [ (0, 21), (31, 65), (66, 91) ]
for test in TEST_INDICES:
self.tokeniser.clear()
self.tokeniser.source = TEST_SOURCE
self.tokeniser.index = test[0]
self.tokeniser.skipComment()
self.assertEqual(self.tokeniser.index, test[1])
def test_skipString(self):
# Contains tuples where the first element is the index of
# the character the test should start at, the second
# element is where the tokeniser should stop skipping and
# the third element is the delimiter of the test string
TEST_INDICES = [
(31, 8, "\""),
(51, 7, "\'"),
(70, 24, "\"\"\""),
(106, 38, "'''"),
(155, 14, "\"")
]
# Set the source code that will be used for comment skipping
TEST_SOURCE = """#comment at the start
test = "hel\\"lo"
test2 = 'el\\'lo'
test3 = \"\"\""hello"
multiline\"\"\"
test4 = '''can be multiline but 'test' isn't'''
no_end=" ijruiytie
"""
for test in TEST_INDICES:
self.tokeniser.clear()
self.tokeniser.source = TEST_SOURCE
self.tokeniser.index = test[0]
self.tokeniser.skipString(test[2])
			self.assertEqual(self.tokeniser.index, test[0] + test[1])
| DonaldWhyte/module-dependency | tests/test_tokeniser.py | Python | mit | 6,029 | 0.026373 |
import logging
import os
import json
import shutil
import sys
import datetime
import csv, math
from tld import get_tld
from collections import OrderedDict
from utils import Util
from components.data.data import Data
from components.iana.iana_transform import IanaTransform
from components.nc.network_context import NetworkContext
from multiprocessing import Process
import pandas as pd
import time
class OA(object):
def __init__(self,date,limit=500,logger=None):
self._initialize_members(date,limit,logger)
def _initialize_members(self,date,limit,logger):
# get logger if exists. if not, create new instance.
self._logger = logging.getLogger('OA.DNS') if logger else Util.get_logger('OA.DNS',create_file=False)
# initialize required parameters.
self._scrtip_path = os.path.dirname(os.path.abspath(__file__))
self._date = date
self._table_name = "dns"
self._dns_results = []
self._limit = limit
self._data_path = None
self._ipynb_path = None
self._ingest_summary_path = None
self._dns_scores = []
self._dns_scores_headers = []
self._results_delimiter = '\t'
self._details_limit = 250
# get app configuration.
self._spot_conf = Util.get_spot_conf()
# get scores fields conf
conf_file = "{0}/dns_conf.json".format(self._scrtip_path)
self._conf = json.loads(open (conf_file).read(),object_pairs_hook=OrderedDict)
# initialize data engine
self._db = self._spot_conf.get('conf', 'DBNAME').replace("'", "").replace('"', '')
self._engine = Data(self._db,self._table_name ,self._logger)
def start(self):
####################
start = time.time()
####################
self._create_folder_structure()
self._add_ipynb()
self._get_dns_results()
self._add_tld_column()
self._add_reputation()
self._add_hh_and_severity()
self._add_iana()
self._add_network_context()
self._create_dns_scores_csv()
self._get_oa_details()
self._ingest_summary()
##################
end = time.time()
print(end - start)
##################
def _create_folder_structure(self):
# create date folder structure if it does not exist.
self._logger.info("Creating folder structure for OA (data and ipynb)")
self._data_path,self._ingest_summary_path,self._ipynb_path = Util.create_oa_folders("dns",self._date)
def _add_ipynb(self):
if os.path.isdir(self._ipynb_path):
self._logger.info("Adding edge investigation IPython Notebook")
shutil.copy("{0}/ipynb_templates/Edge_Investigation_master.ipynb".format(self._scrtip_path),"{0}/Edge_Investigation.ipynb".format(self._ipynb_path))
self._logger.info("Adding threat investigation IPython Notebook")
shutil.copy("{0}/ipynb_templates/Threat_Investigation_master.ipynb".format(self._scrtip_path),"{0}/Threat_Investigation.ipynb".format(self._ipynb_path))
else:
self._logger.error("There was a problem adding the IPython Notebooks, please check the directory exists.")
def _get_dns_results(self):
self._logger.info("Getting {0} Machine Learning Results from HDFS".format(self._date))
dns_results = "{0}/dns_results.csv".format(self._data_path)
# get hdfs path from conf file.
HUSER = self._spot_conf.get('conf', 'HUSER').replace("'", "").replace('"', '')
hdfs_path = "{0}/dns/scored_results/{1}/scores/dns_results.csv".format(HUSER,self._date)
# get results file from hdfs.
get_command = Util.get_ml_results_form_hdfs(hdfs_path,self._data_path)
self._logger.info("{0}".format(get_command))
# validate files exists
if os.path.isfile(dns_results):
# read number of results based in the limit specified.
self._logger.info("Reading {0} dns results file: {1}".format(self._date,dns_results))
self._dns_results = Util.read_results(dns_results,self._limit,self._results_delimiter)[:]
            if len(self._dns_results) == 0: self._logger.error("There are no DNS results.");sys.exit(1)
else:
self._logger.error("There was an error getting ML results from HDFS")
sys.exit(1)
# add headers.
self._logger.info("Adding headers")
self._dns_scores_headers = [ str(key) for (key,value) in self._conf['dns_score_fields'].items() ]
# add dns content.
self._dns_scores = [ conn[:] for conn in self._dns_results][:]
def _move_time_stamp(self,dns_data):
for dns in dns_data:
time_stamp = dns[1]
dns.remove(time_stamp)
dns.append(time_stamp)
return dns_data
def _create_dns_scores_csv(self):
dns_scores_csv = "{0}/dns_scores.csv".format(self._data_path)
dns_scores_final = self._move_time_stamp(self._dns_scores)
dns_scores_final.insert(0,self._dns_scores_headers)
Util.create_csv_file(dns_scores_csv,dns_scores_final)
# create bk file
dns_scores_bu_csv = "{0}/dns_scores_bu.csv".format(self._data_path)
Util.create_csv_file(dns_scores_bu_csv,dns_scores_final)
def _add_tld_column(self):
qry_name_col = self._conf['dns_results_fields']['dns_qry_name']
self._dns_scores = [conn + [ get_tld("http://" + str(conn[qry_name_col]), fail_silently=True) if "http://" not in str(conn[qry_name_col]) else get_tld(str(conn[qry_name_col]), fail_silently=True)] for conn in self._dns_scores ]
def _add_reputation(self):
# read configuration.
reputation_conf_file = "{0}/components/reputation/reputation_config.json".format(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
self._logger.info("Reading reputation configuration file: {0}".format(reputation_conf_file))
rep_conf = json.loads(open(reputation_conf_file).read())
# initialize reputation services.
self._rep_services = []
self._logger.info("Initializing reputation services.")
for service in rep_conf:
config = rep_conf[service]
module = __import__("components.reputation.{0}.{0}".format(service), fromlist=['Reputation'])
self._rep_services.append(module.Reputation(config,self._logger))
# get columns for reputation.
rep_cols = {}
indexes = [ int(value) for key, value in self._conf["add_reputation"].items()]
self._logger.info("Getting columns to add reputation based on config file: dns_conf.json".format())
for index in indexes:
col_list = []
for conn in self._dns_scores:
col_list.append(conn[index])
rep_cols[index] = list(set(col_list))
# get reputation per column.
self._logger.info("Getting reputation for each service in config")
rep_services_results = []
if self._rep_services :
for key,value in rep_cols.items():
rep_services_results = [ rep_service.check(None,value) for rep_service in self._rep_services]
rep_results = {}
for result in rep_services_results:
rep_results = {k: "{0}::{1}".format(rep_results.get(k, ""), result.get(k, "")).strip('::') for k in set(rep_results) | set(result)}
self._dns_scores = [ conn + [ rep_results[conn[key]] ] for conn in self._dns_scores ]
else:
self._dns_scores = [ conn + [""] for conn in self._dns_scores ]
def _add_hh_and_severity(self):
# add hh value and sev columns.
dns_date_index = self._conf["dns_results_fields"]["frame_time"]
self._dns_scores = [conn + [ filter(None,conn[dns_date_index].split(" "))[3].split(":")[0]] + [0] + [0] for conn in self._dns_scores ]
def _add_iana(self):
iana_conf_file = "{0}/components/iana/iana_config.json".format(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
if os.path.isfile(iana_conf_file):
iana_config = json.loads(open(iana_conf_file).read())
dns_iana = IanaTransform(iana_config["IANA"])
dns_qry_class_index = self._conf["dns_results_fields"]["dns_qry_class"]
dns_qry_type_index = self._conf["dns_results_fields"]["dns_qry_type"]
dns_qry_rcode_index = self._conf["dns_results_fields"]["dns_qry_rcode"]
self._dns_scores = [ conn + [ dns_iana.get_name(conn[dns_qry_class_index],"dns_qry_class")] + [dns_iana.get_name(conn[dns_qry_type_index],"dns_qry_type")] + [ dns_iana.get_name(conn[dns_qry_rcode_index],"dns_qry_rcode") ] for conn in self._dns_scores ]
else:
self._dns_scores = [ conn + ["","",""] for conn in self._dns_scores ]
def _add_network_context(self):
nc_conf_file = "{0}/components/nc/nc_config.json".format(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
if os.path.isfile(nc_conf_file):
nc_conf = json.loads(open(nc_conf_file).read())["NC"]
dns_nc = NetworkContext(nc_conf,self._logger)
ip_dst_index = self._conf["dns_results_fields"]["ip_dst"]
self._dns_scores = [ conn + [dns_nc.get_nc(conn[ip_dst_index])] for conn in self._dns_scores ]
else:
self._dns_scores = [ conn + [""] for conn in self._dns_scores ]
def _get_oa_details(self):
self._logger.info("Getting OA DNS suspicious details/chord diagram")
# start suspicious connects details process.
p_sp = Process(target=self._get_suspicious_details)
p_sp.start()
# start chord diagram process.
p_dn = Process(target=self._get_dns_dendrogram)
p_dn.start()
p_sp.join()
p_dn.join()
def _get_suspicious_details(self):
iana_conf_file = "{0}/components/iana/iana_config.json".format(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
if os.path.isfile(iana_conf_file):
iana_config = json.loads(open(iana_conf_file).read())
dns_iana = IanaTransform(iana_config["IANA"])
for conn in self._dns_scores:
# get data to query
date=conn[self._conf["dns_score_fields"]["frame_time"]].split(" ")
date = filter(None,date)
if len(date) == 5:
year=date[2]
month=datetime.datetime.strptime(date[0], '%b').strftime('%m')
day=date[1]
hh=conn[self._conf["dns_score_fields"]["hh"]]
dns_qry_name = conn[self._conf["dns_score_fields"]["dns_qry_name"]]
self._get_dns_details(dns_qry_name,year,month,day,hh,dns_iana)
def _get_dns_details(self,dns_qry_name,year,month,day,hh,dns_iana):
limit = self._details_limit
edge_file ="{0}/edge-{1}_{2}_00.csv".format(self._data_path,dns_qry_name.replace("/","-"),hh)
edge_tmp ="{0}/edge-{1}_{2}_00.tmp".format(self._data_path,dns_qry_name.replace("/","-"),hh)
if not os.path.isfile(edge_file):
dns_qry = ("SELECT frame_time,frame_len,ip_dst,ip_src,dns_qry_name,dns_qry_class,dns_qry_type,dns_qry_rcode,dns_a FROM {0}.{1} WHERE y={2} AND m={3} AND d={4} AND dns_qry_name LIKE '%{5}%' AND h={6} LIMIT {7};").format(self._db,self._table_name,year,month,day,dns_qry_name,hh,limit)
# execute query
try:
self._engine.query(dns_qry,edge_tmp)
except:
self._logger.error("ERROR. Edge file couldn't be created for {0}, skipping this step".format(dns_qry_name))
else:
# add IANA to results.
if dns_iana:
update_rows = []
self._logger.info("Adding IANA translation to details results")
with open(edge_tmp) as dns_details_csv:
rows = csv.reader(dns_details_csv, delimiter=',', quotechar='|')
try:
next(rows)
update_rows = [[conn[0]] + [conn[1]] + [conn[2]] + [conn[3]] + [conn[4]] + [dns_iana.get_name(conn[5],"dns_qry_class")] + [dns_iana.get_name(conn[6],"dns_qry_type")] + [dns_iana.get_name(conn[7],"dns_qry_rcode")] + [conn[8]] for conn in rows]
update_rows = filter(None, update_rows)
header = [ "frame_time", "frame_len", "ip_dst","ip_src","dns_qry_name","dns_qry_class_name","dns_qry_type_name","dns_qry_rcode_name","dns_a" ]
update_rows.insert(0,header)
except IndexError:
pass
else:
self._logger.info("WARNING: NO IANA configured.")
# create edge file.
self._logger.info("Creating edge file:{0}".format(edge_file))
with open(edge_file,'wb') as dns_details_edge:
writer = csv.writer(dns_details_edge, quoting=csv.QUOTE_ALL)
if update_rows:
writer.writerows(update_rows)
else:
shutil.copy(edge_tmp,edge_file)
os.remove(edge_tmp)
def _get_dns_dendrogram(self):
limit = self._details_limit
for conn in self._dns_scores:
date=conn[self._conf["dns_score_fields"]["frame_time"]].split(" ")
date = filter(None,date)
if len(date) == 5:
year=date[2]
month=datetime.datetime.strptime(date[0], '%b').strftime('%m')
day=date[1]
ip_dst=conn[self._conf["dns_score_fields"]["ip_dst"]]
self._get_dendro(self._db,self._table_name,ip_dst,year,month,day, limit)
def _get_dendro(self,db,table,ip_dst,year,month,day,limit):
dendro_file = "{0}/dendro-{1}.csv".format(self._data_path,ip_dst)
if not os.path.isfile(dendro_file):
dndro_qry = ("SELECT dns_a, dns_qry_name, ip_dst FROM (SELECT susp.ip_dst, susp.dns_qry_name, susp.dns_a FROM {0}.{1} as susp WHERE susp.y={2} AND susp.m={3} AND susp.d={4} AND susp.ip_dst='{5}' LIMIT {6}) AS tmp GROUP BY dns_a, dns_qry_name, ip_dst").format(db,table,year,month,day,ip_dst,limit)
# execute query
self._engine.query(dndro_qry,dendro_file)
def _ingest_summary(self):
# get date parameters.
yr = self._date[:4]
mn = self._date[4:6]
dy = self._date[6:]
self._logger.info("Getting ingest summary data for the day")
ingest_summary_cols = ["date","total"]
result_rows = []
df_filtered = pd.DataFrame()
ingest_summary_file = "{0}/is_{1}{2}.csv".format(self._ingest_summary_path,yr,mn)
ingest_summary_tmp = "{0}.tmp".format(ingest_summary_file)
if os.path.isfile(ingest_summary_file):
df = pd.read_csv(ingest_summary_file, delimiter=',')
#discards previous rows from the same date
df_filtered = df[df['date'].str.contains("{0}-{1}-{2}".format(yr, mn, dy)) == False]
else:
df = pd.DataFrame()
# get ingest summary.
ingest_summary_qry = ("SELECT frame_time, COUNT(*) as total "
" FROM {0}.{1}"
" WHERE y={2} AND m={3} AND d={4} "
" AND unix_tstamp IS NOT NULL AND frame_time IS NOT NULL"
" AND frame_len IS NOT NULL AND dns_qry_name IS NOT NULL"
" AND ip_src IS NOT NULL "
" AND (dns_qry_class IS NOT NULL AND dns_qry_type IS NOT NULL AND dns_qry_rcode IS NOT NULL ) "
" GROUP BY frame_time;")
ingest_summary_qry = ingest_summary_qry.format(self._db,self._table_name, yr, mn, dy)
results_file = "{0}/results_{1}.csv".format(self._ingest_summary_path,self._date)
self._engine.query(ingest_summary_qry,output_file=results_file,delimiter=",")
if os.path.isfile(results_file):
df_results = pd.read_csv(results_file, delimiter=',')
# Forms a new dataframe splitting the minutes from the time column
df_new = pd.DataFrame([["{0}-{1}-{2} {3}:{4}".format(yr, mn, dy,val['frame_time'].split(" ")[3].split(":")[0].zfill(2),val['frame_time'].split(" ")[3].split(":")[1].zfill(2)), int(val['total']) if not math.isnan(val['total']) else 0 ] for key,val in df_results.iterrows()],columns = ingest_summary_cols)
#Groups the data by minute
sf = df_new.groupby(by=['date'])['total'].sum()
df_per_min = pd.DataFrame({'date':sf.index, 'total':sf.values})
df_final = df_filtered.append(df_per_min, ignore_index=True)
df_final.to_csv(ingest_summary_tmp,sep=',', index=False)
os.remove(results_file)
os.rename(ingest_summary_tmp,ingest_summary_file)
else:
self._logger.info("No data found for the ingest summary")
| kpeiruza/incubator-spot | spot-oa/oa/dns/dns_oa.py | Python | apache-2.0 | 17,663 | 0.018174 |
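A minimal driver sketch for the OA class above; the module path follows the corpus entry (spot-oa/oa/dns/dns_oa.py), the date and limit are example values, and actually running it requires the full spot-oa environment (spot.conf, the dns Hive table and HDFS scored results).
import logging
from oa.dns.dns_oa import OA   # path inferred from the corpus entry
logger = logging.getLogger('OA.DNS')
oa_run = OA(date='20170131', limit=500, logger=logger)
oa_run.start()   # ML results -> TLD/reputation/IANA/network-context enrichment -> CSVs + ingest summary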
# encoding: utf-8
"""
IMPORTANT - COLOUR SUPPORT IS CURRENTLY EXTREMELY EXPERIMENTAL. THE API MAY CHANGE, AND NO DEFAULT
WIDGETS CURRENTLY TAKE ADVANTAGE OF THEME SUPPORT AT ALL.
"""
import curses
from . import global_options
def disable_color():
global_options.DISABLE_ALL_COLORS = True
def enable_color():
global_options.DISABLE_ALL_COLORS = False
class ThemeManager(object):
_colors_to_define = (
# DO NOT DEFINE THIS COLOR - THINGS BREAK
#('WHITE_BLACK', DO_NOT_DO_THIS, DO_NOT_DO_THIS),
('BLACK_WHITE', curses.COLOR_BLACK, curses.COLOR_WHITE),
#('BLACK_ON_DEFAULT', curses.COLOR_BLACK, -1),
#('WHITE_ON_DEFAULT', curses.COLOR_WHITE, -1),
('BLUE_BLACK', curses.COLOR_BLUE, curses.COLOR_BLACK),
('CYAN_BLACK', curses.COLOR_CYAN, curses.COLOR_BLACK),
('GREEN_BLACK', curses.COLOR_GREEN, curses.COLOR_BLACK),
('MAGENTA_BLACK', curses.COLOR_MAGENTA, curses.COLOR_BLACK),
('RED_BLACK', curses.COLOR_RED, curses.COLOR_BLACK),
('YELLOW_BLACK', curses.COLOR_YELLOW, curses.COLOR_BLACK),
('BLACK_RED', curses.COLOR_BLACK, curses.COLOR_RED),
('BLACK_GREEN', curses.COLOR_BLACK, curses.COLOR_GREEN),
('BLACK_YELLOW', curses.COLOR_BLACK, curses.COLOR_YELLOW),
('BLUE_WHITE', curses.COLOR_BLUE, curses.COLOR_WHITE),
('CYAN_WHITE', curses.COLOR_CYAN, curses.COLOR_WHITE),
('GREEN_WHITE', curses.COLOR_GREEN, curses.COLOR_WHITE),
('MAGENTA_WHITE', curses.COLOR_MAGENTA, curses.COLOR_WHITE),
('RED_WHITE', curses.COLOR_RED, curses.COLOR_WHITE),
('YELLOW_WHITE', curses.COLOR_YELLOW, curses.COLOR_WHITE),
)
default_colors = {
'DEFAULT' : 'WHITE_BLACK',
'FORMDEFAULT' : 'WHITE_BLACK',
'NO_EDIT' : 'BLUE_BLACK',
'STANDOUT' : 'CYAN_BLACK',
'CURSOR' : 'WHITE_BLACK',
'LABEL' : 'GREEN_BLACK',
'LABELBOLD' : 'WHITE_BLACK',
'CONTROL' : 'YELLOW_BLACK',
'IMPORTANT' : 'GREEN_BLACK',
'SAFE' : 'GREEN_BLACK',
'WARNING' : 'YELLOW_BLACK',
'DANGER' : 'RED_BLACK',
'CRITICAL' : 'BLACK_RED',
'GOOD' : 'GREEN_BLACK',
'GOODHL' : 'GREEN_BLACK',
'VERYGOOD' : 'BLACK_GREEN',
'CAUTION' : 'YELLOW_BLACK',
'CAUTIONHL' : 'BLACK_YELLOW',
}
def __init__(self):
#curses.use_default_colors()
self._defined_pairs = {}
self._names = {}
try:
self._max_pairs = curses.COLOR_PAIRS - 1
do_color = True
except AttributeError:
# curses.start_color has failed or has not been called
do_color = False
# Disable all color use across the application
disable_color()
if do_color and curses.has_colors():
self.initialize_pairs()
self.initialize_names()
def find_pair(self, caller, request='DEFAULT'):
if not curses.has_colors() or global_options.DISABLE_ALL_COLORS:
return False
if request == 'DEFAULT':
request = caller.color
# Locate the requested color pair. Default to default if not found.
try:
pair = self._defined_pairs[self._names[request]]
        except KeyError:
pair = self._defined_pairs[self._names['DEFAULT']]
# now make the actual attribute
color_attribute = curses.color_pair(pair[0])
return color_attribute
def set_default(self, caller):
return False
def initialize_pairs(self):
# White on Black is fixed as color_pair 0
self._defined_pairs['WHITE_BLACK'] = (0, curses.COLOR_WHITE, curses.COLOR_BLACK)
for cp in self.__class__._colors_to_define:
if cp[0] == 'WHITE_BLACK':
# silently protect the user from breaking things.
continue
self.initalize_pair(cp[0], cp[1], cp[2])
def initialize_names(self):
self._names.update(self.__class__.default_colors)
def initalize_pair(self, name, fg, bg):
#Initialize a color_pair for the required color and return the number.
#Raise an exception if this is not possible.
if (len(list(self._defined_pairs.keys())) + 1) == self._max_pairs:
raise Exception("Too many colors")
_this_pair_number = len(list(self._defined_pairs.keys())) + 1
curses.init_pair(_this_pair_number, fg, bg)
self._defined_pairs[name] = (_this_pair_number, fg, bg)
return _this_pair_number
def get_pair_number(self, name):
return self._defined_pairs[name][0]
| tescalada/npyscreen-restructure | npyscreen/ThemeManagers.py | Python | bsd-2-clause | 4,810 | 0.005821 |
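An illustrative sketch of asking the manager for a curses attribute; the stub widget only carries the color attribute that find_pair() falls back to, and curses.wrapper() is relied on to have called start_color() before ThemeManager is built.
import curses
def demo(stdscr):
    tm = ThemeManager()                      # the class defined above
    class _Stub(object):
        color = 'WARNING'                    # maps to YELLOW_BLACK in default_colors
    attr = tm.find_pair(_Stub(), 'DANGER')   # pass 'DEFAULT' to use _Stub.color instead
    stdscr.addstr(0, 0, 'hello', attr or 0)  # find_pair() returns False when colors are disabled
    stdscr.getch()
# curses.wrapper(demo)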
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('naf_autoticket', '0023_hostdevice_hostlocation'),
]
operations = [
migrations.CreateModel(
name='AlertCorrelationWeight',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('AlertCompare', models.CharField(max_length=100)),
('TimeWeight', models.CharField(max_length=50)),
('LocationWeight', models.CharField(max_length=255, null=True)),
('LogicalWeight', models.CharField(max_length=255, null=True)),
('AlertInfo', models.ForeignKey(to='naf_autoticket.AlertInfo')),
],
options={
'db_table': 'nafautoticket_alertcorrelationweight',
'verbose_name_plural': 'alertcorrelationweight',
},
),
]
| kevinnguyeneng/django-uwsgi-nginx | app/naf_autoticket/migrations/0024_alertcorrelationweight.py | Python | gpl-3.0 | 1,043 | 0.002876 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
import pythymio
import random
from gardenworld import *
init('info2_1')
with pythymio.thymio(["acc"],[]) as Thym:
state = dict([])
state["time"] = 0
state["delay"] = 10
def dispatch(evtid, evt_name, evt_args):
# https://www.thymio.org/en:thymioapi prox freq is 16Hz
if evt_name == "fwd.acc": # every 0.0625 sec
state["time"] += 0.0625
state["delay"] -= 1
if state["delay"] < 0:
if 7 < evt_args[1] < 14:
if evt_args[0] > 10:
state["delay"] = 20
tg()
elif evt_args[0] < -10:
state["delay"] = 20
td()
elif evt_args[1] > 20 and abs(evt_args[0]) < 8:
state["delay"] = 10
av()
elif evt_args[1] < 5:
if evt_args[0] > 10:
state["delay"] = 20
dp()
elif evt_args[0] < -10:
state["delay"] = 20
ra()
else: # Wat?
print evt_name
# Now lets start the loopy thing
Thym.loop(dispatch)
print "state is %s" % state
print "Sayonara"
| pierreboudes/pyThymio | garden_real.py | Python | lgpl-3.0 | 1,328 | 0.003765 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Week 2: Project 1: Download and print json from reddit's main page.
This file contains methods to download and parse Reddit.com's main page.
One could do this hourly to sample reddit activity, and plot it over some metric.
hmmm....
CHANGELOG
version 1.1. Fixed bug to send output to the wrong path.
Tweaked method attributes.
Pretty print is more obvious
version 1.2. Set time to sleep 3 seconds. Gets around timeout bug.
"""
__author__ = 'Bernie Hogan'
__version__= '1.2'
import json
# import simplejson as json # alternate for < 2.6
import urllib2
import string
import os
import time
PATH = (os.getcwd())
def getRedditJson(count=0,after="",url = "http://www.reddit.com/.json",prettyPrint=False):
'''getRedditJson will append an after and a count to a reddit.com url.
It can also be used with subreddits/elsewhere, using the optional url'''
if count > 0:
url += "?count=%d&after=%s" % (count,after)
redditfile = urllib2.urlopen(url)
if prettyPrint:
return downloadJsonToPretty(url,"reddit-json_%d_%s.json" % (count,after))
else:
return json.load( redditfile )
def addToTable(jsondata,fileoutpath,count,header=False):
'''This method takes a json file and adds it to a table.
Notice the header is only added if the count is 0.
	- Certainly, there\'s a tidier way to do this?'''
outstr = ""
queries = ["rank","ups","downs"]
if count == 0:
fileout = open(fileoutpath,'w')
outstr += "queries\tups\tdowns\tscore\tcomments\tsubmitter\n"
else:
fileout = open(fileoutpath,'a')
for c,i in enumerate(jsondata):
outlist = []
outlist.append(str(c+count+1))
outlist.append(i["data"]["ups"])
outlist.append(i["data"]["downs"])
outlist.append(i["data"]["score"])
outlist.append(i["data"]["num_comments"])
outlist.append(i["data"]["author"])
outstr += string.join([unicode(x) for x in outlist],'\t') + "\n"
fileout.write(outstr)
outstr = ""
fileout.close()
# Note: os.sep below was not in the earlier version,
# causing file to be written in dir immediately above.
def getIteratedReddits(max=200,url="http://www.reddit.com/.json"):
'''This is the main controller method. Notice _i_ is in a range stepping by 25.
This is a user configurable setting, so if this code worked on a logged in user
it would have to be changed. I look at 50 reddits per page, for example.'''
after = ""
step = 25
for i in range(0,max,step):
print "Downloading stories from %d to %d (after %s)" % (i,i+step,after)
reddit = getRedditJson(i,after,url)
time.sleep(3)
addToTable(reddit["data"]["children"],PATH+os.sep+"redditstats.txt",i)
after = reddit["data"]["after"]
print after
print "Finished downloading. File available at %s" % PATH + os.sep+"redditstats.txt"
# This is an unused helper method.
# Use it to have a cleaner look at json than is provided raw from the server.
def downloadJsonToPretty(url = "http://www.reddit.com/.json", name="prettyjson.txt"):
fileout = open(PATH + os.sep + name, 'w')
jsonfile = json.load(urllib2.urlopen(url))
fileout.write(json.dumps(jsonfile, indent = 4))
fileout.close()
return jsonfile
# This method calls the other two.
# See method above for optional arguments.
getIteratedReddits(150)
# This method will print the main page by default to prettyjson.txt.
# downloadJsonToPretty() | oxfordinternetinstitute/scriptingcourse | Lecture 2/PR_printRedditJson1.1.py | Python | gpl-3.0 | 3,376 | 0.039396 |
# encoding: utf-8
import os
def emulator_rom_launch_command(emulator, rom):
"""Generates a command string that will launch `rom` with `emulator` (using
the format provided by the user). The return value of this function should
be suitable to use as the `Exe` field of a Steam shortcut"""
# Normalizing the strings is just removing any leading/trailing quotes.
  # The beautiful thing is that strip does nothing if it doesn't contain quotes,
# so normalizing it then adding quotes should do what I want 100% of the time
normalize = lambda s: s.strip("\"")
add_quotes = lambda s: "\"%s\"" % s
# We don't know if the user put quotes around the emulator location. If
  # so, we don't want to add another pair and screw things up.
  #
  # The user didn't give us the ROM information, but screw it, I already
# have some code to add quotes to a string, might as well use it.
quoted_location = add_quotes(normalize(emulator.location))
quoted_rom = add_quotes(normalize(rom.path))
# The format string contains a bunch of specifies that users can use to
# substitute values in at runtime. Right now the only supported values are:
# %l - The location of the emulator (to avoid sync bugs)
# %r - The location of the ROM (so the emulator knows what to launch)
  # %fn - The ROM filename without its extension (for emulators that utilize separate configuration files)
#
# More may be added in the future, but for now this is what we support
return (
emulator.format
.replace("%l", quoted_location)
.replace("%r", quoted_rom)
.replace("%fn", os.path.splitext(os.path.basename(rom.path))[0])
)
def emulator_startdir(emulator):
"""Returns the directory which stores the emulator. The return value of this
function should be suitable to use as the 'StartDir' field of a Steam
shortcut"""
return os.path.dirname(emulator.location)
| scottrice/Ice | ice/emulators.py | Python | mit | 1,887 | 0.015898 |
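A sketch of the placeholder substitution with stand-in objects; the namedtuples merely mimic the attributes read above (location, format, path) and are not Ice's real model classes.
from collections import namedtuple
Emulator = namedtuple('Emulator', ['location', 'format'])
Rom = namedtuple('Rom', ['path'])
emu = Emulator(location='/usr/games/snes9x', format='%l -fullscreen %r')
rom = Rom(path='/home/me/roms/Super Metroid.smc')
print(emulator_rom_launch_command(emu, rom))
# -> "/usr/games/snes9x" -fullscreen "/home/me/roms/Super Metroid.smc"
print(emulator_startdir(emu))   # -> /usr/games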
# CVS conversion code inspired by hg-cvs-import and git-cvsimport
import os, locale, re, socket
from cStringIO import StringIO
from mercurial import util
from common import NoRepo, commit, converter_source, checktool
class convert_cvs(converter_source):
def __init__(self, ui, path, rev=None):
super(convert_cvs, self).__init__(ui, path, rev=rev)
cvs = os.path.join(path, "CVS")
if not os.path.exists(cvs):
raise NoRepo("%s does not look like a CVS checkout" % path)
self.cmd = ui.config('convert', 'cvsps', 'cvsps -A -u --cvs-direct -q')
cvspsexe = self.cmd.split(None, 1)[0]
for tool in (cvspsexe, 'cvs'):
checktool(tool)
self.changeset = {}
self.files = {}
self.tags = {}
self.lastbranch = {}
self.parent = {}
self.socket = None
self.cvsroot = file(os.path.join(cvs, "Root")).read()[:-1]
self.cvsrepo = file(os.path.join(cvs, "Repository")).read()[:-1]
self.encoding = locale.getpreferredencoding()
self._parse()
self._connect()
def _parse(self):
if self.changeset:
return
maxrev = 0
cmd = self.cmd
if self.rev:
# TODO: handle tags
try:
# patchset number?
maxrev = int(self.rev)
except ValueError:
try:
# date
util.parsedate(self.rev, ['%Y/%m/%d %H:%M:%S'])
cmd = '%s -d "1970/01/01 00:00:01" -d "%s"' % (cmd, self.rev)
except util.Abort:
raise util.Abort('revision %s is not a patchset number or date' % self.rev)
d = os.getcwd()
try:
os.chdir(self.path)
id = None
state = 0
filerevids = {}
for l in util.popen(cmd):
if state == 0: # header
if l.startswith("PatchSet"):
id = l[9:-2]
if maxrev and int(id) > maxrev:
# ignore everything
state = 3
elif l.startswith("Date"):
date = util.parsedate(l[6:-1], ["%Y/%m/%d %H:%M:%S"])
date = util.datestr(date)
elif l.startswith("Branch"):
branch = l[8:-1]
self.parent[id] = self.lastbranch.get(branch, 'bad')
self.lastbranch[branch] = id
elif l.startswith("Ancestor branch"):
ancestor = l[17:-1]
# figure out the parent later
self.parent[id] = self.lastbranch[ancestor]
elif l.startswith("Author"):
author = self.recode(l[8:-1])
elif l.startswith("Tag:") or l.startswith("Tags:"):
t = l[l.index(':')+1:]
t = [ut.strip() for ut in t.split(',')]
if (len(t) > 1) or (t[0] and (t[0] != "(none)")):
self.tags.update(dict.fromkeys(t, id))
elif l.startswith("Log:"):
# switch to gathering log
state = 1
log = ""
elif state == 1: # log
if l == "Members: \n":
# switch to gathering members
files = {}
oldrevs = []
log = self.recode(log[:-1])
state = 2
else:
# gather log
log += l
elif state == 2: # members
if l == "\n": # start of next entry
state = 0
p = [self.parent[id]]
if id == "1":
p = []
if branch == "HEAD":
branch = ""
if branch:
latest = None
# the last changeset that contains a base
# file is our parent
for r in oldrevs:
latest = max(filerevids.get(r, None), latest)
if latest:
p = [latest]
# add current commit to set
c = commit(author=author, date=date, parents=p,
desc=log, branch=branch)
self.changeset[id] = c
self.files[id] = files
else:
colon = l.rfind(':')
file = l[1:colon]
rev = l[colon+1:-2]
oldrev, rev = rev.split("->")
files[file] = rev
# save some information for identifying branch points
oldrevs.append("%s:%s" % (oldrev, file))
filerevids["%s:%s" % (rev, file)] = id
elif state == 3:
# swallow all input
continue
self.heads = self.lastbranch.values()
finally:
os.chdir(d)
def _connect(self):
root = self.cvsroot
conntype = None
user, host = None, None
cmd = ['cvs', 'server']
self.ui.status("connecting to %s\n" % root)
if root.startswith(":pserver:"):
root = root[9:]
m = re.match(r'(?:(.*?)(?::(.*?))?@)?([^:\/]*)(?::(\d*))?(.*)',
root)
if m:
conntype = "pserver"
user, passw, serv, port, root = m.groups()
if not user:
user = "anonymous"
if not port:
port = 2401
else:
port = int(port)
format0 = ":pserver:%s@%s:%s" % (user, serv, root)
format1 = ":pserver:%s@%s:%d%s" % (user, serv, port, root)
if not passw:
passw = "A"
pf = open(os.path.join(os.environ["HOME"], ".cvspass"))
for line in pf.read().splitlines():
part1, part2 = line.split(' ', 1)
if part1 == '/1':
# /1 :pserver:user@example.com:2401/cvsroot/foo Ah<Z
part1, part2 = part2.split(' ', 1)
format = format1
else:
# :pserver:user@example.com:/cvsroot/foo Ah<Z
format = format0
if part1 == format:
passw = part2
break
pf.close()
sck = socket.socket()
sck.connect((serv, port))
sck.send("\n".join(["BEGIN AUTH REQUEST", root, user, passw,
"END AUTH REQUEST", ""]))
if sck.recv(128) != "I LOVE YOU\n":
raise util.Abort("CVS pserver authentication failed")
self.writep = self.readp = sck.makefile('r+')
if not conntype and root.startswith(":local:"):
conntype = "local"
root = root[7:]
if not conntype:
# :ext:user@host/home/user/path/to/cvsroot
if root.startswith(":ext:"):
root = root[5:]
m = re.match(r'(?:([^@:/]+)@)?([^:/]+):?(.*)', root)
# Do not take Windows path "c:\foo\bar" for a connection strings
if os.path.isdir(root) or not m:
conntype = "local"
else:
conntype = "rsh"
user, host, root = m.group(1), m.group(2), m.group(3)
if conntype != "pserver":
if conntype == "rsh":
rsh = os.environ.get("CVS_RSH") or "ssh"
if user:
cmd = [rsh, '-l', user, host] + cmd
else:
cmd = [rsh, host] + cmd
# popen2 does not support argument lists under Windows
cmd = [util.shellquote(arg) for arg in cmd]
cmd = util.quotecommand(' '.join(cmd))
self.writep, self.readp = os.popen2(cmd, 'b')
self.realroot = root
self.writep.write("Root %s\n" % root)
self.writep.write("Valid-responses ok error Valid-requests Mode"
" M Mbinary E Checked-in Created Updated"
" Merged Removed\n")
self.writep.write("valid-requests\n")
self.writep.flush()
r = self.readp.readline()
if not r.startswith("Valid-requests"):
raise util.Abort("server sucks")
if "UseUnchanged" in r:
self.writep.write("UseUnchanged\n")
self.writep.flush()
r = self.readp.readline()
def getheads(self):
return self.heads
def _getfile(self, name, rev):
def chunkedread(fp, count):
# file-objects returned by socked.makefile() do not handle
# large read() requests very well.
chunksize = 65536
output = StringIO()
while count > 0:
data = fp.read(min(count, chunksize))
if not data:
raise util.Abort("%d bytes missing from remote file" % count)
count -= len(data)
output.write(data)
return output.getvalue()
if rev.endswith("(DEAD)"):
raise IOError
args = ("-N -P -kk -r %s --" % rev).split()
args.append(self.cvsrepo + '/' + name)
for x in args:
self.writep.write("Argument %s\n" % x)
self.writep.write("Directory .\n%s\nco\n" % self.realroot)
self.writep.flush()
data = ""
while 1:
line = self.readp.readline()
if line.startswith("Created ") or line.startswith("Updated "):
self.readp.readline() # path
self.readp.readline() # entries
mode = self.readp.readline()[:-1]
count = int(self.readp.readline()[:-1])
data = chunkedread(self.readp, count)
elif line.startswith(" "):
data += line[1:]
elif line.startswith("M "):
pass
elif line.startswith("Mbinary "):
count = int(self.readp.readline()[:-1])
data = chunkedread(self.readp, count)
else:
if line == "ok\n":
return (data, "x" in mode and "x" or "")
elif line.startswith("E "):
self.ui.warn("cvs server: %s\n" % line[2:])
elif line.startswith("Remove"):
l = self.readp.readline()
l = self.readp.readline()
if l != "ok\n":
raise util.Abort("unknown CVS response: %s" % l)
else:
raise util.Abort("unknown CVS response: %s" % line)
def getfile(self, file, rev):
data, mode = self._getfile(file, rev)
self.modecache[(file, rev)] = mode
return data
def getmode(self, file, rev):
return self.modecache[(file, rev)]
def getchanges(self, rev):
self.modecache = {}
files = self.files[rev]
cl = files.items()
cl.sort()
return (cl, {})
def getcommit(self, rev):
return self.changeset[rev]
def gettags(self):
return self.tags
def getchangedfiles(self, rev, i):
files = self.files[rev].keys()
files.sort()
return files
| carlgao/lenga | images/lenny64-peon/usr/share/python-support/mercurial-common/hgext/convert/cvs.py | Python | mit | 11,997 | 0.001584 |
#!/usr/bin/env python
##\author Dominik Kirchner
##\brief Publishes a RepairAction message to exercise the repair executor (redundancy-replace of the GPS localisation component)
PKG = 'rosha_repair_executor'
import roslib; roslib.load_manifest(PKG)
import rospy
from time import sleep
#from diagnostic_msgs.msg import DiagnosticArray, DiagnosticStatus, KeyValue
from rosha_msgs.msg import RepairAction
if __name__ == '__main__':
rospy.init_node('repair_action_pub')
pub = rospy.Publisher('/repair_action', RepairAction)
#pub = rospy.Publisher('/testOut4', RepairAction)
msg = RepairAction()
msg.robotId = 12
#
# redundancy replace loc
#
msg.repairActionToPerform = 32
msg.compName = "GPS"
msg.compId = -1
msg.msgType = ""
#pub.publish(msg)
#sleep(2)
while not rospy.is_shutdown():
pub.publish(msg)
sleep(5)
| cott81/rosha | rosha/rosha_repair_executor/test/repair_action_RedundantLoc.py | Python | lgpl-3.0 | 905 | 0.01989 |
# -- coding: utf-8 --
# ===========================================================================
# eXe
# Copyright 2012, Pedro Peña Pérez, Open Phoenix IT
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
# ===========================================================================
'''
@author: Pedro Peña Pérez
'''
import sys
import logging
from exe.engine.persistxml import encodeObjectToXML
from exe.engine.path import Path
from exe.engine.package import Package
from exe.export.scormexport import ScormExport
from exe.export.imsexport import IMSExport
from exe.export.websiteexport import WebsiteExport
from exe.export.singlepageexport import SinglePageExport
from exe.export.xliffexport import XliffExport
from exe.export.epub3export import Epub3Export
from exe.export.textexport import TextExport
from exe.export.epub3subexport import Epub3SubExport
LOG = logging.getLogger(__name__)
ENCODING = sys.stdout.encoding or "UTF-8"
class CmdlineExporter(object):
extensions = {'xml': '.xml',
'scorm12': '.zip',
'scorm2004': '.zip',
'agrega': '.zip',
'ims': '.zip',
'website': '',
'webzip': '.zip',
'singlepage': '',
'xliff': '.xlf',
'epub3': '.epub',
'report': '.csv',
'text': '.txt'
}
def __init__(self, config, options):
self.config = config
self.options = options
self.web_dir = Path(self.config.webDir)
self.styles_dir = None
def do_export(self, inputf, outputf):
if hasattr(self, 'export_' + self.options["export"]):
LOG.debug("Exporting to type %s, in: %s, out: %s, overwrite: %s" \
% (self.options["export"], inputf, outputf, str(self.options["overwrite"])))
if not outputf:
if self.options["export"] in ('website', 'singlepage'):
outputf = inputf.rsplit(".elp")[0]
else:
outputf = inputf + self.extensions[self.options["export"]]
outputfp = Path(outputf)
if outputfp.exists() and not self.options["overwrite"]:
error = _(u'"%s" already exists.\nPlease try again \
with a different filename') % outputf
raise Exception(error.encode(ENCODING))
else:
if outputfp.exists() and self.options["overwrite"]:
if outputfp.isdir():
for filen in outputfp.walkfiles():
filen.remove()
outputfp.rmdir()
else:
outputfp.remove()
pkg = Package.load(inputf)
LOG.debug("Package %s loaded" % (inputf))
if not pkg:
error = _(u"Invalid input package")
raise Exception(error.encode(ENCODING))
self.styles_dir = self.config.stylesDir / pkg.style
LOG.debug("Styles dir: %s" % (self.styles_dir))
pkg.exportSource = self.options['editable']
getattr(self, 'export_' + self.options["export"])(pkg, outputf)
return outputf
else:
raise Exception(_(u"Export format not implemented")\
.encode(ENCODING))
def export_xml(self, pkg, outputf):
open(outputf, "w").write(encodeObjectToXML(pkg))
def export_scorm12(self, pkg, outputf):
scormExport = ScormExport(self.config, self.styles_dir, outputf,
'scorm1.2')
pkg.scowsinglepage = self.options['single-page']
pkg.scowwebsite = self.options['website']
scormExport.export(pkg)
def export_scorm2004(self, pkg, outputf):
scormExport = ScormExport(self.config, self.styles_dir, outputf,
'scorm2004')
pkg.scowsinglepage = self.options['single-page']
pkg.scowwebsite = self.options['website']
scormExport.export(pkg)
def export_ims(self, pkg, outputf):
imsExport = IMSExport(self.config, self.styles_dir, outputf)
imsExport.export(pkg)
def export_website(self, pkg, outputf):
outputfp = Path(outputf)
outputfp.makedirs()
websiteExport = WebsiteExport(self.config, self.styles_dir, outputf)
websiteExport.export(pkg)
def export_webzip(self, pkg, outputf):
websiteExport = WebsiteExport(self.config, self.styles_dir, outputf)
websiteExport.exportZip(pkg)
def export_singlepage(self, pkg, outputf, print_flag=0):
images_dir = self.web_dir.joinpath('images')
scripts_dir = self.web_dir.joinpath('scripts')
css_dir = self.web_dir.joinpath('css')
templates_dir = self.web_dir.joinpath('templates')
singlePageExport = SinglePageExport(self.styles_dir, outputf, \
images_dir, scripts_dir, css_dir, templates_dir)
singlePageExport.export(pkg, print_flag)
def export_xliff(self, pkg, outputf):
xliff = XliffExport(self.config, outputf, \
source_copied_in_target=self.options["copy-source"], \
wrap_cdata=self.options["wrap-cdata"])
xliff.export(pkg)
def export_epub3(self, pkg, outputf):
epub3Export = Epub3Export(self.config, self.styles_dir, outputf)
epub3Export.export(pkg)
def export_subepub3(self, pkg, outputf):
epub3SubExport = Epub3SubExport(self.config, self.styles_dir, outputf)
epub3SubExport.export(pkg)
def export_report(self, pkg, outputf):
websiteExport = WebsiteExport(self.config, self.styles_dir, outputf, report=True)
websiteExport.export(pkg)
def export_text(self, pkg, outputf):
        textExport = TextExport(outputf)
textExport.export(pkg)
textExport.save(outputf)
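# Minimal driver sketch (assumptions: 'app_config' is an initialised eXe Config
# object, 'course.elp' is a placeholder input path, and the option keys mirror
# those consumed by do_export above):
#   options = {'export': 'website', 'overwrite': True, 'editable': False,
#              'single-page': False, 'website': False,
#              'copy-source': False, 'wrap-cdata': False}
#   exporter = CmdlineExporter(app_config, options)
#   out_path = exporter.do_export('course.elp', None)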
| exelearning/iteexe | exe/export/cmdlineexporter.py | Python | gpl-2.0 | 6,556 | 0.002289 |
#!/usr/bin/python
#Pyxis and Original Sipie: Sirius Command Line Player
#Copyright (C) Corey Ling, Eli Criffield
#
#This program is free software; you can redistribute it and/or
#modify it under the terms of the GNU General Public License
#as published by the Free Software Foundation; either version 2
#of the License, or (at your option) any later version.
#
#This program is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
#
#You should have received a copy of the GNU General Public License
#along with this program; if not, write to the Free Software
#Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
from StreamHandler import StreamHandler
class Player(object):
def __init__(self, opts):
self.streamHandler = StreamHandler(opts)
def play(self, url, stream):
self.streamHandler.play(url, stream)
def playing(self):
        return self.streamHandler.playing()
def close(self):
self.streamHandler.close()
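# Usage sketch (assumptions: 'opts' is the parsed Pyxis options object expected
# by StreamHandler, and 'url'/'stream' identify the Sirius stream to play):
#   player = Player(opts)
#   player.play(url, stream)
#   while player.playing():
#       pass
#   player.close()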
| ebruck/pyxis | pyxis/Player.py | Python | gpl-2.0 | 1,159 | 0.014668 |
from plata.payment.modules import cod
from django.shortcuts import redirect
from feincms.content.application.models import app_reverse
class CodPaymentProcessor(cod.PaymentProcessor):
def redirect(self, url_name):
return redirect(app_reverse(url_name,
            'simpleshop.urls'))
| sbaechler/simpleshop | simpleshop/payment_modules.py | Python | bsd-3-clause | 320 | 0.00625 |
#!/bin/python3
def aVeryBigSum(n, ar):
return sum(ar)
n = int(input().strip())
ar = list(map(int, input().strip().split(' ')))
result = aVeryBigSum(n, ar)
print(result)
| sazzadBuet08/programming-contest | hackar_rank/infolytx_mock_hackar_rank/ABigSum.py | Python | apache-2.0 | 176 | 0 |
## mostly copied from: http://norvig.com/spell-correct.html
import sys, random
import re, collections, time
TXT_FILE = ''
BUF_DIR = ''
NWORDS = None
def words(text): return re.findall('[a-z]+', text)
def train(features):
model = collections.defaultdict(lambda: 1)
for f in features:
model[f] += 1
return model
alphabet = 'abcdefghijklmnopqrstuvwxyz'
def edits1(word):
splits = [(word[:i], word[i:]) for i in range(len(word) + 1)]
deletes = [a + b[1:] for a, b in splits if b]
transposes = [a + b[1] + b[0] + b[2:] for a, b in splits if len(b)>1]
replaces = [a + c + b[1:] for a, b in splits for c in alphabet if b]
inserts = [a + c + b for a, b in splits for c in alphabet]
return set(deletes + transposes + replaces + inserts)
def known_edits2(word):
return set(e2 for e1 in edits1(word) for e2 in edits1(e1) if e2 in NWORDS)
def known(words): return set(w for w in words if w in NWORDS)
def correct(word):
candidates = known([word]) or known(edits1(word)) or known_edits2(word) or [word]
return max(candidates, key=NWORDS.get)
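# Worked example (assumption: NWORDS has been trained on an English corpus in
# which 'spelling' occurs):
#   'spelling' in edits1('speling')   # True, reachable by a single insertion
#   correct('speling')                # -> 'spelling', most frequent known candidate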
#######################################################################################
if __name__ == '__main__':
TXT_FILE = sys.argv[1]
t0 = time.clock()
o_words = words(file(TXT_FILE).read())
NWORDS = train(o_words)
#print time.clock() - t0, " seconds build time"
#print "dictionary size: %d" %len(NWORDS)
et1 = time.clock() - t0
t_count = 10
rl = o_words[0:t_count] #random.sample(o_words, t_count)
orl = [''.join(random.sample(word, len(word))) for word in o_words]
t1 = time.clock()
r_count = 10
for i in range(0, r_count):
for w1, w2 in zip(rl, orl):
correct(w1); correct(w2)
et2 = (time.clock() - t1)/t_count/r_count/2
print '%d\t%f\t%f' %(len(NWORDS), et1, et2)
#######################################################################################
print 'Done'
| xulesc/spellchecker | impl1.py | Python | gpl-3.0 | 1,898 | 0.029505 |
# -*- coding: utf-8 -*-
import time
import re
from cStringIO import StringIO
from Sycamore import wikiutil
from Sycamore import config
from Sycamore import wikidb
from Sycamore import user
from Sycamore.Page import Page
def execute(macro, args, formatter=None):
if not formatter:
formatter = macro.formatter
request = macro.request
if args:
# personalized stats
htmltext = []
theuser = user.User(macro.request, name=args.lower())
wiki_info = theuser.getWikiInfo()
if not wiki_info.first_edit_date:
first_edit_date = "<em>unknown</em>"
else:
first_edit_date = request.user.getFormattedDateTime(
wiki_info.first_edit_date)
created_count = wiki_info.created_count
edit_count = wiki_info.edit_count
file_count = wiki_info.file_count
last_page_edited = wiki_info.last_page_edited
last_edit_date = wiki_info.last_edit_date
if not last_edit_date:
last_edit_date = "<em>unknown</em>"
else:
last_edit_date = request.user.getFormattedDateTime(last_edit_date)
if last_page_edited:
htmltext.append(
'<p><h2>%s\'s Statistics</h2></p>'
'<table width=100%% border=0><tr>'
'<td><b>Edits </b></td>'
'<td><b>Pages Created </b></td>'
'<td><b>Files Contributed </b></td>'
'<td><b>First Edit Date </b></td>'
'<td><b>Last Edit </b></td>'
'<td><b>Last Page Edited </b></td></tr>' % args)
htmltext.append('<tr>'
'<td>%s</td><td>%s</td><td>%s</td><td>%s</td>'
'<td>%s</td><td>%s</td>'
'</tr></table>' %
(edit_count, created_count, file_count,
first_edit_date, last_edit_date,
Page(last_page_edited, request).link_to()))
elif edit_count or wiki_info.first_edit_date:
htmltext.append('<p><h2>%s\'s Statistics</h2></p>'
'<table width=100%% border=0><tr>'
'<td><b>Edits </b></td>'
'<td><b>Pages Created </b></td>'
'<td><b>Files Contributed </b></td>'
'<td><b>First Edit Date </b></td>'
'<td><b>Last Edit </b></td>'
'<td><b>Last Page Edited </b></td>'
'</tr>' % args)
htmltext.append('<tr>'
'<td>%s</td><td>%s</td><td>%s</td><td>%s</td>'
'<td>%s</td><td> </td>'
'</tr></table>' %
(edit_count, created_count, file_count,
first_edit_date, last_edit_date))
else:
htmltext.append('<p>' + macro.formatter.highlight(1) +
'The user "%s" has not edited this wiki.' % args +
macro.formatter.highlight(0) + '</p>')
else:
htmltext = []
sort_by = 'edit_count'
if macro.request.form.has_key('sort_by'):
sort_by = macro.request.form['sort_by'][0]
# this is to prevent SQL exploits
if sort_by not in ['edit_count', 'created_count',
'first_edit_date', 'file_count',
'last_edit_date']:
sort_by = 'edit_count'
list = []
cursor = macro.request.cursor
if sort_by == 'first_edit_date':
cursor.execute(
"""SELECT users.propercased_name, userWikiInfo.first_edit_date,
userWikiInfo.created_count, userWikiInfo.edit_count,
userWikiInfo.file_count,
userWikiInfo.last_page_edited,
userWikiInfo.last_edit_date,
userWikiInfo.first_edit_date IS NULL AS join_isnull
FROM userWikiInfo, users
WHERE users.name !='' and userWikiInfo.edit_count >= 0 and
users.name=userWikiInfo.user_name and
userWikiInfo.wiki_id=%%(wiki_id)s
ORDER BY join_isnull ASC, %s DESC""" % sort_by,
{'wiki_id':macro.request.config.wiki_id})
elif sort_by == 'last_edit_date':
cursor.execute(
"""SELECT users.propercased_name, userWikiInfo.first_edit_date,
userWikiInfo.created_count, userWikiInfo.edit_count,
userWikiInfo.file_count,
userWikiInfo.last_page_edited,
userWikiInfo.last_edit_date,
userWikiInfo.last_edit_date IS NULL AS edit_isnull
FROM users, userWikiInfo
WHERE users.name !='' and userWikiInfo.edit_count >= 0 and
users.name=userWikiInfo.user_name and
userWikiInfo.wiki_id=%%(wiki_id)s
ORDER BY edit_isnull ASC, %s DESC""" % sort_by,
{'wiki_id':macro.request.config.wiki_id})
else:
cursor.execute(
"""SELECT users.propercased_name, userWikiInfo.first_edit_date,
userWikiInfo.created_count, userWikiInfo.edit_count,
userWikiInfo.file_count,
userWikiInfo.last_page_edited,
userWikiInfo.last_edit_date
FROM users, userWikiInfo
WHERE users.name !='' and userWikiInfo.edit_count >= 0 and
users.name=userWikiInfo.user_name and
userWikiInfo.wiki_id=%%(wiki_id)s
ORDER BY %s DESC""" % sort_by,
{'wiki_id':macro.request.config.wiki_id})
user_stats = cursor.fetchall()
page = Page("User Statistics", request)
htmltext.append('<p><h2>All User Statistics</h2></p>'
'<table width=100%% border=0><tr>'
'<td><b>User</b></td><td><b>%s </b></td>'
'<td><b>%s </b></td>'
'<td><b>%s </b></td>'
'<td><b>%s </b></td>'
'<td><b>%s </b></td>'
'<td><b>Last Page Edited </b></td>'
'</tr>' %
(page.link_to(know_status=True,
know_status_exists=True,
querystr="sort_by=edit_count",
text="Edits"),
page.link_to(know_status=True,
know_status_exists=True,
querystr="sort_by=created_count",
text="Pages Created"),
page.link_to(know_status=True,
know_status_exists=True,
querystr="sort_by=file_count",
text="Files Contributed"),
page.link_to(know_status=True,
know_status_exists=True,
querystr="sort_by=first_edit_date",
text="First Edit Date"),
page.link_to(know_status=True,
know_status_exists=True,
querystr="sort_by=last_edit_date",
text="Last Edit")))
toggle = -1
for result in user_stats:
toggle = toggle*(-1)
name = result[0]
first_edit_date = result[1]
# older system sometimes didn't log this/hard to tell
if not first_edit_date:
first_edit_date = '<em>unknown</em>'
else:
first_edit_date = request.user.getFormattedDateTime(
first_edit_date)
created_count = result[2]
edit_count = result[3]
file_count = result[4]
last_page_edited = result[5]
last_edit_date = result[6]
if not last_edit_date:
last_edit_date = '<em>unknown</em>'
else:
last_edit_date = request.user.getFormattedDateTime(
last_edit_date)
            # we don't use User objects here because there's a hell of a
            # lot of users, potentially
if toggle < 0:
if last_page_edited:
htmltext.append('<tr bgcolor="#E5E5E5">'
'<td>%s</td><td>%s</td><td>%s</td>'
'<td>%s</td><td>%s</td><td>%s</td>'
'<td>%s</td></tr>' %
(Page(config.user_page_prefix + name,
request).link_to(
know_status=True,
know_status_exists=True,
text=name),
edit_count, created_count, file_count,
first_edit_date, last_edit_date,
Page(last_page_edited,
request).link_to()))
else:
htmltext.append('<tr bgcolor="#E5E5E5">'
'<td>%s</td><td>%s</td><td>%s</td>'
'<td>%s</td><td>%s</td><td>%s</td>'
'<td> </td>'
'</tr>' %
(Page(config.user_page_prefix + name,
request).link_to(
know_status=True,
know_status_exists=True,
text=name),
edit_count, created_count, file_count,
first_edit_date, last_edit_date))
else:
if last_page_edited:
htmltext.append('<tr bgcolor="#E0FFFF">'
'<td>%s</a></td><td>%s</td><td>%s</td>'
'<td>%s</td><td>%s</td><td>%s</td>'
'<td>%s</td></tr>' %
(Page(config.user_page_prefix + name,
request).link_to(
know_status=True,
know_status_exists=True,
text=name),
edit_count, created_count, file_count,
first_edit_date, last_edit_date,
Page(last_page_edited,
request).link_to()))
else:
htmltext.append('<tr bgcolor="#E0FFFF"><td>%s</td><td>%s</td><td>%s</td><td>%s</td><td>%s</td><td>%s</td><td> </td></tr>' % (Page(config.user_page_prefix + name, request).link_to(know_status=True, know_status_exists=True, text=name),edit_count,created_count,file_count,first_edit_date,last_edit_date))
htmltext.append('</table>')
return macro.formatter.rawHTML(u''.join(htmltext))
def compare_edit(x,y):
if int(x.getAttribute("edit_count")) == int(y.getAttribute("edit_count")):
return 0
elif int(x.getAttribute("edit_count")) < int(y.getAttribute("edit_count")):
return 1
else:
return -1
| rtucker/sycamore | Sycamore/macro/allusers.py | Python | gpl-2.0 | 12,303 | 0.003658 |
from __future__ import print_function, division, unicode_literals
from pprint import pprint
from itertools import groupby
from functools import wraps
from collections import namedtuple, deque
# OrderedDict was added in 2.7. ibm6 still uses python2.6
try:
from collections import OrderedDict
except ImportError:
from .ordereddict import OrderedDict
def group_entries_bylocus(entries):
d = {}
for e in entries:
if e.locus not in d:
d[e.locus] = [e]
else:
d[e.locus].append(e)
return d
class Entry(namedtuple("Entry", "vname, ptr, action, size, file, func, line, tot_memory, sidx")):
@classmethod
def from_line(cls, line, sidx):
args = line.split()
args.append(sidx)
return cls(*args)
def __new__(cls, *args):
"""Extends the base class adding type conversion of arguments."""
# write(logunt,'(a,t60,a,1x,2(i0,1x),2(a,1x),2(i0,1x))')&
# trim(vname), trim(act), addr, isize, trim(basename(file)), trim(func), line, memtot_abi%memory
return super(cls, Entry).__new__(cls,
vname=args[0],
action=args[1],
ptr=int(args[2]),
size=int(args[3]),
file=args[4],
func=args[5],
line=int(args[6]),
tot_memory=int(args[7]),
sidx=args[8],
)
def __repr__(self):
return self.as_repr(with_addr=True)
def as_repr(self, with_addr=True):
if with_addr:
return "<var=%s, %s@%s:%s:%s, addr=%s, size=%d, idx=%d>" % (
self.vname, self.action, self.file, self.func, self.line, hex(self.ptr), self.size, self.sidx)
else:
return "<var=%s, %s@%s:%s:%s, size=%d, idx=%d>" % (
self.vname, self.action, self.file, self.func, self.line, self.size, self.sidx)
@property
def basename(self):
return self.vname.split("%")[-1]
@property
def isalloc(self):
"""True if entry represents an allocation."""
return self.action == "A"
@property
def isfree(self):
"""True if entry represents a deallocation."""
return self.action == "D"
@property
def iszerosized(self):
"""True if this is a zero-sized alloc/free."""
return self.size == 0
@property
def locus(self):
"""This is almost unique"""
return self.func + "@" + self.file
def frees_onheap(self, other):
if (not self.isfree) or other.isalloc: return False
if self.size + other.size != 0: return False
return True
def frees_onstack(self, other):
if (not self.isfree) or other.isalloc: return False
if self.size + other.size != 0: return False
if self.locus != other.locus: return False
return True
class Heap(dict):
def show(self):
print("=== HEAP OF LEN %s ===" % len(self))
if not self: return
# for p, elist in self.items():
pprint(self, indent=4)
print("")
def pop_alloc(self, entry):
if not entry.isfree: return 0
        elist = self.get(entry.ptr)
        if elist is None: return 0
        for i, olde in enumerate(elist):
            if entry.size + olde.size == 0:
elist.pop(i)
return 1
return 0
class Stack(dict):
def show(self):
print("=== STACK OF LEN %s ===)" % len(self))
if not self: return
pprint(self)
print("")
def catchall(method):
@wraps(method)
def wrapper(*args, **kwargs):
self = args[0]
try:
return method(*args, **kwargs)
except Exception as exc:
# Add info on file and re-raise.
msg = "Exception while parsing file: %s\n" % self.path
raise exc.__class__(msg + str(exc))
return wrapper
class AbimemParser(object):
def __init__(self, path):
self.path = path
#def __str__(self):
# lines = []
# app = lines.append
# return "\n".join(lines)
@catchall
def summarize(self):
with open(self.path, "rt") as fh:
l = fh.read()
print(l)
@catchall
def find_small_allocs(self, nbytes=160):
"""Zero sized allocations are not counted."""
smalles = []
with open(self.path, "rt") as fh:
for lineno, line in enumerate(fh):
if lineno == 0: continue
e = Entry.from_line(line, lineno)
if not e.isalloc: continue
if 0 < e.size <= nbytes: smalles.append(e)
pprint(smalles)
return smalles
@catchall
def find_intensive(self, threshold=2000):
d = {}
with open(self.path, "rt") as fh:
for lineno, line in enumerate(fh):
if lineno == 0: continue
e = Entry.from_line(line, lineno)
loc = e.locus
if loc not in d:
d[loc] = [e]
else:
d[loc].append(e)
# Remove entries below the threshold and perform DSU sort
dsu_list = [(elist, len(elist)) for _, elist in d.items() if len(elist) >= threshold]
intensive = [t[0] for t in sorted(dsu_list, key=lambda x: x[1], reverse=True)]
for elist in intensive:
loc = elist[0].locus
# assert all(e.locus == loc for e in elist)
print("[%s] has %s allocations/frees" % (loc, len(elist)))
return intensive
#def show_peaks(self):
@catchall
def find_zerosized(self):
elist = []
eapp = elist.append
for e in self.yield_all_entries():
if e.size == 0: eapp(e)
if elist:
print("Found %d zero-sized entries:" % len(elist))
pprint(elist)
else:
print("No zero-sized found")
return elist
@catchall
def find_weird_ptrs(self):
elist = []
eapp = elist.append
for e in self.yield_all_entries():
if e.ptr <= 0: eapp(e)
if elist:
print("Found %d weird entries:" % len(elist))
pprint(elist)
else:
print("No weird entries found")
return elist
def yield_all_entries(self):
with open(self.path, "rt") as fh:
for lineno, line in enumerate(fh):
if lineno == 0: continue
yield Entry.from_line(line, lineno)
@catchall
def find_peaks(self, maxlen=20):
# the deque is bounded to the specified maximum length. Once a bounded length deque is full,
# when new items are added, a corresponding number of items are discarded from the opposite end.
peaks = deque(maxlen=maxlen)
for e in self.yield_all_entries():
size = e.size
if size == 0 or not e.isalloc: continue
if len(peaks) == 0:
peaks.append(e); continue
            # TODO: Should remove redundant entries.
if size > peaks[0].size:
peaks.append(e)
peaks = deque(sorted(peaks, key=lambda x: x.size), maxlen=maxlen)
peaks = deque(sorted(peaks, key=lambda x: x.size, reverse=True), maxlen=maxlen)
for peak in peaks:
print(peak)
return peaks
@catchall
def plot_memory_usage(self, show=True):
memory = [e.tot_memory for e in self.yield_all_entries()]
import matplotlib.pyplot as plt
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
ax.plot(memory)
if show: plt.show()
return fig
#def get_dataframe(self):
# import pandas as pd
# frame = pd.DataFrame()
# return frame
@catchall
def find_memleaks(self):
heap, stack = Heap(), Stack()
reallocs = []
with open(self.path, "rt") as fh:
for lineno, line in enumerate(fh):
if lineno == 0: continue
newe = Entry.from_line(line, lineno)
p = newe.ptr
if newe.size == 0: continue
# Store new entry in list if the ptr is not in d
# else we check if there's an allocation that matches a previous allocation
# (zero-sized arrays are not included)
                # else there's a possible memory leak or some undetected problems.
if p not in heap:
if newe.isalloc:
heap[p] = [newe]
else:
# Likely reallocation
reallocs.append(newe)
else:
if newe.isfree and len(heap[p]) == 1 and heap[p][0].size + newe.size == 0:
heap.pop(p)
else:
# In principle this should never happen but there are exceptions:
#
# 1) The compiler could decide to put the allocatable on the stack
# In this case the ptr reported by gfortran is 0.
#
# 2) The allocatable variable is "reallocated" by the compiler (F2003).
# Example:
#
# allocate(foo(2,1)) ! p0 = &foo
# foo = reshape([0,0], [2,1]) ! p1 = &foo. Reallocation of the LHS.
# ! Use foo(:) to avoid that
# deallocate(foo) ! p2 = &foo
#
# In this case, p2 != p0
#print("WARN:", newe.ptr, newe, "ptr already on the heap")
#print("HEAP:", heap[newe.ptr])
locus = newe.locus
if locus not in stack:
stack[locus] = [newe]
else:
#if newe.ptr != 0: print(newe)
stack_loc = stack[locus]
ifind = -1
for i, olde in enumerate(stack_loc):
if newe.frees_onstack(olde):
ifind = i
break
if ifind != -1:
stack_loc.pop(ifind)
#else:
# print(newe)
#if p == 0:
# stack[p] = newe
#else:
# print("varname", newe.vname, "in heap with size ",newe.size)
# for weirde in heap[p]:
# print("\tweird entry:", weirde)
# heap[p].append(newe)
if False and heap:
# Possible memory leaks.
count = -1
keyfunc = lambda e: abs(e.size)
for a, entries in heap.items():
count += 1
entries = [e for e in entries if e.size != 0]
entries = sorted(entries, key=keyfunc)
#if any(int(e.size) != 0 for e in l):
#msizes = []
for key, group in groupby(entries, keyfunc):
group = list(group)
#print([e.name for e in g])
pos_size = [e for e in group if e.size >0]
neg_size = [e for e in group if e.size <0]
if len(pos_size) != len(neg_size):
print("key", key)
for e in group:
print(e)
#print(list(g))
#for i, e in enumerate(entries):
# print("\t[%d]" % i, e)
#print("Count=%d" % count, 60 * "=")
if heap: heap.show()
if stack: stack.show()
if reallocs:
print("Possible reallocations:")
pprint(reallocs)
return len(heap) + len(stack) + len(reallocs)
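if __name__ == "__main__":
    # Minimal command-line sketch (assumption: the single argument is the path
    # to an abimem trace file written by abinit's memory profiler).
    import sys
    if len(sys.argv) > 1:
        abimem = AbimemParser(sys.argv[1])
        abimem.find_memleaks()
        abimem.find_zerosized()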
| jmbeuken/abinit | tests/pymods/memprof.py | Python | gpl-3.0 | 12,121 | 0.00693 |
from datetime import datetime
from csv import DictReader
from django.core.management.base import BaseCommand, CommandError
from property.models import Property, Owner, MailingAddress, Assessment, Building, Sale
class Command(BaseCommand):
help = 'Imports property from CSV file'
def add_arguments(self, parser):
parser.add_argument('path')
parser.add_argument('--assessment-date',
dest='adate',
default=False,
help='Assessment date for the document (yyyy-mm-dd format)')
def convert_to_float(self, string):
try:
value = float(string.strip(' ').replace('$', '').replace(',',''))
except ValueError:
value = None
return value
def handle(self, *args, **options):
file_path = options['path']
adate = datetime.strptime(options['adate'], '%Y-%m-%d')
f = open(file_path)
items = DictReader(f)
for d in items:
print('Checking for existing property with ID #{0} at {1} {2}'.format(d['Account Number'],
d['Street Number'],
d['Street Name']))
(p, created) = Property.objects.get_or_create(account_number=d['Account Number'])
if created:
print('Created new property')
else:
print('Updating existing property')
p.account_number=d['Account Number']
p.street_number=d['Street Number']
p.street=d['Street Name']
p.city='Bangor'
p.state='Maine'
p.map_lot=d['Map/Lot']
p.book_page_1=d['Book & Page']
p.save()
a,created = Assessment.objects.get_or_create(assoc_property=p, date=adate)
if created:
print('Adding assessment for {0}'.format(adate.year))
a.land=self.convert_to_float(d['Land Value'])
a.building=self.convert_to_float(d['Building Value'])
a.exemption=self.convert_to_float(d['Exemption'])
a.tax_amount=self.convert_to_float(d['Tax Amount'])
a.date=adate
a.save()
o, created = Owner.objects.get_or_create(name=d["Owner's Name"])
try:
o.name_2=d["Owner's Name Part 2"]
o.save()
except:
pass
p.owners.add(o)
p.save()
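    # Invocation sketch (assumptions: this command is exposed to manage.py as
    # 'import_property_csv' via its location under management/commands, and the
    # CSV carries the headers read above, e.g. "Account Number", "Street Name",
    # "Land Value", "Owner's Name"; the file path below is a placeholder):
    #   python manage.py import_property_csv properties.csv \
    #       --assessment-date 2015-04-01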
| Code4Maine/suum | suum/apps/property/management/commands/import_property_csv.py | Python | bsd-3-clause | 2,559 | 0.010942 |
import urllib2
import re
JIRA_URL='https://bugreports.qt-project.org/browse'
class JIRA:
__instance__ = None
# Helper class
class Bug:
CREATOR = 'QTCREATORBUG'
SIMULATOR = 'QTSIM'
SDK = 'QTSDK'
QT = 'QTBUG'
QT_QUICKCOMPONENTS = 'QTCOMPONENTS'
# constructor of JIRA
def __init__(self, number, bugType=Bug.CREATOR):
if JIRA.__instance__ == None:
JIRA.__instance__ = JIRA.__impl(number, bugType)
JIRA.__dict__['_JIRA__instance__'] = JIRA.__instance__
else:
JIRA.__instance__._bugType = bugType
JIRA.__instance__._number = number
JIRA.__instance__.__fetchStatusAndResolutionFromJira__()
    # overridden to make it possible to use JIRA just like the
# underlying implementation (__impl)
def __getattr__(self, attr):
return getattr(self.__instance__, attr)
    # overridden to make it possible to use JIRA just like the
# underlying implementation (__impl)
def __setattr__(self, attr, value):
return setattr(self.__instance__, attr, value)
# function to get an instance of the singleton
@staticmethod
def getInstance():
if '_JIRA__instance__' in JIRA.__dict__:
return JIRA.__instance__
else:
return JIRA.__impl(0, Bug.CREATOR)
# function to check if the given bug is open or not
@staticmethod
def isBugStillOpen(number, bugType=Bug.CREATOR):
tmpJIRA = JIRA(number, bugType)
return tmpJIRA.isOpen()
# function similar to performWorkaroundForBug - but it will execute the
# workaround (function) only if the bug is still open
# returns True if the workaround function has been executed, False otherwise
@staticmethod
def performWorkaroundIfStillOpen(number, bugType=Bug.CREATOR, *args):
if JIRA.isBugStillOpen(number, bugType):
return JIRA.performWorkaroundForBug(number, bugType, *args)
else:
test.warning("Bug is closed... skipping workaround!",
"You should remove potential code inside performWorkaroundForBug()")
return False
# function that performs the workaround (function) for the given bug
# if the function needs additional arguments pass them as 3rd parameter
@staticmethod
def performWorkaroundForBug(number, bugType=Bug.CREATOR, *args):
functionToCall = JIRA.getInstance().__bugs__.get("%s-%d" % (bugType, number), None)
if functionToCall:
test.warning("Using workaround for %s-%d" % (bugType, number))
functionToCall(*args)
return True
else:
JIRA.getInstance()._exitFatal_(bugType, number)
return False
# implementation of JIRA singleton
class __impl:
# constructor of __impl
def __init__(self, number, bugType):
self._number = number
self._bugType = bugType
self._localOnly = os.getenv("SYSTEST_JIRA_NO_LOOKUP")=="1"
self.__initBugDict__()
self.__fetchStatusAndResolutionFromJira__()
# function to retrieve the status of the current bug
def getStatus(self):
return self._status
# function to retrieve the resolution of the current bug
def getResolution(self):
return self._resolution
# this function checks the resolution of the given bug
# and returns True if the bug can still be assumed as 'Open' and False otherwise
def isOpen(self):
# handle special cases
if self._resolution == None:
return True
if self._resolution in ('Duplicate', 'Moved', 'Incomplete', 'Cannot Reproduce', 'Invalid'):
test.warning("Resolution of bug is '%s' - assuming 'Open' for now." % self._resolution,
"Please check the bugreport manually and update this test.")
return True
return self._resolution != 'Done'
# this function tries to fetch the status and resolution from JIRA for the given bug
# if this isn't possible or the lookup is disabled it does only check the internal
# dict whether a function for the given bug is deposited or not
def __fetchStatusAndResolutionFromJira__(self):
global JIRA_URL
data = None
if not self._localOnly:
try:
bugReport = urllib2.urlopen('%s/%s-%d' % (JIRA_URL, self._bugType, self._number))
data = bugReport.read()
except:
data = self.__tryExternalTools__()
if data == None:
test.warning("Sorry, ssl module missing - cannot fetch data via HTTPS",
"Try to install the ssl module by yourself, or set the python "
"path inside SQUISHDIR/etc/paths.ini to use a python version with "
"ssl support OR install wget or curl to get rid of this warning!")
self._localOnly = True
if data == None:
if '%s-%d' % (self._bugType, self._number) in self.__bugs__:
test.warning("Using internal dict - bug status could have changed already",
"Please check manually!")
self._status = None
self._resolution = None
return
else:
test.fatal("No workaround function deposited for %s-%d" % (self._bugType, self._number))
self._resolution = 'Done'
return
else:
data = data.replace("\r", "").replace("\n", "")
resPattern = re.compile('<span\s+id="resolution-val".*?>(?P<resolution>.*?)</span>')
statPattern = re.compile('<span\s+id="status-val".*?>(.*?<img.*?>)?(?P<status>.*?)</span>')
status = statPattern.search(data)
resolution = resPattern.search(data)
if status:
self._status = status.group("status").strip()
else:
test.fatal("FATAL: Cannot get status of bugreport %s-%d" % (self._bugType, self._number),
"Looks like JIRA has changed.... Please verify!")
self._status = None
if resolution:
self._resolution = resolution.group("resolution").strip()
else:
test.fatal("FATAL: Cannot get resolution of bugreport %s-%d" % (self._bugType, self._number),
"Looks like JIRA has changed.... Please verify!")
self._resolution = None
# simple helper function - used as fallback if python has no ssl support
# tries to find curl or wget in PATH and fetches data with it instead of
# using urllib2
def __tryExternalTools__(self):
global JIRA_URL
cmdAndArgs = { 'curl':'-k', 'wget':'-qO-' }
for call in cmdAndArgs:
prog = which(call)
if prog:
return getOutputFromCmdline('"%s" %s %s/%s-%d' % (prog, cmdAndArgs[call], JIRA_URL, self._bugType, self._number))
return None
# this function initializes the bug dict for localOnly usage and
# for later lookup which function to call for which bug
# ALWAYS update this dict when adding a new function for a workaround!
def __initBugDict__(self):
self.__bugs__= {
'QTCREATORBUG-6853':self._workaroundCreator6853_,
'QTCREATORBUG-6918':self._workaroundCreator_MacEditorFocus_,
'QTCREATORBUG-6953':self._workaroundCreator_MacEditorFocus_,
'QTCREATORBUG-6994':self._workaroundCreator6994_,
'QTCREATORBUG-7002':self._workaroundCreator7002_
}
# helper function - will be called if no workaround for the requested bug is deposited
def _exitFatal_(self, bugType, number):
test.fatal("No workaround found for bug %s-%d" % (bugType, number))
############### functions that hold workarounds #################################
def _workaroundCreator6994_(self, *args):
if args[0] in ('Mobile Qt Application', 'Qt Gui Application', 'Qt Custom Designer Widget'):
args[1].remove('Harmattan')
test.xverify(False, "Removed Harmattan from expected targets.")
def _workaroundCreator6853_(self, *args):
if "Release" in args[0] and platform.system() == "Linux":
snooze(1)
def _workaroundCreator_MacEditorFocus_(self, *args):
editor = args[0]
nativeMouseClick(editor.mapToGlobal(QPoint(50, 50)).x, editor.mapToGlobal(QPoint(50, 50)).y, Qt.LeftButton)
def _workaroundCreator7002_(self, *args):
if platform.system() in ("Linux", "Darwin"):
result = args[0]
result.append(QtQuickConstants.Targets.EMBEDDED_LINUX)
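    # Usage sketch from a Squish test script (assumption: 'availableTargets' is
    # a list of target names gathered by the calling test, containing the
    # entries the workaround may remove):
    #   JIRA.performWorkaroundIfStillOpen(6994, JIRA.Bug.CREATOR,
    #                                     'Qt Gui Application', availableTargets)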
| hdweiss/qt-creator-visualizer | tests/system/shared/workarounds.py | Python | lgpl-2.1 | 9,260 | 0.005508 |
from decimal import Decimal
from unittest import TestCase
from StringIO import StringIO
import simplejson as json
class TestDecimal(TestCase):
NUMS = "1.0", "10.00", "1.1", "1234567890.1234567890", "500"
def dumps(self, obj, **kw):
sio = StringIO()
json.dump(obj, sio, **kw)
res = json.dumps(obj, **kw)
self.assertEquals(res, sio.getvalue())
return res
def loads(self, s, **kw):
sio = StringIO(s)
res = json.loads(s, **kw)
self.assertEquals(res, json.load(sio, **kw))
return res
def test_decimal_encode(self):
for d in map(Decimal, self.NUMS):
self.assertEquals(self.dumps(d, use_decimal=True), str(d))
def test_decimal_decode(self):
for s in self.NUMS:
self.assertEquals(self.loads(s, parse_float=Decimal), Decimal(s))
def test_decimal_roundtrip(self):
for d in map(Decimal, self.NUMS):
# The type might not be the same (int and Decimal) but they
# should still compare equal.
self.assertEquals(
self.loads(
self.dumps(d, use_decimal=True), parse_float=Decimal),
d)
self.assertEquals(
self.loads(
self.dumps([d], use_decimal=True), parse_float=Decimal),
[d])
def test_decimal_defaults(self):
d = Decimal(1)
sio = StringIO()
# use_decimal=False is the default
self.assertRaises(TypeError, json.dumps, d, use_decimal=False)
self.assertRaises(TypeError, json.dumps, d)
self.assertRaises(TypeError, json.dump, d, sio, use_decimal=False)
self.assertRaises(TypeError, json.dump, d, sio) | geary/claslite | web/app/lib/simplejson/tests/test_decimal.py | Python | unlicense | 1,752 | 0.002854 |
"""OriginalityReports API Version 1.0.
This API client was generated using a template. Make sure this code is valid before using it.
"""
import logging
from datetime import date, datetime
from .base import BaseCanvasAPI
from .base import BaseModel
class OriginalityReportsAPI(BaseCanvasAPI):
"""OriginalityReports API Version 1.0."""
def __init__(self, *args, **kwargs):
"""Init method for OriginalityReportsAPI."""
super(OriginalityReportsAPI, self).__init__(*args, **kwargs)
self.logger = logging.getLogger("py3canvas.OriginalityReportsAPI")
def create_originality_report(
self,
assignment_id,
originality_report_originality_score,
submission_id,
originality_report_attempt=None,
originality_report_error_message=None,
originality_report_file_id=None,
originality_report_originality_report_file_id=None,
originality_report_originality_report_url=None,
originality_report_tool_setting_resource_type_code=None,
originality_report_tool_setting_resource_url=None,
originality_report_workflow_state=None,
):
"""
Create an Originality Report.
Create a new OriginalityReport for the specified file
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - assignment_id
"""
ID
"""
path["assignment_id"] = assignment_id
# REQUIRED - PATH - submission_id
"""
ID
"""
path["submission_id"] = submission_id
# OPTIONAL - originality_report[file_id]
"""
The id of the file being given an originality score. Required
if creating a report associated with a file.
"""
if originality_report_file_id is not None:
data["originality_report[file_id]"] = originality_report_file_id
# REQUIRED - originality_report[originality_score]
"""
A number between 0 and 100 representing the measure of the
specified file's originality.
"""
data[
"originality_report[originality_score]"
] = originality_report_originality_score
# OPTIONAL - originality_report[originality_report_url]
"""
The URL where the originality report for the specified
file may be found.
"""
if originality_report_originality_report_url is not None:
data[
"originality_report[originality_report_url]"
] = originality_report_originality_report_url
# OPTIONAL - originality_report[originality_report_file_id]
"""
The ID of the file within Canvas that contains the originality
report for the submitted file provided in the request URL.
"""
if originality_report_originality_report_file_id is not None:
data[
"originality_report[originality_report_file_id]"
] = originality_report_originality_report_file_id
# OPTIONAL - originality_report[tool_setting][resource_type_code]
"""
The resource type code of the resource handler Canvas should use for the
LTI launch for viewing originality reports. If set Canvas will launch
to the message with type 'basic-lti-launch-request' in the specified
resource handler rather than using the originality_report_url.
"""
if originality_report_tool_setting_resource_type_code is not None:
data[
"originality_report[tool_setting][resource_type_code]"
] = originality_report_tool_setting_resource_type_code
# OPTIONAL - originality_report[tool_setting][resource_url]
"""
The URL Canvas should launch to when showing an LTI originality report.
Note that this value is inferred from the specified resource handler's
message "path" value (See `resource_type_code`) unless
it is specified. If this parameter is used a `resource_type_code`
must also be specified.
"""
if originality_report_tool_setting_resource_url is not None:
data[
"originality_report[tool_setting][resource_url]"
] = originality_report_tool_setting_resource_url
# OPTIONAL - originality_report[workflow_state]
"""
May be set to "pending", "error", or "scored". If an originality score
is provided a workflow state of "scored" will be inferred.
"""
if originality_report_workflow_state is not None:
data[
"originality_report[workflow_state]"
] = originality_report_workflow_state
# OPTIONAL - originality_report[error_message]
"""
A message describing the error. If set, the "workflow_state"
will be set to "error."
"""
if originality_report_error_message is not None:
data["originality_report[error_message]"] = originality_report_error_message
# OPTIONAL - originality_report[attempt]
"""
If no `file_id` is given, and no file is required for the assignment
(that is, the assignment allows an online text entry), this parameter
may be given to clarify which attempt number the report is for (in the
case of resubmissions). If this field is omitted and no `file_id` is
given, the report will be created (or updated, if it exists) for the
first submission attempt with no associated file.
"""
if originality_report_attempt is not None:
data["originality_report[attempt]"] = originality_report_attempt
self.logger.debug(
"POST /api/lti/assignments/{assignment_id}/submissions/{submission_id}/originality_report with query params: {params} and form data: {data}".format(
params=params, data=data, **path
)
)
return self.generic_request(
"POST",
"/api/lti/assignments/{assignment_id}/submissions/{submission_id}/originality_report".format(
**path
),
data=data,
params=params,
single_item=True,
)
def edit_originality_report_submissions(
self,
assignment_id,
id,
submission_id,
originality_report_error_message=None,
originality_report_originality_report_file_id=None,
originality_report_originality_report_url=None,
originality_report_originality_score=None,
originality_report_tool_setting_resource_type_code=None,
originality_report_tool_setting_resource_url=None,
originality_report_workflow_state=None,
):
"""
Edit an Originality Report.
Modify an existing originality report. An alternative to this endpoint is
to POST the same parameters listed below to the CREATE endpoint.
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - assignment_id
"""
ID
"""
path["assignment_id"] = assignment_id
# REQUIRED - PATH - submission_id
"""
ID
"""
path["submission_id"] = submission_id
# REQUIRED - PATH - id
"""
ID
"""
path["id"] = id
# OPTIONAL - originality_report[originality_score]
"""
A number between 0 and 100 representing the measure of the
specified file's originality.
"""
if originality_report_originality_score is not None:
data[
"originality_report[originality_score]"
] = originality_report_originality_score
# OPTIONAL - originality_report[originality_report_url]
"""
The URL where the originality report for the specified
file may be found.
"""
if originality_report_originality_report_url is not None:
data[
"originality_report[originality_report_url]"
] = originality_report_originality_report_url
# OPTIONAL - originality_report[originality_report_file_id]
"""
The ID of the file within Canvas that contains the originality
report for the submitted file provided in the request URL.
"""
if originality_report_originality_report_file_id is not None:
data[
"originality_report[originality_report_file_id]"
] = originality_report_originality_report_file_id
# OPTIONAL - originality_report[tool_setting][resource_type_code]
"""
The resource type code of the resource handler Canvas should use for the
LTI launch for viewing originality reports. If set Canvas will launch
to the message with type 'basic-lti-launch-request' in the specified
resource handler rather than using the originality_report_url.
"""
if originality_report_tool_setting_resource_type_code is not None:
data[
"originality_report[tool_setting][resource_type_code]"
] = originality_report_tool_setting_resource_type_code
# OPTIONAL - originality_report[tool_setting][resource_url]
"""
The URL Canvas should launch to when showing an LTI originality report.
Note that this value is inferred from the specified resource handler's
message "path" value (See `resource_type_code`) unless
it is specified. If this parameter is used a `resource_type_code`
must also be specified.
"""
if originality_report_tool_setting_resource_url is not None:
data[
"originality_report[tool_setting][resource_url]"
] = originality_report_tool_setting_resource_url
# OPTIONAL - originality_report[workflow_state]
"""
May be set to "pending", "error", or "scored". If an originality score
is provided a workflow state of "scored" will be inferred.
"""
if originality_report_workflow_state is not None:
data[
"originality_report[workflow_state]"
] = originality_report_workflow_state
# OPTIONAL - originality_report[error_message]
"""
A message describing the error. If set, the "workflow_state"
will be set to "error."
"""
if originality_report_error_message is not None:
data["originality_report[error_message]"] = originality_report_error_message
self.logger.debug(
"PUT /api/lti/assignments/{assignment_id}/submissions/{submission_id}/originality_report/{id} with query params: {params} and form data: {data}".format(
params=params, data=data, **path
)
)
return self.generic_request(
"PUT",
"/api/lti/assignments/{assignment_id}/submissions/{submission_id}/originality_report/{id}".format(
**path
),
data=data,
params=params,
single_item=True,
)
def edit_originality_report_files(
self,
assignment_id,
file_id,
originality_report_error_message=None,
originality_report_originality_report_file_id=None,
originality_report_originality_report_url=None,
originality_report_originality_score=None,
originality_report_tool_setting_resource_type_code=None,
originality_report_tool_setting_resource_url=None,
originality_report_workflow_state=None,
):
"""
Edit an Originality Report.
Modify an existing originality report. An alternative to this endpoint is
to POST the same parameters listed below to the CREATE endpoint.
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - assignment_id
"""
ID
"""
path["assignment_id"] = assignment_id
# REQUIRED - PATH - file_id
"""
ID
"""
path["file_id"] = file_id
# OPTIONAL - originality_report[originality_score]
"""
A number between 0 and 100 representing the measure of the
specified file's originality.
"""
if originality_report_originality_score is not None:
data[
"originality_report[originality_score]"
] = originality_report_originality_score
# OPTIONAL - originality_report[originality_report_url]
"""
The URL where the originality report for the specified
file may be found.
"""
if originality_report_originality_report_url is not None:
data[
"originality_report[originality_report_url]"
] = originality_report_originality_report_url
# OPTIONAL - originality_report[originality_report_file_id]
"""
The ID of the file within Canvas that contains the originality
report for the submitted file provided in the request URL.
"""
if originality_report_originality_report_file_id is not None:
data[
"originality_report[originality_report_file_id]"
] = originality_report_originality_report_file_id
# OPTIONAL - originality_report[tool_setting][resource_type_code]
"""
The resource type code of the resource handler Canvas should use for the
LTI launch for viewing originality reports. If set Canvas will launch
to the message with type 'basic-lti-launch-request' in the specified
resource handler rather than using the originality_report_url.
"""
if originality_report_tool_setting_resource_type_code is not None:
data[
"originality_report[tool_setting][resource_type_code]"
] = originality_report_tool_setting_resource_type_code
# OPTIONAL - originality_report[tool_setting][resource_url]
"""
The URL Canvas should launch to when showing an LTI originality report.
Note that this value is inferred from the specified resource handler's
message "path" value (See `resource_type_code`) unless
it is specified. If this parameter is used a `resource_type_code`
must also be specified.
"""
if originality_report_tool_setting_resource_url is not None:
data[
"originality_report[tool_setting][resource_url]"
] = originality_report_tool_setting_resource_url
# OPTIONAL - originality_report[workflow_state]
"""
May be set to "pending", "error", or "scored". If an originality score
is provided a workflow state of "scored" will be inferred.
"""
if originality_report_workflow_state is not None:
data[
"originality_report[workflow_state]"
] = originality_report_workflow_state
# OPTIONAL - originality_report[error_message]
"""
A message describing the error. If set, the "workflow_state"
will be set to "error."
"""
if originality_report_error_message is not None:
data["originality_report[error_message]"] = originality_report_error_message
self.logger.debug(
"PUT /api/lti/assignments/{assignment_id}/files/{file_id}/originality_report with query params: {params} and form data: {data}".format(
params=params, data=data, **path
)
)
return self.generic_request(
"PUT",
"/api/lti/assignments/{assignment_id}/files/{file_id}/originality_report".format(
**path
),
data=data,
params=params,
single_item=True,
)
def show_originality_report_submissions(self, assignment_id, id, submission_id):
"""
Show an Originality Report.
Get a single originality report
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - assignment_id
"""
ID
"""
path["assignment_id"] = assignment_id
# REQUIRED - PATH - submission_id
"""
ID
"""
path["submission_id"] = submission_id
# REQUIRED - PATH - id
"""
ID
"""
path["id"] = id
self.logger.debug(
"GET /api/lti/assignments/{assignment_id}/submissions/{submission_id}/originality_report/{id} with query params: {params} and form data: {data}".format(
params=params, data=data, **path
)
)
return self.generic_request(
"GET",
"/api/lti/assignments/{assignment_id}/submissions/{submission_id}/originality_report/{id}".format(
**path
),
data=data,
params=params,
single_item=True,
)
def show_originality_report_files(self, assignment_id, file_id):
"""
Show an Originality Report.
Get a single originality report
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - assignment_id
"""
ID
"""
path["assignment_id"] = assignment_id
# REQUIRED - PATH - file_id
"""
ID
"""
path["file_id"] = file_id
self.logger.debug(
"GET /api/lti/assignments/{assignment_id}/files/{file_id}/originality_report with query params: {params} and form data: {data}".format(
params=params, data=data, **path
)
)
return self.generic_request(
"GET",
"/api/lti/assignments/{assignment_id}/files/{file_id}/originality_report".format(
**path
),
data=data,
params=params,
single_item=True,
)
class Toolsetting(BaseModel):
"""Toolsetting Model."""
def __init__(self, resource_type_code=None, resource_url=None):
"""Init method for Toolsetting class."""
self._resource_type_code = resource_type_code
self._resource_url = resource_url
self.logger = logging.getLogger("py3canvas.Toolsetting")
@property
def resource_type_code(self):
"""the resource type code of the resource handler to use to display originality reports."""
return self._resource_type_code
@resource_type_code.setter
def resource_type_code(self, value):
"""Setter for resource_type_code property."""
self.logger.warn(
"Setting values on resource_type_code will NOT update the remote Canvas instance."
)
self._resource_type_code = value
@property
def resource_url(self):
"""a URL that may be used to override the launch URL inferred by the specified resource_type_code. If used a 'resource_type_code' must also be specified."""
return self._resource_url
@resource_url.setter
def resource_url(self, value):
"""Setter for resource_url property."""
self.logger.warn(
"Setting values on resource_url will NOT update the remote Canvas instance."
)
self._resource_url = value
class Originalityreport(BaseModel):
"""Originalityreport Model."""
def __init__(
self,
id=None,
file_id=None,
originality_score=None,
originality_report_file_id=None,
originality_report_url=None,
tool_setting=None,
error_report=None,
submission_time=None,
root_account_id=None,
):
"""Init method for Originalityreport class."""
self._id = id
self._file_id = file_id
self._originality_score = originality_score
self._originality_report_file_id = originality_report_file_id
self._originality_report_url = originality_report_url
self._tool_setting = tool_setting
self._error_report = error_report
self._submission_time = submission_time
self._root_account_id = root_account_id
self.logger = logging.getLogger("py3canvas.Originalityreport")
@property
def id(self):
"""The id of the OriginalityReport."""
return self._id
@id.setter
def id(self, value):
"""Setter for id property."""
self.logger.warn(
"Setting values on id will NOT update the remote Canvas instance."
)
self._id = value
@property
def file_id(self):
"""The id of the file receiving the originality score."""
return self._file_id
@file_id.setter
def file_id(self, value):
"""Setter for file_id property."""
self.logger.warn(
"Setting values on file_id will NOT update the remote Canvas instance."
)
self._file_id = value
@property
def originality_score(self):
"""A number between 0 and 100 representing the originality score."""
return self._originality_score
@originality_score.setter
def originality_score(self, value):
"""Setter for originality_score property."""
self.logger.warn(
"Setting values on originality_score will NOT update the remote Canvas instance."
)
self._originality_score = value
@property
def originality_report_file_id(self):
"""The ID of the file within Canvas containing the originality report document (if provided)."""
return self._originality_report_file_id
@originality_report_file_id.setter
def originality_report_file_id(self, value):
"""Setter for originality_report_file_id property."""
self.logger.warn(
"Setting values on originality_report_file_id will NOT update the remote Canvas instance."
)
self._originality_report_file_id = value
@property
def originality_report_url(self):
"""A non-LTI launch URL where the originality score of the file may be found."""
return self._originality_report_url
@originality_report_url.setter
def originality_report_url(self, value):
"""Setter for originality_report_url property."""
self.logger.warn(
"Setting values on originality_report_url will NOT update the remote Canvas instance."
)
self._originality_report_url = value
@property
def tool_setting(self):
"""A ToolSetting object containing optional 'resource_type_code' and 'resource_url'."""
return self._tool_setting
@tool_setting.setter
def tool_setting(self, value):
"""Setter for tool_setting property."""
self.logger.warn(
"Setting values on tool_setting will NOT update the remote Canvas instance."
)
self._tool_setting = value
@property
def error_report(self):
"""A message describing the error. If set, the workflow_state will become 'error.'."""
return self._error_report
@error_report.setter
def error_report(self, value):
"""Setter for error_report property."""
self.logger.warn(
"Setting values on error_report will NOT update the remote Canvas instance."
)
self._error_report = value
@property
def submission_time(self):
"""The submitted_at date time of the submission."""
return self._submission_time
@submission_time.setter
def submission_time(self, value):
"""Setter for submission_time property."""
self.logger.warn(
"Setting values on submission_time will NOT update the remote Canvas instance."
)
self._submission_time = value
@property
def root_account_id(self):
"""The id of the root Account associated with the OriginalityReport."""
return self._root_account_id
@root_account_id.setter
def root_account_id(self, value):
"""Setter for root_account_id property."""
self.logger.warn(
"Setting values on root_account_id will NOT update the remote Canvas instance."
)
self._root_account_id = value
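# Usage sketch (assumptions: BaseCanvasAPI is constructed with the Canvas
# instance URL and an API token, as with other generated py3canvas clients;
# all ids below are placeholders):
#   api = OriginalityReportsAPI('https://canvas.example.edu', 'API_TOKEN')
#   report = api.create_originality_report(
#       assignment_id=101,
#       originality_report_originality_score=42.0,
#       submission_id=202,
#       originality_report_file_id=303,
#   )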
| tylerclair/py3canvas | py3canvas/apis/originality_reports.py | Python | mit | 24,351 | 0.001766 |
# -*- coding: utf-8 -*-
"""
Created on Thu Jun 23 2017 at 14:20
@author: Mathias Aschwanden (mathias.aschwanden@gmail.com)
Makes various Variables available to the user.
IMPORTANT: All data has been included without warranty, express or implied.
References:
Molar Masses : From Wikipedia.org
"""
from . import ur
from . import entities as bs_entities
# VARIABLES
carbon = bs_entities.Variable('C', molar_mass=12.011*ur.gram/ur.mole)
carbon_dioxide = bs_entities.Variable('CO2', molar_mass=44.01*ur.gram/ur.mole)
methane = bs_entities.Variable('CH4', molar_mass=16.04*ur.gram/ur.mole)
phosphate = bs_entities.Variable('PO4', molar_mass=94.9714*ur.gram/ur.mole)
phosphorus = bs_entities.Variable('P', molar_mass=30.974*ur.gram/ur.mole)
nitrate = bs_entities.Variable('NO3', molar_mass=62.00*ur.gram/ur.mole)
nitrogen = bs_entities.Variable('N', molar_mass=14.007*ur.gram/ur.mole)
# PROCESSES
# REACTIONS
# BOXES
# SYSTEMS
| maschwanden/boxsimu | boxsimu/builtins.py | Python | mit | 834 | 0.002398 |
# This is your project's main settings file that can be committed to your
# repo. If you need to override a setting locally, use local.py
import dj_database_url
from funfactory.settings_base import *
# Django Settings
##############################################################################
# Note: be sure not to put any spaces in the env var
ADMINS = [('admin', email) for email in
os.environ.get('ADMIN_EMAILS', '').split(',')]
EMAIL_HOST = os.environ.get('EMAIL_HOST', 'localhost')
SERVER_EMAIL = os.environ.get('SERVER_EMAIL', 'root@localhost')
ROOT_URLCONF = 'nucleus.urls'
# Whether the app should run in debug-mode.
DEBUG = os.environ.get('DJANGO_DEBUG', False)
# Configure database from DATABASE_URL environment variable.
DATABASES = {'default': dj_database_url.config()}
# Pull secret keys from environment.
SECRET_KEY = os.environ.get('DJANGO_SECRET_KEY', '')
HMAC_KEYS = {'hmac_key': os.environ.get('DJANGO_HMAC_KEY', '')}
INSTALLED_APPS = [
# Nucleus and API apps.
'nucleus.base',
'rna',
# Django contrib apps.
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.staticfiles',
# Third-party apps, patches, fixes.
'south', # Must come before django_nose.
'commonware.response.cookies',
'django_browserid',
'django_extensions',
'django_nose',
'funfactory',
'pagedown',
'rest_framework',
'rest_framework.authtoken',
'session_csrf',
]
AUTHENTICATION_BACKENDS = [
'django_browserid.auth.BrowserIDBackend',
'django.contrib.auth.backends.ModelBackend',
]
TEMPLATE_CONTEXT_PROCESSORS = (
'django.contrib.auth.context_processors.auth',
'django.core.context_processors.debug',
'django.core.context_processors.media',
'django.core.context_processors.request',
'session_csrf.context_processor',
'django.contrib.messages.context_processors.messages',
'funfactory.context_processors.globals',
'django_browserid.context_processors.browserid',
)
MIDDLEWARE_CLASSES = (
'sslify.middleware.SSLifyMiddleware',
'multidb.middleware.PinningRouterMiddleware',
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'session_csrf.CsrfMiddleware', # Must be after auth middleware.
'django.contrib.messages.middleware.MessageMiddleware',
'commonware.middleware.FrameOptionsHeader',
)
LOGGING = {
'loggers': {
'playdoh': {
'level': logging.DEBUG
}
}
}
USE_TZ = True
# Needed for request.is_secure to work with stackato.
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
# Third-party Libary Settings
##############################################################################
# Testing configuration.
NOSE_ARGS = ['--logging-clear-handlers', '--logging-filter=-south']
# Template paths that contain non-Jinja templates.
JINGO_EXCLUDE_APPS = (
'admin',
'registration',
'rest_framework',
'rna',
'browserid',
)
# Always generate a CSRF token for anonymous users.
ANON_ALWAYS = True
REST_FRAMEWORK = {
# Use hyperlinked styles by default.
# Only used if the `serializer_class` attribute is not set on a view.
'DEFAULT_MODEL_SERIALIZER_CLASS':
'rna.serializers.HyperlinkedModelSerializerWithPkField',
# Use Django's standard `django.contrib.auth` permissions,
# or allow read-only access for unauthenticated users.
'DEFAULT_PERMISSION_CLASSES': (
'rest_framework.permissions.DjangoModelPermissionsOrAnonReadOnly',
),
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework.authentication.TokenAuthentication',
),
'DEFAULT_FILTER_BACKENDS': ('rna.filters.TimestampedFilterBackend',)
}
# django-browserid -- no spaces allowed in stackato env vars
BROWSERID_AUDIENCES = os.environ.get('BROWSERID_AUDIENCES',
'http://localhost:8000').split(',')
# Nucleus-specific Settings
##############################################################################
# Should robots.txt deny everything or disallow a calculated list of URLs we
# don't want to be crawled? Default is false, disallow everything.
ENGAGE_ROBOTS = False
# RNA (Release Notes) Configuration
RNA = {
'BASE_URL': os.environ.get(
'RNA_BASE_URL', 'https://nucleus.mozilla.org/rna/'),
'LEGACY_API': os.environ.get('RNA_LEGACY_API', False)
}
| jgmize/nucleus | nucleus/settings/base.py | Python | bsd-3-clause | 4,534 | 0.000221 |
from makerUtilities import writeFile
from makerUtilities import readFile
import os
def scaffold(systemDir, defaultTheme):
return (
"""<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8" />
<script src='file://"""
+ os.path.join(systemDir, "jquery.min.js")
+ """'></script>
<style type="text/css">
html {
background: -webkit-gradient(linear, left top, left bottom, from(#000), to(rgb(93,94,120)));
background-attachment:fixed;
}
body {
font-family: "Helvetica Neue";
font-size: 14px;
width:auto;
/* max-width:694px; */
color:#fff;
padding:20px 20px;
-webkit-transform: perspective( 600px );
}
a {
color: #ddd;
}
.thumbnail a {
text-decoration:none;
color:#000;
cursor:default;
}
p {
font-weight:lighter;
color:#fff;
letter-spacing:0.09em;
float:left;
font-size:0.9em;
line-height:1.45em;
text-align:left;
margin:-6px 0px 24px 10px;
}
h5 {
font-weight:lighter;
letter-spacing:0.050em;
margin:-28px 0px 0px 8px;
line-height:3em;
font-size:22px;
cursor:default;
}
img {
border:1px solid #333;
width:100%;
height:100%;
-webkit-box-reflect: below 0px -webkit-gradient(linear, left top, left bottom, from(transparent), color-stop(50%, transparent), to(rgba(0,0,0,0.2)));
-webkit-transform: perspective( 600px ) rotateY( 0deg);
margin-bottom:40px;
}
.row {
width:100%;
margin:0px 0px 40px 10px;
float:left;
clear:both;
}
.thumbnail {
width:17%;
padding:20px 20px 10px 20px;
margin:0px 20px 0px 0px;
float:left;
clear:right;
background:none;
}
.thumbnail img {
height:100px;
}
.thumbnail p {
text-align:center;
margin:-24px 0px 0px 0px;
width:100%;
font-size:14px;
cursor:default;
}
.thumbnail.selected {
border:1px solid #777;
padding:20px 20px 10px 20px;
-webkit-border-radius:10px;
background: -webkit-gradient(linear, left top, left bottom, from(rgba(140,140,140,0.1)), to(rgba(170,170,170,0.2)));
}
.info {
width:92%;
float:left;
clear:both;
display:none;
margin:40px 10px 0px 10px;
}
.info p {
float:left;
clear:right;
cursor:default;
}
.info img {
width:280px;
height:auto;
float:left;
clear:right;
margin:0px 48px 0px 8px;
-webkit-transform: perspective( 600px ) rotateY( 10deg );
/*
-webkit-transition: width, 0.5s;
*/
}
/*
.info img:hover {
width:320px;
-webkit-transform: perspective( 600px ) rotateY( 0deg );
}
*/
.info h5 {
margin-top:0px;
}
.info h5, p {
width:380px;
float:left;
}
a.button {
cursor:default;
color:#000;
}
a.button:active {
color:#000;
background: -webkit-gradient(linear, left top, left bottom, from(#eee), to(#bbb));
}
</style>
<script type="text/javascript">
$(document).ready(function(){
$('#"""
+ defaultTheme
+ """').addClass('selected');
$('#info-"""
+ defaultTheme
+ """').show();
$('.thumbnail').click(function(){
$('.info').hide();
$('.thumbnail').removeClass('selected')
$(this).addClass('selected');
$($(this).data('info')).show();
});
});
</script>
</head>
<body>
"""
+ createThumbnails(systemDir)
+ createInfo(systemDir)
+ """
</body>
</html>
"""
)
def buildView(systemDir, viewPath):
writeFile(
os.path.join(viewPath, "yourTemplates.html"),
scaffold(systemDir, defaultTemplate()),
)
return os.path.join(viewPath, "yourTemplates.html")
def defaultTemplate():
# ===========================================================================
# This is used to set the default template for the application
# ===========================================================================
return "Simple-Markdown"
def createThumbnails(systemDir):
thumbnails = "<div class='row'>\n"
for template in os.listdir(os.path.join(systemDir, "templates")):
if not template.startswith("."):
thumbnails += makeThumbnail(systemDir, template)
thumbnails += "</div>"
return thumbnails
def createInfo(systemDir):
info = "<div class='row'>\n"
for template in os.listdir(os.path.join(systemDir, "templates")):
if not template.startswith("."):
s = readFile(
os.path.join(systemDir, "templates", template, "parts", "info.json")
)
data = eval(s)
info += makeInfo(systemDir, template, data)
info += "</div>"
return info
def makeInfo(systemDir, templateName, data):
previewImage = os.path.join(
systemDir, "templates", templateName, "parts/preview.jpg"
)
info = (
"""
<div class="info" id="info-"""
+ data["Title"]
+ """">
<img src='"""
+ previewImage
+ """' />
<h5>"""
+ data["Title"]
+ """</h5>
<p>"""
+ data["Description"]
+ """<br /><br />
Credit: """
+ data["Credit"]
+ """<br />
Support: <a href='"""
+ data["Support"]
+ """'>www.makercms.org</a><br />
</p>
</div>
"""
)
return info
def makeThumbnail(systemDir, templateName):
previewImage = os.path.join(
systemDir, "templates", templateName, "parts/preview.jpg"
)
thumbnail = (
"""
<div class='thumbnail' id='"""
+ templateName
+ """' data-info='#info-"""
+ templateName
+ """'>
<a href='--"""
+ templateName
+ """--'>
<img src='"""
+ previewImage
+ """' />
<p>"""
+ templateName
+ """</p></a>
</div>
"""
)
return thumbnail
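# Usage sketch (paths below are placeholders):
#   page = buildView("/opt/maker/system", "/tmp/maker-view")
# buildView() writes "yourTemplates.html" into the view path and returns its
# full path; the page lists every template found under <systemDir>/templates.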
| geraldspreer/the-maker | makerTemplateViewBuilder.py | Python | gpl-3.0 | 7,622 | 0.009315 |
import re
from sqlalchemy import Column
from sqlalchemy import Integer
from sqlalchemy import MetaData
from sqlalchemy import String
from sqlalchemy import Table
from sqlalchemy.sql.sqltypes import DateTime
from alembic import autogenerate
from alembic.migration import MigrationContext
from alembic.testing import eq_
from alembic.testing import TestBase
from alembic.testing.suite._autogen_fixtures import _default_include_object
from alembic.testing.suite._autogen_fixtures import AutogenTest
from alembic.testing.suite._autogen_fixtures import ModelOne
class AutogenerateDiffTest(ModelOne, AutogenTest, TestBase):
__only_on__ = "sqlite"
def test_render_nothing(self):
context = MigrationContext.configure(
connection=self.bind.connect(),
opts={
"compare_type": True,
"compare_server_default": True,
"target_metadata": self.m1,
"upgrade_token": "upgrades",
"downgrade_token": "downgrades",
},
)
template_args = {}
autogenerate._render_migration_diffs(context, template_args)
eq_(
re.sub(r"u'", "'", template_args["upgrades"]),
"""# ### commands auto generated by Alembic - please adjust! ###
pass
# ### end Alembic commands ###""",
)
eq_(
re.sub(r"u'", "'", template_args["downgrades"]),
"""# ### commands auto generated by Alembic - please adjust! ###
pass
# ### end Alembic commands ###""",
)
def test_render_nothing_batch(self):
context = MigrationContext.configure(
connection=self.bind.connect(),
opts={
"compare_type": True,
"compare_server_default": True,
"target_metadata": self.m1,
"upgrade_token": "upgrades",
"downgrade_token": "downgrades",
"alembic_module_prefix": "op.",
"sqlalchemy_module_prefix": "sa.",
"render_as_batch": True,
"include_symbol": lambda name, schema: False,
},
)
template_args = {}
autogenerate._render_migration_diffs(context, template_args)
eq_(
re.sub(r"u'", "'", template_args["upgrades"]),
"""# ### commands auto generated by Alembic - please adjust! ###
pass
# ### end Alembic commands ###""",
)
eq_(
re.sub(r"u'", "'", template_args["downgrades"]),
"""# ### commands auto generated by Alembic - please adjust! ###
pass
# ### end Alembic commands ###""",
)
def test_render_diffs_standard(self):
"""test a full render including indentation"""
template_args = {}
autogenerate._render_migration_diffs(self.context, template_args)
eq_(
re.sub(r"u'", "'", template_args["upgrades"]),
"""# ### commands auto generated by Alembic - please adjust! ###
op.create_table('item',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('description', sa.String(length=100), nullable=True),
sa.Column('order_id', sa.Integer(), nullable=True),
sa.CheckConstraint('len(description) > 5'),
sa.ForeignKeyConstraint(['order_id'], ['order.order_id'], ),
sa.PrimaryKeyConstraint('id')
)
op.drop_table('extra')
op.add_column('address', sa.Column('street', sa.String(length=50), \
nullable=True))
op.create_unique_constraint('uq_email', 'address', ['email_address'])
op.add_column('order', sa.Column('user_id', sa.Integer(), nullable=True))
op.alter_column('order', 'amount',
existing_type=sa.NUMERIC(precision=8, scale=2),
type_=sa.Numeric(precision=10, scale=2),
nullable=True,
existing_server_default=sa.text('0'))
op.create_foreign_key(None, 'order', 'user', ['user_id'], ['id'])
op.alter_column('user', 'name',
existing_type=sa.VARCHAR(length=50),
nullable=False)
op.alter_column('user', 'a1',
existing_type=sa.TEXT(),
server_default='x',
existing_nullable=True)
op.drop_index('pw_idx', table_name='user')
op.drop_column('user', 'pw')
# ### end Alembic commands ###""",
)
eq_(
re.sub(r"u'", "'", template_args["downgrades"]),
"""# ### commands auto generated by Alembic - please adjust! ###
op.add_column('user', sa.Column('pw', sa.VARCHAR(length=50), \
nullable=True))
op.create_index('pw_idx', 'user', ['pw'], unique=False)
op.alter_column('user', 'a1',
existing_type=sa.TEXT(),
server_default=None,
existing_nullable=True)
op.alter_column('user', 'name',
existing_type=sa.VARCHAR(length=50),
nullable=True)
op.drop_constraint(None, 'order', type_='foreignkey')
op.alter_column('order', 'amount',
existing_type=sa.Numeric(precision=10, scale=2),
type_=sa.NUMERIC(precision=8, scale=2),
nullable=False,
existing_server_default=sa.text('0'))
op.drop_column('order', 'user_id')
op.drop_constraint('uq_email', 'address', type_='unique')
op.drop_column('address', 'street')
op.create_table('extra',
sa.Column('x', sa.CHAR(), nullable=True),
sa.Column('uid', sa.INTEGER(), nullable=True),
sa.ForeignKeyConstraint(['uid'], ['user.id'], )
)
op.drop_table('item')
# ### end Alembic commands ###""",
)
def test_render_diffs_batch(self):
"""test a full render in batch mode including indentation"""
template_args = {}
self.context.opts["render_as_batch"] = True
autogenerate._render_migration_diffs(self.context, template_args)
eq_(
re.sub(r"u'", "'", template_args["upgrades"]),
"""# ### commands auto generated by Alembic - please adjust! ###
op.create_table('item',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('description', sa.String(length=100), nullable=True),
sa.Column('order_id', sa.Integer(), nullable=True),
sa.CheckConstraint('len(description) > 5'),
sa.ForeignKeyConstraint(['order_id'], ['order.order_id'], ),
sa.PrimaryKeyConstraint('id')
)
op.drop_table('extra')
with op.batch_alter_table('address', schema=None) as batch_op:
batch_op.add_column(sa.Column('street', sa.String(length=50), nullable=True))
batch_op.create_unique_constraint('uq_email', ['email_address'])
with op.batch_alter_table('order', schema=None) as batch_op:
batch_op.add_column(sa.Column('user_id', sa.Integer(), nullable=True))
batch_op.alter_column('amount',
existing_type=sa.NUMERIC(precision=8, scale=2),
type_=sa.Numeric(precision=10, scale=2),
nullable=True,
existing_server_default=sa.text('0'))
batch_op.create_foreign_key(None, 'user', ['user_id'], ['id'])
with op.batch_alter_table('user', schema=None) as batch_op:
batch_op.alter_column('name',
existing_type=sa.VARCHAR(length=50),
nullable=False)
batch_op.alter_column('a1',
existing_type=sa.TEXT(),
server_default='x',
existing_nullable=True)
batch_op.drop_index('pw_idx')
batch_op.drop_column('pw')
# ### end Alembic commands ###""", # noqa,
)
eq_(
re.sub(r"u'", "'", template_args["downgrades"]),
"""# ### commands auto generated by Alembic - please adjust! ###
with op.batch_alter_table('user', schema=None) as batch_op:
batch_op.add_column(sa.Column('pw', sa.VARCHAR(length=50), nullable=True))
batch_op.create_index('pw_idx', ['pw'], unique=False)
batch_op.alter_column('a1',
existing_type=sa.TEXT(),
server_default=None,
existing_nullable=True)
batch_op.alter_column('name',
existing_type=sa.VARCHAR(length=50),
nullable=True)
with op.batch_alter_table('order', schema=None) as batch_op:
batch_op.drop_constraint(None, type_='foreignkey')
batch_op.alter_column('amount',
existing_type=sa.Numeric(precision=10, scale=2),
type_=sa.NUMERIC(precision=8, scale=2),
nullable=False,
existing_server_default=sa.text('0'))
batch_op.drop_column('user_id')
with op.batch_alter_table('address', schema=None) as batch_op:
batch_op.drop_constraint('uq_email', type_='unique')
batch_op.drop_column('street')
op.create_table('extra',
sa.Column('x', sa.CHAR(), nullable=True),
sa.Column('uid', sa.INTEGER(), nullable=True),
sa.ForeignKeyConstraint(['uid'], ['user.id'], )
)
op.drop_table('item')
# ### end Alembic commands ###""", # noqa,
)
def test_imports_maintined(self):
template_args = {}
self.context.opts["render_as_batch"] = True
def render_item(type_, col, autogen_context):
autogen_context.imports.add(
"from mypackage import my_special_import"
)
autogen_context.imports.add("from foobar import bat")
self.context.opts["render_item"] = render_item
autogenerate._render_migration_diffs(self.context, template_args)
eq_(
set(template_args["imports"].split("\n")),
set(
[
"from foobar import bat",
"from mypackage import my_special_import",
]
),
)
class AddColumnOrderTest(AutogenTest, TestBase):
@classmethod
def _get_db_schema(cls):
m = MetaData()
Table(
"user",
m,
Column("id", Integer, primary_key=True),
Column("name", String(50)),
)
return m
@classmethod
def _get_model_schema(cls):
m = MetaData()
Table(
"user",
m,
Column("id", Integer, primary_key=True),
Column("name", String(50)),
Column("username", String(50)),
Column("password_hash", String(32)),
Column("timestamp", DateTime),
)
return m
def test_render_add_columns(self):
"""test #827"""
template_args = {}
autogenerate._render_migration_diffs(self.context, template_args)
eq_(
re.sub(r"u'", "'", template_args["upgrades"]),
"""# ### commands auto generated by Alembic - please adjust! ###
op.add_column('user', sa.Column('username', sa.String(length=50), nullable=True))
op.add_column('user', sa.Column('password_hash', sa.String(length=32), nullable=True))
op.add_column('user', sa.Column('timestamp', sa.DateTime(), nullable=True))
# ### end Alembic commands ###""", # noqa E501
)
eq_(
re.sub(r"u'", "'", template_args["downgrades"]),
"""# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('user', 'timestamp')
op.drop_column('user', 'password_hash')
op.drop_column('user', 'username')
# ### end Alembic commands ###""",
)
class AutogenerateDiffTestWSchema(ModelOne, AutogenTest, TestBase):
__only_on__ = "postgresql"
schema = "test_schema"
def test_render_nothing(self):
context = MigrationContext.configure(
connection=self.bind.connect(),
opts={
"compare_type": True,
"compare_server_default": True,
"target_metadata": self.m1,
"upgrade_token": "upgrades",
"downgrade_token": "downgrades",
"alembic_module_prefix": "op.",
"sqlalchemy_module_prefix": "sa.",
"include_object": lambda name, *args: False,
},
)
template_args = {}
autogenerate._render_migration_diffs(context, template_args)
eq_(
re.sub(r"u'", "'", template_args["upgrades"]),
"""# ### commands auto generated by Alembic - please adjust! ###
pass
# ### end Alembic commands ###""",
)
eq_(
re.sub(r"u'", "'", template_args["downgrades"]),
"""# ### commands auto generated by Alembic - please adjust! ###
pass
# ### end Alembic commands ###""",
)
def test_render_diffs_extras(self):
"""test a full render including indentation (include and schema)"""
template_args = {}
self.context.opts.update(
{
"include_object": _default_include_object,
"include_schemas": True,
}
)
autogenerate._render_migration_diffs(self.context, template_args)
eq_(
re.sub(r"u'", "'", template_args["upgrades"]),
"""# ### commands auto generated by Alembic - please adjust! ###
op.create_table('item',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('description', sa.String(length=100), nullable=True),
sa.Column('order_id', sa.Integer(), nullable=True),
sa.CheckConstraint('len(description) > 5'),
sa.ForeignKeyConstraint(['order_id'], ['%(schema)s.order.order_id'], ),
sa.PrimaryKeyConstraint('id'),
schema='%(schema)s'
)
op.drop_table('extra', schema='%(schema)s')
op.add_column('address', sa.Column('street', sa.String(length=50), \
nullable=True), schema='%(schema)s')
op.create_unique_constraint('uq_email', 'address', ['email_address'], \
schema='test_schema')
op.add_column('order', sa.Column('user_id', sa.Integer(), nullable=True), \
schema='%(schema)s')
op.alter_column('order', 'amount',
existing_type=sa.NUMERIC(precision=8, scale=2),
type_=sa.Numeric(precision=10, scale=2),
nullable=True,
existing_server_default=sa.text('0'),
schema='%(schema)s')
op.create_foreign_key(None, 'order', 'user', ['user_id'], ['id'], \
source_schema='%(schema)s', referent_schema='%(schema)s')
op.alter_column('user', 'name',
existing_type=sa.VARCHAR(length=50),
nullable=False,
schema='%(schema)s')
op.alter_column('user', 'a1',
existing_type=sa.TEXT(),
server_default='x',
existing_nullable=True,
schema='%(schema)s')
op.drop_index('pw_idx', table_name='user', schema='test_schema')
op.drop_column('user', 'pw', schema='%(schema)s')
# ### end Alembic commands ###"""
% {"schema": self.schema},
)
eq_(
re.sub(r"u'", "'", template_args["downgrades"]),
"""# ### commands auto generated by Alembic - please adjust! ###
op.add_column('user', sa.Column('pw', sa.VARCHAR(length=50), \
autoincrement=False, nullable=True), schema='%(schema)s')
op.create_index('pw_idx', 'user', ['pw'], unique=False, schema='%(schema)s')
op.alter_column('user', 'a1',
existing_type=sa.TEXT(),
server_default=None,
existing_nullable=True,
schema='%(schema)s')
op.alter_column('user', 'name',
existing_type=sa.VARCHAR(length=50),
nullable=True,
schema='%(schema)s')
op.drop_constraint(None, 'order', schema='%(schema)s', type_='foreignkey')
op.alter_column('order', 'amount',
existing_type=sa.Numeric(precision=10, scale=2),
type_=sa.NUMERIC(precision=8, scale=2),
nullable=False,
existing_server_default=sa.text('0'),
schema='%(schema)s')
op.drop_column('order', 'user_id', schema='%(schema)s')
op.drop_constraint('uq_email', 'address', schema='test_schema', type_='unique')
op.drop_column('address', 'street', schema='%(schema)s')
op.create_table('extra',
sa.Column('x', sa.CHAR(length=1), autoincrement=False, nullable=True),
sa.Column('uid', sa.INTEGER(), autoincrement=False, nullable=True),
sa.ForeignKeyConstraint(['uid'], ['%(schema)s.user.id'], \
name='extra_uid_fkey'),
schema='%(schema)s'
)
op.drop_table('item', schema='%(schema)s')
# ### end Alembic commands ###""" # noqa
% {"schema": self.schema},
)
| sqlalchemy/alembic | tests/test_autogen_composition.py | Python | mit | 16,541 | 0 |
"""
base 36 encoding/decoding taken from wikipedia sample code
http://en.wikipedia.org/wiki/Base_36#Python_Conversion_Code
"""
def encode(number, alphabet='0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ'):
"""Converts an integer to a base36 string."""
if not isinstance(number, (int, long)):
raise TypeError('number must be an integer')
if number >= 0 and number <= 9:
return alphabet[number]
base36 = ''
sign = ''
if number < 0:
sign = '-'
number = -number
while number != 0:
number, i = divmod(number, len(alphabet))
base36 = alphabet[i] + base36
return sign + base36
def decode(number):
"""Converts a base36 string to an integer."""
return int(number, 36)
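# Small usage sketch (sample values are arbitrary):
if __name__ == '__main__':
    assert encode(35) == 'Z'
    assert encode(36) == '10'
    assert decode('10') == 36
    assert decode(encode(123456789)) == 123456789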
| dkm/skylines | skylines/lib/base36.py | Python | agpl-3.0 | 746 | 0 |
from django.contrib import admin
from helpcenter import models
class ArticleAdmin(admin.ModelAdmin):
""" Admin for the Article model """
date_hierarchy = 'time_published'
fieldsets = (
(None, {
'fields': ('category', 'title', 'body')
}),
('Publishing Options', {
'classes': ('collapse',),
'fields': ('draft', 'time_published')
}))
list_display = (
'title', 'category', 'time_published', 'time_edited', 'draft')
search_fields = ('title',)
class CategoryAdmin(admin.ModelAdmin):
""" Admin for the Category model """
fieldsets = (
(None, {
'fields': ('parent', 'title')
}),)
list_display = ('title', 'parent')
search_fields = ('title',)
admin.site.register(models.Article, ArticleAdmin)
admin.site.register(models.Category, CategoryAdmin)
| smalls12/django_helpcenter | helpcenter/admin.py | Python | mit | 883 | 0 |
# Copyright (C) 2013 Bernd Feige
# This file is part of avg_q and released under the GPL v3 (see avg_q/COPYING).
"""
Presentation utilities.
"""
from . import trgfile
class PresLog(object):
# Basic log file reading.
def __init__(self,logfile,part='events'):
'''part can be 'events' or 'trials' for the first or second part'''
self.logfile=logfile
self.log=open(self.logfile,"r")
fileheader=next(self.log).rstrip('\r\n')
if not fileheader.startswith('Scenario -'):
raise Exception("PresLog: File doesn't start with 'Scenario'")
self.scenario=fileheader[11:]
#print("Scenario: %s" % self.scenario)
fileheader2=next(self.log).rstrip('\r\n')
#print("fileheader2: %s" % fileheader2)
if fileheader2.startswith('Logfile written - '):
import datetime
self.timestamp=datetime.datetime.strptime(fileheader2[18:],"%m/%d/%Y %H:%M:%S")
#print(self.timestamp)
else:
self.timestamp=None
table_start=['Subject','Trial'] if part=='events' else ['Event Type']
self.header_fields=None
for line in self.log:
fields=line.rstrip('\r\n').split('\t')
if len(fields)<=1: continue
if self.header_fields is None:
# The first table is skipped...
if fields[0] in table_start:
self.header_fields=fields
self.atstart=True
break
def __iter__(self):
for line in self.log:
fields=line.rstrip('\r\n').split('\t')
if len(fields)<=1:
# Only at the start skip empty line(s)
if self.atstart: continue
else: break
self.atstart=False
yield fields
def __del__(self):
self.close()
def close(self):
if self.log:
self.log.close()
self.log=None
class PresLogfile(trgfile.trgfile):
def __init__(self,logfile,part='events'):
self.PL=PresLog(logfile,part)
trgfile.trgfile.__init__(self,self.PL)
self.preamble['Sfreq']=10000.0
def rdr(self):
for fields in self.reader:
data=dict(zip(self.PL.header_fields,fields))
point=int(data['Time'])
description=data['Event Type']
try:
code=int(data['Code'])
except:
code= -1
description=' '.join([description,data['Code']])
yield (point, code, description)
def close(self):
if self.PL:
self.PL.close()
self.PL=None
def gettuples_abstime(self):
# We are calculating backwards from the time the log was written, which is given
# in local time, and it may happen that a DST switch occurred between start and end.
# Most plots, simply working for a given time from the start, are totally okay if you don't
# mind that the end times are still in the old frame, but since the local time here may
# already be in the new frame we have to correct to achieve this "work-from-start" behavior.
import pytz
tuples=self.gettuples()
sfreq=float(self.preamble.get('Sfreq'))
last_s=pytz.datetime.timedelta(seconds=tuples[-1][0]/sfreq)
tz_aware_end=pytz.timezone('Europe/Berlin').localize(self.PL.timestamp)
# This computes the correct local start time considering a possible DST switch and
# converts it to the TZ-unaware local time we really want...
self.start_datetime=tz_aware_end.tzinfo.normalize(tz_aware_end-last_s).replace(tzinfo=None)
return trgfile.trgfile.gettuples_abstime(self)
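# Usage sketch (the log file name is a placeholder). PresLogfile exposes the
# trgfile interface, so the Presentation events come back as trigger tuples:
#   log = PresLogfile('session.log', part='events')
#   tuples = log.gettuples()          # (point, code, description) triples
#   log.close()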
| berndf/avg_q | python/avg_q/Presentation.py | Python | gpl-3.0 | 3,161 | 0.043341 |
"""empty message
Revision ID: c626e32ddcc
Revises: None
Create Date: 2016-01-23 14:47:09.205628
"""
# revision identifiers, used by Alembic.
revision = 'c626e32ddcc'
down_revision = None
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.create_table('roles',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(length=64), nullable=True),
sa.Column('default', sa.Boolean(), nullable=True),
sa.Column('permissions', sa.Integer(), nullable=True),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('name')
)
op.create_index('ix_roles_default', 'roles', ['default'], unique=False)
op.create_table('users',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('email', sa.String(length=64), nullable=True),
sa.Column('username', sa.String(length=64), nullable=True),
sa.Column('role_id', sa.Integer(), nullable=True),
sa.Column('password_hash', sa.String(length=128), nullable=True),
sa.Column('confirmed', sa.Boolean(), nullable=True),
sa.Column('name', sa.String(length=64), nullable=True),
sa.Column('location', sa.String(length=64), nullable=True),
sa.Column('about_me', sa.Text(), nullable=True),
sa.Column('member_since', sa.DateTime(), nullable=True),
sa.Column('last_seen', sa.DateTime(), nullable=True),
sa.Column('avatar_hash', sa.String(length=32), nullable=True),
sa.ForeignKeyConstraint(['role_id'], ['roles.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_index('ix_users_email', 'users', ['email'], unique=True)
op.create_index('ix_users_username', 'users', ['username'], unique=True)
op.create_table('posts',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('body', sa.Text(), nullable=True),
sa.Column('body_html', sa.Text(), nullable=True),
sa.Column('timestamp', sa.DateTime(), nullable=True),
sa.Column('author_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['author_id'], ['users.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_index('ix_posts_timestamp', 'posts', ['timestamp'], unique=False)
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_index('ix_posts_timestamp', 'posts')
op.drop_table('posts')
op.drop_index('ix_users_username', 'users')
op.drop_index('ix_users_email', 'users')
op.drop_table('users')
op.drop_index('ix_roles_default', 'roles')
op.drop_table('roles')
### end Alembic commands ###
| athkishore/vgr | migrations/versions/c626e32ddcc_.py | Python | mit | 2,615 | 0.01262 |
'''
Compute Hilbert Class Polynomials
'''
from mpmath import *
import mpmath
round = lambda x: mpmath.floor(x + 0.5)
def hilbert(d):
'''
Compute Hilbert Class Polynomial.
Follows pseudo code from Algorithm 7.5.8
Args:
d: fundamental discriminant
Returns:
Hilbert class number, Hilbert class polynomial, and all reduced forms
'''
# initialize
t = [1]
b = d % 2
r = floor(sqrt((-d)/3))
h = 0
red = set()
reduced_forms = reduced_form(d) # print h1
a_inverse_sum = sum(1/mpf(form[0]) for form in reduced_forms)
precision = round(pi*sqrt(-d)*a_inverse_sum / log(10)) + 10
mpmath.mp.dps = precision
# outer loop
while b <= r:
m = (b*b - d) / 4
m_sqrt = int(floor(sqrt(m)))
for a in range(1, m_sqrt+1):
if m % a != 0:
continue
c = m/a
if b > a:
continue
# optional polynomial setup
tau = (-b + 1j * sqrt(-d)) / (2*a)
f = power(dedekind_eta(2 * tau, precision) / dedekind_eta(tau, precision), 24)
j = power((256 * f + 1), 3) / f
if b==a or c==a or b==0:
# T = T * (X-j)
t = polynomial_mul(t, [-j, 1])
h += 1
red.add((a, b, c))
else:
poly = [j.real * j.real + j.imag * j.imag, -2 * j.real, 1]
t = polynomial_mul(t, poly)
h += 2
red.add((a, b, c))
red.add((a, -b, c))
b += 2
if red != reduced_forms:
raise ValueError('Reduced form inconsistent.')
return h, [int(floor(mpmath.re(p) + 0.5)) for p in t], red
def reduced_form(d):
'''
Given discriminant D compute its reduced forms. Used to calculate preicion
Args:
d:
Returns:
'''
# initialize
b = d % 2
r = floor(sqrt((-d)/3))
h = 0
red = set()
# outer loop
while b <= r:
m = (b*b - d) / 4
m_sqrt = int(floor(sqrt(m)))
for a in range(1, m_sqrt+1):
if m % a != 0:
continue
c = m / a
if b > a:
continue
# optional polynomial setup
if b==a or c==a or b==0:
# T = T * (X-j)
h += 1
red.add((a, b, c))
else:
h += 2
red.add((a, b, c))
red.add((a, -b, c))
b += 2
return red
def delta(q):
return q
def dedekind_eta(tau, precision):
"""
Implementation of dedekind's eta function.
This implementation follows the idea in NZMATH's implementation
Args:
tau:
precision: The desired position
Returns:
evalution of dedekind's eta function
"""
# a = 2 * mpmath.pi / mpmath.mpf(24)
# b = mpmath.exp(mpmath.mpc(0, a))
x = exp(mpc(0, 2 * pi / mpf(24)))
# b = e^(2pi*i/24)
outer = 1
absolute = 0
# functional equations
while absolute <= 1 - 0.1**5:
real_tau = round(tau.real)
if real_tau != 0:
tau -= real_tau
outer *= x ** real_tau
absolute = fabs(tau)
if absolute > 1 - 0.1**5:
break
ro = sqrt(power(tau, -1)*1j)
# ro = sqrt((tau^-1)*i)
if ro.real < 0:
ro = -ro
outer = outer*ro
tau = (-outer.real + outer.imag*1j) / absolute
#print 'tau=', tau, '\n p =', p
q1 = mpmath.exp((pi/12) * tau * 1j)
q = q1**24
# q = e^(2pi*tau*i)
sum = 1
qs = mpmath.mpc(1, 0)
qn = 1
bound = mpmath.mpf(10)**(-precision-2)
while fabs(qs) > bound:
t = -q*qn*qn*qs
qn *= q
qs = qn*t
sum += t + qs
return outer*q1*sum
# Compare to wolfram alpha the result is correct.
def polynomial_mul(p1, p2):
'''
Used to Compute T = T * (X-j)
'''
if len(p1) == 0 or len(p2) == 0:
raise ValueError('Polynomial Array empty.')
m = [0] * (len(p1) + len(p2) - 1)
for i in range(0, len(p1)):
for j in range(0, len(p2)):
m[i+j] += p1[i] * p2[j]
return m
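# Self-check sketch (D = -23 is an arbitrary fundamental discriminant with
# class number 3):
if __name__ == '__main__':
    # polynomial_mul works on coefficient lists, constant term first:
    # (1 + 2x)(3 + x) = 3 + 7x + 2x^2
    assert polynomial_mul([1, 2], [3, 1]) == [3, 7, 2]
    h, poly, red = hilbert(-23)
    assert h == 3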
| root-z/ECPP | hilbert.py | Python | gpl-2.0 | 4,207 | 0.002377 |
# coding: utf-8
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
"""
FILE: sample_recognize_custom_forms_async.py
DESCRIPTION:
This sample demonstrates how to analyze a form from a document with a custom
trained model. The form must be of the same type as the forms the custom model
was trained on. To learn how to train your own models, look at
sample_train_model_without_labels_async.py and sample_train_model_with_labels_async.py
The model can be trained using the training files found here:
https://aka.ms/azsdk/formrecognizer/sampletrainingfiles-v3.1
USAGE:
python sample_recognize_custom_forms_async.py
Set the environment variables with your own values before running the sample:
1) AZURE_FORM_RECOGNIZER_ENDPOINT - the endpoint to your Cognitive Services resource.
2) AZURE_FORM_RECOGNIZER_KEY - your Form Recognizer API key
3) CUSTOM_TRAINED_MODEL_ID - the ID of your custom trained model
-OR-
CONTAINER_SAS_URL_V2 - The shared access signature (SAS) Url of your Azure Blob Storage container with your forms.
A model will be trained and used to run the sample.
"""
import os
import asyncio
class RecognizeCustomFormsSampleAsync(object):
async def recognize_custom_forms(self, custom_model_id):
path_to_sample_forms = os.path.abspath(os.path.join(os.path.abspath(__file__),
"..", "..", "..", "./sample_forms/forms/Form_1.jpg"))
# [START recognize_custom_forms_async]
from azure.core.credentials import AzureKeyCredential
from azure.ai.formrecognizer.aio import FormRecognizerClient
endpoint = os.environ["AZURE_FORM_RECOGNIZER_ENDPOINT"]
key = os.environ["AZURE_FORM_RECOGNIZER_KEY"]
model_id = os.getenv("CUSTOM_TRAINED_MODEL_ID", custom_model_id)
async with FormRecognizerClient(
endpoint=endpoint, credential=AzureKeyCredential(key)
) as form_recognizer_client:
# Make sure your form's type is included in the list of form types the custom model can recognize
with open(path_to_sample_forms, "rb") as f:
poller = await form_recognizer_client.begin_recognize_custom_forms(
model_id=model_id, form=f, include_field_elements=True
)
forms = await poller.result()
for idx, form in enumerate(forms):
print("--------Recognizing Form #{}--------".format(idx+1))
print("Form has type {}".format(form.form_type))
print("Form has form type confidence {}".format(form.form_type_confidence))
print("Form was analyzed with model with ID {}".format(form.model_id))
for name, field in form.fields.items():
# each field is of type FormField
# label_data is populated if you are using a model trained without labels,
# since the service needs to make predictions for labels if not explicitly given to it.
if field.label_data:
print("...Field '{}' has label '{}' with a confidence score of {}".format(
name,
field.label_data.text,
field.confidence
))
print("...Label '{}' has value '{}' with a confidence score of {}".format(
field.label_data.text if field.label_data else name, field.value, field.confidence
))
# iterate over tables, lines, and selection marks on each page
for page in form.pages:
for i, table in enumerate(page.tables):
print("\nTable {} on page {}".format(i + 1, table.page_number))
for cell in table.cells:
print("...Cell[{}][{}] has text '{}' with confidence {}".format(
cell.row_index, cell.column_index, cell.text, cell.confidence
))
print("\nLines found on page {}".format(page.page_number))
for line in page.lines:
print("...Line '{}' is made up of the following words: ".format(line.text))
for word in line.words:
print("......Word '{}' has a confidence of {}".format(
word.text,
word.confidence
))
if page.selection_marks:
print("\nSelection marks found on page {}".format(page.page_number))
for selection_mark in page.selection_marks:
print("......Selection mark is '{}' and has a confidence of {}".format(
selection_mark.state,
selection_mark.confidence
))
print("-----------------------------------")
# [END recognize_custom_forms_async]
async def main():
sample = RecognizeCustomFormsSampleAsync()
model_id = None
if os.getenv("CONTAINER_SAS_URL_V2"):
from azure.core.credentials import AzureKeyCredential
from azure.ai.formrecognizer.aio import FormTrainingClient
endpoint = os.getenv("AZURE_FORM_RECOGNIZER_ENDPOINT")
key = os.getenv("AZURE_FORM_RECOGNIZER_KEY")
if not endpoint or not key:
raise ValueError("Please provide endpoint and API key to run the samples.")
form_training_client = FormTrainingClient(
endpoint=endpoint, credential=AzureKeyCredential(key)
)
async with form_training_client:
model = await (await form_training_client.begin_training(
os.getenv("CONTAINER_SAS_URL_V2"), use_training_labels=True)).result()
model_id = model.model_id
await sample.recognize_custom_forms(model_id)
if __name__ == '__main__':
asyncio.run(main())
| Azure/azure-sdk-for-python | sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.1/async_samples/sample_recognize_custom_forms_async.py | Python | mit | 6,386 | 0.004071 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('comercial', '0053_auto_20151118_1323'),
]
operations = [
migrations.AddField(
model_name='tipodeproposta',
name='tipo_contrato_mapeado',
field=models.ForeignKey(blank=True, to='comercial.TipodeContratoFechado', null=True),
),
]
| dudanogueira/microerp | microerp/comercial/migrations/0054_tipodeproposta_tipo_contrato_mapeado.py | Python | lgpl-3.0 | 472 | 0.002119 |
from lokp.models import DBSession
from lokp.protocols.activity_protocol import ActivityProtocol
from lokp.review.review import BaseReview
class ActivityReview(BaseReview):
def __init__(self, request):
super(ActivityReview, self).__init__(request)
self.protocol = ActivityProtocol(DBSession)
| CDE-UNIBE/lokp | lokp/review/activities.py | Python | gpl-3.0 | 314 | 0 |
from sys import stdin
import signal
# for i in xrange(1,10):
# print "Stuff", i
# print stdin.readline()
import os
pid = int(stdin.readline().strip())
print pid
os.kill(pid, signal.SIGINT) | sbarton272/StreetPong | IPC/printer.py | Python | apache-2.0 | 195 | 0.005128 |
#
# Test database for rsvndump
# written by Jonas Gehring
#
import os
import test_api
def info():
return "Add after delete test"
def setup(step, log):
if step == 0:
os.mkdir("dir1")
f = open("dir1/file1","wb")
print >>f, "hello1"
print >>f, "hello2"
f = open("dir1/file2","wb")
print >>f, "hello3"
test_api.run("svn", "add", "dir1", output = log)
return True
elif step == 1:
f = open("file1","wb")
print >>f, "hello4"
f = open("file12","wb")
print >>f, "hello5"
test_api.run("svn", "add", "file1", "file12", output = log)
return True
elif step == 2:
test_api.run("svn", "rm", "file1", output=log)
return True
elif step == 3:
f = open("file12","ab")
print >>f, "hello6"
return True
elif step == 4:
test_api.run("svn", "rm", "dir1", output=log)
return True
elif step == 5:
os.mkdir("dir1")
f = open("dir1/file1","wb")
print >>f, "hello7"
f = open("dir1/file2","wb")
print >>f, "hello8"
print >>f, "hello9"
test_api.run("svn", "add", "dir1", output = log)
return True
elif step == 6:
f = open("dir1/file1","ab")
print >>f, "hello10"
return True
else:
return False
# Runs the test
def run(id, args = []):
# Set up the test repository
test_api.setup_repos(id, setup)
odump_path = test_api.dump_original(id)
rdump_path = test_api.dump_rsvndump(id, args)
vdump_path = test_api.dump_reload(id, rdump_path)
return test_api.diff(id, odump_path, vdump_path)
| jgehring/rsvndump | tests/db/tests/delete_add.py | Python | gpl-3.0 | 1,440 | 0.046528 |
#!/usr/bin/env python
# coding: utf-8
from setuptools import setup, find_packages
from b2tob3 import VERSION
with open('README.rst') as f:
README = f.read()
with open('LICENSE') as f:
LICENSE = f.read()
setup(
name='b2tob3',
version=VERSION,
packages=find_packages(),
long_description=README,
license=LICENSE,
author='Ramiro Gómez',
author_email='code@ramiro.org',
description='Help migrate HTML files and templates form bootstrap 2 to 3.',
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Console',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: POSIX',
'Programming Language :: Python',
'Topic :: Communications',
'Topic :: Text Processing',
],
entry_points={
'console_scripts': [
'b2tob3=b2tob3.b2tob3:main'
]
}
) | metrey/b2tob3 | setup.py | Python | mit | 935 | 0.001071 |
from utils.edit_configs import get_json
class Logger:
config = get_json('server_configs')
log_config = {}
_instances = {}
@staticmethod
def get_singleton(client):
if client.shard_id not in Logger._instances:
Logger._instances[client.shard_id] = Logger()
return Logger._instances[client.shard_id]
@staticmethod
async def update(client):
config = get_json('server_configs')
log_config = {k: v['logging'] for k, v in config.items() if 'logging' in v}
temp = {}
for i in log_config:
ch = client.get_channel(log_config[i])
if ch:
temp[i] = ch
Logger.log_config[client.shard_id] = temp
@staticmethod
async def register_client_events(client):
await Logger.update(client)
@client.async_event
async def on_message_delete(message):
if message.server is not None and message.server.id in Logger.log_config[client.shard_id]:
logs = Logger.log_config[client.shard_id][message.server.id]
to_send = ':x: **Message deleted**: {0.author}: {0.content}'.format(message)
await client.send_message(logs, to_send)
@client.async_event
async def on_message_edit(before, after):
if after.server is not None and after.server.id in Logger.log_config[client.shard_id]:
logs = Logger.log_config[client.shard_id][after.server.id]
if before.content != after.content:
to_send = ':speech_left: **Message edited**: {0.author}: ~~{0.content}~~ | {1.content}'.format(before, after)
await client.send_message(logs, to_send)
@client.async_event
async def on_member_join(member):
if member.server.id in Logger.log_config[client.shard_id]:
logs = Logger.log_config[client.shard_id][member.server.id]
to_send = ':bust_in_silhouette::arrow_right: **User joined**: {0}'.format(member)
await client.send_message(logs, to_send)
@client.async_event
async def on_member_remove(member):
if member.server.id in Logger.log_config[client.shard_id]:
logs = Logger.log_config[client.shard_id][member.server.id]
to_send = ':bust_in_silhouette::arrow_left: **User left**: {0}'.format(member)
await client.send_message(logs, to_send)
@client.async_event
async def on_member_ban(member):
if member.server.id in Logger.log_config[client.shard_id]:
logs = Logger.log_config[client.shard_id][member.server.id]
to_send = ':bust_in_silhouette::x: **User banned**: {0}'.format(member)
await client.send_message(logs, to_send)
@client.async_event
async def on_member_unban(server, user):
if server.id in Logger.log_config[client.shard_id]:
logs = Logger.log_config[client.shard_id][server.id]
to_send = ':bust_in_silhouette::white_check_mark: **User unbanned**: {0}'.format(user)
await client.send_message(logs, to_send)
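# Wiring sketch (assumes `client` is the bot's discord client instance used
# elsewhere in the project):
#   await Logger.register_client_events(client)  # hooks the on_* handlers above
#   Logger.get_singleton(client)                 # one Logger instance per shard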
| initzx/aobot | utils/logger.py | Python | gpl-3.0 | 3,201 | 0.002812 |
"""
This package is a set of utilities and methods for building mime messages.
"""
import uuid
from flanker import _email
from flanker.mime import DecodingError
from flanker.mime.message import ContentType, scanner
from flanker.mime.message.headers import WithParams
from flanker.mime.message.headers.parametrized import fix_content_type
from flanker.mime.message.part import MimePart, Body, Part, adjust_content_type
def multipart(subtype):
return MimePart(
container=Part(
ContentType(
"multipart", subtype, {"boundary": uuid.uuid4().hex})),
is_root=True)
def message_container(message):
part = MimePart(
container=Part(ContentType("message", "rfc822")),
enclosed=message)
message.set_root(False)
return part
def text(subtype, body, charset=None, disposition=None, filename=None):
return MimePart(
container=Body(
content_type=ContentType("text", subtype),
body=body,
charset=charset,
disposition=disposition,
filename=filename),
is_root=True)
def binary(maintype, subtype, body, filename=None,
disposition=None, charset=None, trust_ctype=False):
return MimePart(
container=Body(
content_type=ContentType(maintype, subtype),
trust_ctype=trust_ctype,
body=body,
charset=charset,
disposition=disposition,
filename=filename),
is_root=True)
def attachment(content_type, body, filename=None,
disposition=None, charset=None):
"""Smarter method to build attachments that detects the proper content type
and form of the message based on content type string, body and filename
of the attachment
"""
# fix and sanitize content type string and get main and sub parts:
main, sub = fix_content_type(
content_type, default=('application', 'octet-stream'))
# adjust content type based on body or filename if it's not too accurate
content_type = adjust_content_type(
ContentType(main, sub), body, filename)
if content_type.main == 'message':
try:
message = message_container(from_string(body))
message.headers['Content-Disposition'] = WithParams(disposition)
return message
except DecodingError:
content_type = ContentType('application', 'octet-stream')
return binary(
content_type.main,
content_type.sub,
body, filename,
disposition,
charset, True)
def from_string(string):
return scanner.scan(string)
def from_python(message):
return from_string(_email.message_to_string(message))
def from_message(message):
return from_string(message.to_string())
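# Usage sketch (addresses/body are placeholders; MimePart.append() and
# to_string() come from flanker.mime.message.part, not from this module):
#   msg = multipart("mixed")
#   msg.append(text("plain", "Hello from flanker"),
#              attachment("text/csv", "a,b\n1,2\n", filename="report.csv",
#                         disposition="attachment"))
#   msg.headers["Subject"] = "report"
#   raw = msg.to_string()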
| mailgun/flanker | flanker/mime/create.py | Python | apache-2.0 | 2,803 | 0 |
from django.conf.urls.defaults import *
urlpatterns = patterns('japos.dashboards.views',
(r'^$', 'index')
) | jyr/japos | dashboards/urls.py | Python | gpl-2.0 | 112 | 0.017857 |
def quicksort(A, lo, hi):
    if lo < hi:
        p = partition(A, lo, hi)
        quicksort(A, lo, p - 1)
        quicksort(A, p + 1, hi)
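# Minimal Lomuto-style partition sketch, assuming the driver above expects a
# helper with this signature (none is defined in the file itself).
def partition(A, lo, hi):
    pivot = A[hi]                     # last element as pivot
    i = lo - 1
    for j in range(lo, hi):
        if A[j] <= pivot:
            i += 1
            A[i], A[j] = A[j], A[i]   # move smaller elements left
    A[i + 1], A[hi] = A[hi], A[i + 1]
    return i + 1
# Usage sketch:
#   data = [5, 2, 9, 1]
#   quicksort(data, 0, len(data) - 1)   # data becomes [1, 2, 5, 9]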
| Chasego/codi | util/basic/quicksort.py | Python | mit | 120 | 0.008333 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Common functions
import os, sys
lib_path = os.path.abspath( os.path.join( '..', '..', 'lib' ) )
sys.path.append(lib_path)
from commons import *
from overpasser import *
from routing import *
from feriados import *
from make_json import *
def lower_capitalized(input):
output = lower_capitalized_master(input)
# Specific place names
output = output.replace(u"P. Itapoã", u"Praia de Itapoã")
output = output.replace(u"Beira M", u"Beira Mar").replace(u"Marar", u"Mar")
output = output.replace(u"B. Mar", u"Beira Mar")
output = output.replace(u"C. Itaparica", u"Coqueiral de Itaparica")
output = output.replace(u"Exp.", u"Expedito")
output = output.replace(u"Castelandia", u"Castelândia")
output = output.replace(u"J. Camburi", u"Jardim Camburi")
output = output.replace(u"P. Costa", u"Praia da Costa")
output = output.replace(u"S. Dourada", u"Serra Dourada")
output = output.replace(u"M. Noronha", u"Marcilio de Noronha")
output = output.replace(u"Marcilio de Noronha", u"Marcílio de Noronha")
return output.strip()
def getLines():
downloadURL = "https://sistemas.es.gov.br/webservices/ceturb/onibus/api/ConsultaLinha?Tipo_Linha=Seletivo"
routes = []
myJSON = None
r = False
while r == False:
try:
r = requests.get(downloadURL, timeout=30)
except requests.exceptions.ReadTimeout as e:
r = False
except requests.exceptions.ConnectionError as e:
r = False
try:
myJSON = json.dumps(json.loads(r.content))
except:
r = False
station = [ None, None ]
for i in json.loads(myJSON):
# if i["Terminal_Seq"] == 1:
# station[0] = i["Dest_Terminal"]
# if i["Terminal_Seq"] == 2:
# station[1] = i["Dest_Terminal"]
routes.append( [ str(int(i[u"Linha"])), lower_capitalized(unicode(i[u"Descricao"])) ] )
return routes
| Skippern/PDF-scraper-Lorenzutti | creators/seletivo/common.py | Python | gpl-3.0 | 1,983 | 0.013138 |
'''
This case can not be executed in parallel.
It calculates the maximum number of VMs that fit into 1 host's available disk
space, then tries to create all of those VMs at the same time to see if zstack
can handle it.
@author: Youyk
'''
import os
import sys
import threading
import time
import random
import zstackwoodpecker.test_util as test_util
import zstackwoodpecker.test_lib as test_lib
import zstackwoodpecker.test_state as test_state
import zstackwoodpecker.operations.host_operations as host_ops
import zstackwoodpecker.operations.resource_operations as res_ops
import zstackwoodpecker.operations.vm_operations as vm_ops
import zstackwoodpecker.operations.volume_operations as vol_ops
import zstacklib.utils.sizeunit as sizeunit
import apibinding.inventory as inventory
_config_ = {
'timeout' : 1000,
'noparallel' : True
}
test_stub = test_lib.lib_get_test_stub()
test_obj_dict = test_state.TestStateDict()
original_rate = None
new_offering_uuid = None
exc_info = []
def parallelly_create_vm(vm_name, image_name, host_uuid, disk_offering_uuid):
try:
vm = test_stub.create_vm(vm_name = vm_name, \
image_name = image_name, \
host_uuid = host_uuid, \
disk_offering_uuids = [disk_offering_uuid])
test_obj_dict.add_vm(vm)
except Exception as e:
exc_info.append(sys.exc_info())
def check_thread_exception():
if exc_info:
info1 = exc_info[0][1]
info2 = exc_info[0][2]
raise info1, None, info2
def test():
global original_rate
global new_offering_uuid
global delete_policy
test_util.test_dsc('Test memory allocation and reclaiming.')
cond = res_ops.gen_query_conditions('state', '=', 'Enabled')
cond = res_ops.gen_query_conditions('status', '=', 'Connected', cond)
hosts = res_ops.query_resource_with_num(res_ops.HOST, cond)
if not hosts:
test_util.test_skip('No Enabled/Connected host was found, skip test.' )
return True
ps = res_ops.query_resource_with_num(res_ops.PRIMARY_STORAGE, cond)
if len(ps) > 1:
test_util.test_skip('multiple Enabled/Connected primary storage was found, skip test.' )
if len(ps) == 0:
test_util.test_skip('No Enabled/Connected primary storage was found, skip test.' )
return True
if ps[0].type != inventory.LOCAL_STORAGE_TYPE:
test_util.test_skip('skip test if PS is not local storage.' )
return True
host = random.choice(hosts)
ps = ps[0]
over_provision_rate = 1
target_vm_num = 5
host_res = vol_ops.get_local_storage_capacity(host.uuid, ps.uuid)[0]
avail_cap = host_res.availableCapacity
image_name = os.environ.get('imageName_net')
image = test_lib.lib_get_image_by_name(image_name)
image_size = image.size
original_rate = test_lib.lib_set_provision_storage_rate(over_provision_rate)
data_volume_size = int(avail_cap / target_vm_num * over_provision_rate - image_size)
if data_volume_size < 0:
test_util.test_skip('Do not have enough disk space to do test')
return True
delete_policy = test_lib.lib_set_delete_policy('vm', 'Direct')
delete_policy = test_lib.lib_set_delete_policy('volume', 'Direct')
host_res = vol_ops.get_local_storage_capacity(host.uuid, ps.uuid)[0]
avail_cap = host_res.availableCapacity
disk_offering_option = test_util.DiskOfferingOption()
disk_offering_option.set_name('vm-parallel-creation-test')
disk_offering_option.set_diskSize(data_volume_size)
data_volume_offering = vol_ops.create_volume_offering(disk_offering_option)
test_obj_dict.add_disk_offering(data_volume_offering)
rounds = 1
while (rounds <= 3):
times = 1
test_util.test_logger('test round: %s' % rounds)
while (times <= (target_vm_num)):
thread = threading.Thread(target = parallelly_create_vm, \
args = ('parallel_vm_creating_%d' % times, \
image_name, \
host.uuid, \
data_volume_offering.uuid, ))
thread.start()
times += 1
times = 1
print 'Running VM: %s ' % len(test_obj_dict.get_vm_list())
while threading.active_count() > 1:
check_thread_exception()
time.sleep(1)
if times > 60:
test_util.test_fail('creating vm time exceed 60s')
times += 1
check_thread_exception()
try:
vm = test_stub.create_vm(vm_name = 'unexpected vm', \
image_name = image_name, \
host_uuid = host.uuid)
test_obj_dict.add_vm(vm)
except:
test_util.test_logger('expect vm creation failure')
else:
test_util.test_fail('The extra vm is unexpected to be created up')
for vm in test_obj_dict.get_all_vm_list():
try:
test_lib.lib_destroy_vm_and_data_volumes_objs_update_test_dict(vm, test_obj_dict)
except Exception as e:
test_util.test_logger("VM Destroying Failure in vm parallel creation test. :%s " % e)
raise e
rounds += 1
test_lib.lib_set_provision_storage_rate(original_rate)
test_lib.lib_robot_cleanup(test_obj_dict)
test_lib.lib_set_delete_policy('vm', delete_policy)
test_lib.lib_set_delete_policy('volume', delete_policy)
test_util.test_pass('Parallel vm creation Test Pass')
#Will be called only if exception happens in test().
def error_cleanup():
test_lib.lib_error_cleanup(test_obj_dict)
if original_rate:
test_lib.lib_set_provision_storage_rate(original_rate)
test_lib.lib_set_delete_policy('vm', delete_policy)
test_lib.lib_set_delete_policy('volume', delete_policy)
| zstackorg/zstack-woodpecker | integrationtest/vm/virt_plus/other/test_parallel_crt_vm_to_use_all_disk.py | Python | apache-2.0 | 5,980 | 0.009532 |
# Copyright (c) 2017 Ultimaker B.V.
# Uranium is released under the terms of the LGPLv3 or higher.
from UM.Application import Application
from UM.Message import Message
from UM.Version import Version
from UM.Logger import Logger
from UM.Job import Job
import urllib.request
import platform
import json
import codecs
from UM.i18n import i18nCatalog
i18n_catalog = i18nCatalog("uranium")
## This job checks if there is an update available on the provided URL.
class UpdateCheckerJob(Job):
def __init__(self, silent = False, url = None, callback = None, set_download_url_callback = None):
super().__init__()
self.silent = silent
self._url = url
self._callback = callback
self._set_download_url_callback = set_download_url_callback
def run(self):
if not self._url:
Logger.log("e", "Can not check for a new release. URL not set!")
no_new_version = True
application_name = Application.getInstance().getApplicationName()
Logger.log("i", "Checking for new version of %s" % application_name)
try:
headers = {"User-Agent": "%s - %s" % (application_name, Application.getInstance().getVersion())}
request = urllib.request.Request(self._url, headers = headers)
latest_version_file = urllib.request.urlopen(request)
except Exception as e:
Logger.log("w", "Failed to check for new version: %s" % e)
if not self.silent:
Message(i18n_catalog.i18nc("@info", "Could not access update information."),
title = i18n_catalog.i18nc("@info:title", "Version Upgrade")
).show()
return
try:
reader = codecs.getreader("utf-8")
data = json.load(reader(latest_version_file))
try:
                if Application.getInstance().getVersion() != "master":
local_version = Version(Application.getInstance().getVersion())
else:
if not self.silent:
Message(i18n_catalog.i18nc("@info", "The version you are using does not support checking for updates."), title = i18n_catalog.i18nc("@info:title", "Warning")).show()
return
except ValueError:
Logger.log("w", "Could not determine application version from string %s, not checking for updates", Application.getInstance().getVersion())
if not self.silent:
Message(i18n_catalog.i18nc("@info", "The version you are using does not support checking for updates."), title = i18n_catalog.i18nc("@info:title", "Version Upgrade")).show()
return
if application_name in data:
for key, value in data[application_name].items():
if "major" in value and "minor" in value and "revision" in value and "url" in value:
os = key
if platform.system() == os: #TODO: add architecture check
newest_version = Version([int(value["major"]), int(value["minor"]), int(value["revision"])])
if local_version < newest_version:
Logger.log("i", "Found a new version of the software. Spawning message")
message = Message(i18n_catalog.i18nc("@info", "A new version is available!"), title = i18n_catalog.i18nc("@info:title", "Version Upgrade"))
message.addAction("download", i18n_catalog.i18nc("@action:button", "Download"), "[no_icon]", "[no_description]")
if self._set_download_url_callback:
self._set_download_url_callback(value["url"])
message.actionTriggered.connect(self._callback)
message.show()
no_new_version = False
break
else:
Logger.log("w", "Could not find version information or download url for update.")
else:
Logger.log("w", "Did not find any version information for %s." % application_name)
except Exception:
Logger.logException("e", "Exception in update checker while parsing the JSON file.")
Message(i18n_catalog.i18nc("@info", "An exception occurred while checking for updates."), title = i18n_catalog.i18nc("@info:title", "Error")).show()
no_new_version = False # Just to suppress the message below.
if no_new_version and not self.silent:
Message(i18n_catalog.i18nc("@info", "No new version was found."), title = i18n_catalog.i18nc("@info:title", "Version Upgrade")).show()
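# The version feed fetched above is expected to be a JSON object keyed by
# application name, then by platform.system() value (illustrative shape only):
#   {"MyApp": {"Linux": {"major": 3, "minor": 0, "revision": 1,
#                        "url": "https://example.com/MyApp.AppImage"}}}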
| thopiekar/Uranium | plugins/UpdateChecker/UpdateCheckerJob.py | Python | lgpl-3.0 | 4,818 | 0.009755 |
f_source = open('../talker/workfile', 'w')
f_gold = open('../listener/workfile', 'w')
for i in range(100000):
f_source.write('0123456789abcdef')
f_gold.write('0123456789abcdef')
| ncos/hometasks | Lunev/programming/star_1/stable/tests/file_generator.py | Python | mit | 193 | 0.010363 |
from django.contrib import admin
from . import models
| sachinkum/Bal-Aveksha | WebServer/Authentications/admin.py | Python | gpl-3.0 | 54 | 0 |
from test_support import verbose, TestFailed
if verbose:
print "Testing whether compiler catches assignment to __debug__"
try:
compile('__debug__ = 1', '?', 'single')
except SyntaxError:
pass
import __builtin__
prev = __builtin__.__debug__
setattr(__builtin__, '__debug__', 'sure')
setattr(__builtin__, '__debug__', prev)
if verbose:
print 'Running tests on argument handling'
try:
exec 'def f(a, a): pass'
raise TestFailed, "duplicate arguments"
except SyntaxError:
pass
try:
exec 'def f(a = 0, a = 1): pass'
raise TestFailed, "duplicate keyword arguments"
except SyntaxError:
pass
try:
exec 'def f(a): global a; a = 1'
raise TestFailed, "variable is global and local"
except SyntaxError:
pass
if verbose:
print "testing complex args"
def comp_args((a, b)):
print a,b
comp_args((1, 2))
def comp_args((a, b)=(3, 4)):
print a, b
comp_args((1, 2))
comp_args()
def comp_args(a, (b, c)):
print a, b, c
comp_args(1, (2, 3))
def comp_args(a=2, (b, c)=(3, 4)):
print a, b, c
comp_args(1, (2, 3))
comp_args()
try:
exec 'def f(a=1, (b, c)): pass'
raise TestFailed, "non-default args after default"
except SyntaxError:
pass
if verbose:
print "testing bad float literals"
def expect_error(s):
try:
eval(s)
raise TestFailed("%r accepted" % s)
except SyntaxError:
pass
expect_error("2e")
expect_error("2.0e+")
expect_error("1e-")
expect_error("3-4e/21")
if verbose:
print "testing literals with leading zeroes"
def expect_same(test_source, expected):
got = eval(test_source)
if got != expected:
raise TestFailed("eval(%r) gave %r, but expected %r" %
(test_source, got, expected))
expect_error("077787")
expect_error("0xj")
expect_error("0x.")
expect_error("0e")
expect_same("0777", 511)
expect_same("0777L", 511)
expect_same("000777", 511)
expect_same("0xff", 255)
expect_same("0xffL", 255)
expect_same("0XfF", 255)
expect_same("0777.", 777)
expect_same("0777.0", 777)
expect_same("000000000000000000000000000000000000000000000000000777e0", 777)
expect_same("0777e1", 7770)
expect_same("0e0", 0)
expect_same("0000E-012", 0)
expect_same("09.5", 9.5)
expect_same("0777j", 777j)
expect_same("00j", 0j)
expect_same("00.0", 0)
expect_same("0e3", 0)
expect_same("090000000000000.", 90000000000000.)
expect_same("090000000000000.0000000000000000000000", 90000000000000.)
expect_same("090000000000000e0", 90000000000000.)
expect_same("090000000000000e-0", 90000000000000.)
expect_same("090000000000000j", 90000000000000j)
expect_error("090000000000000") # plain octal literal w/ decimal digit
expect_error("080000000000000") # plain octal literal w/ decimal digit
expect_error("000000000000009") # plain octal literal w/ decimal digit
expect_error("000000000000008") # plain octal literal w/ decimal digit
expect_same("000000000000007", 7)
expect_same("000000000000008.", 8.)
expect_same("000000000000009.", 9.)
# Verify treatment of unary minus on negative numbers SF bug #660455
expect_same("0xffffffff", -1)
expect_same("-0xffffffff", 1)
| DarioGT/OMS-PluginXML | org.modelsphere.sms/lib/jython-2.2.1/Lib/test/test_compile.py | Python | gpl-3.0 | 3,243 | 0.005242 |
# Run with nosetests tests/test_slave_cli.py
import debile.slave.cli as slave
def test_parse_args():
    args = slave.parse_args(['--auth', 'simple', '--config',
                             '/etc/debile/slave.yaml', '-s', '-d'])
assert args.auth_method == 'simple'
assert args.config == '/etc/debile/slave.yaml'
    assert args.syslog is True
    assert args.debug is True
| lucaskanashiro/debile | tests/test_slave_cli.py | Python | mit | 370 | 0.010811 |
# -*- encoding: utf-8 -*-
"""Implements different locators for UI"""
from selenium.webdriver.common.by import By
from .model import LocatorDict
NAVBAR_PATH = (
'//div[contains(@class,"navbar-inner") and '
'not(contains(@style, "display"))]'
)
MENU_CONTAINER_PATH = NAVBAR_PATH + '//ul[@id="menu"]'
ADM_MENU_CONTAINER_PATH = NAVBAR_PATH + '//ul[@id="menu2"]'
menu_locators = LocatorDict({
# Menus
# Navbar
"navbar.spinner": (By.XPATH, ("//div[@id='turbolinks-progress']")),
# Monitor Menu
"menu.monitor": (
By.XPATH,
(MENU_CONTAINER_PATH + "//a[@id='monitor_menu']")),
"menu.dashboard": (
By.XPATH,
(MENU_CONTAINER_PATH + "//a[@id='menu_item_dashboard']")),
"menu.reports": (
By.XPATH,
(MENU_CONTAINER_PATH + "//a[@id='menu_item_reports']")),
"menu.facts": (
By.XPATH,
(MENU_CONTAINER_PATH + "//a[@id='menu_item_fact_values']")),
"menu.statistics": (
By.XPATH,
(MENU_CONTAINER_PATH + "//a[@id='menu_item_statistics']")),
"menu.trends": (
By.XPATH,
(MENU_CONTAINER_PATH + "//a[@id='menu_item_trends']")),
"menu.audits": (
By.XPATH,
(MENU_CONTAINER_PATH + "//a[@id='menu_item_audits']")),
"menu.jobs": (
By.XPATH,
(MENU_CONTAINER_PATH + "//a[@id='menu_item_job_invocations']")),
# Content Menu
"menu.content": (
By.XPATH,
(MENU_CONTAINER_PATH + "//a[@id='content_menu']")),
"menu.life_cycle_environments": (
By.XPATH,
(MENU_CONTAINER_PATH + "//a[@id='menu_item_environments']")),
"menu.red_hat_subscriptions": (
By.XPATH,
(MENU_CONTAINER_PATH + "//a[@id='menu_item_red_hat_subscriptions']")),
"menu.activation_keys": (
By.XPATH,
(MENU_CONTAINER_PATH + "//a[@id='menu_item_activation_keys']")),
"menu.red_hat_repositories": (
By.XPATH,
(MENU_CONTAINER_PATH + "//a[@id='menu_item_redhat_provider']")),
"menu.products": (
By.XPATH,
(MENU_CONTAINER_PATH + "//a[@id='menu_item_products']")),
"menu.gpg_keys": (
By.XPATH,
(MENU_CONTAINER_PATH + "//a[@id='menu_item_gpg_keys']")),
"menu.sync_status": (
By.XPATH,
(MENU_CONTAINER_PATH + "//a[@id='menu_item_sync_status']")),
"menu.sync_plans": (
By.XPATH,
(MENU_CONTAINER_PATH + "//a[@id='menu_item_sync_plans']")),
"menu.content_views": (
By.XPATH,
(MENU_CONTAINER_PATH + "//a[@id='menu_item_content_views']")),
"menu.errata": (
By.XPATH,
(MENU_CONTAINER_PATH + "//a[@id='menu_item_errata']")),
"menu.packages": (
By.XPATH,
(MENU_CONTAINER_PATH + "//a[@id='menu_item_packages']")),
"menu.puppet_modules": (
By.XPATH,
(MENU_CONTAINER_PATH + "//a[@id='menu_item_puppet_modules']")),
"menu.docker_tags": (
By.XPATH,
(MENU_CONTAINER_PATH + "//a[@id='menu_item_docker_tags']")),
# Containers Menu
"menu.containers": (
By.XPATH,
(MENU_CONTAINER_PATH + "//a[@id='containers_menu']")),
"menu.all_containers": (
By.XPATH,
(MENU_CONTAINER_PATH + "//a[@id='menu_item_containers']")),
"menu.new_container": (
By.XPATH,
(MENU_CONTAINER_PATH + "//a[@id='menu_item_new_container']")),
"menu.registries": (
By.XPATH,
(MENU_CONTAINER_PATH + "//a[@id='menu_item_registries']")),
# Hosts Menu
"menu.hosts": (
By.XPATH,
(MENU_CONTAINER_PATH + "//a[@id='hosts_menu']")),
"menu.all_hosts": (
By.XPATH,
(MENU_CONTAINER_PATH + "//a[@id='menu_item_hosts']")),
"menu.discovered_hosts": (
By.XPATH,
(MENU_CONTAINER_PATH + "//a[@id='menu_item_discovered_hosts']")),
"menu.content_hosts": (
By.XPATH,
(MENU_CONTAINER_PATH + "//a[@id='menu_item_content_hosts']")),
"menu.host_collections": (
By.XPATH,
(MENU_CONTAINER_PATH +
"//a[@id='menu_item_host_collections']")),
"menu.operating_systems": (
By.XPATH,
(MENU_CONTAINER_PATH + "//a[@id='menu_item_operatingsystems']")),
"menu.provisioning_templates": (
By.XPATH,
(MENU_CONTAINER_PATH +
"//a[@id='menu_item_provisioning_templates']")),
"menu.partition_tables": (
By.XPATH,
(MENU_CONTAINER_PATH + "//a[@id='menu_item_partition_tables']")),
"menu.job_templates": (
By.XPATH,
(MENU_CONTAINER_PATH + "//a[@id='menu_item_job_templates']")),
"menu.installation_media": (
By.XPATH,
(MENU_CONTAINER_PATH + "//a[@id='menu_item_media']")),
"menu.hardware_models": (
By.XPATH,
(MENU_CONTAINER_PATH + "//a[@id='menu_item_models']")),
"menu.architectures": (
By.XPATH,
(MENU_CONTAINER_PATH + "//a[@id='menu_item_architectures']")),
"menu.oscap_policy": (
By.XPATH,
(MENU_CONTAINER_PATH + "//a[@id='menu_item_compliance_policies']")),
"menu.oscap_content": (
By.XPATH,
(MENU_CONTAINER_PATH + "//a[@id='menu_item_compliance_contents']")),
"menu.oscap_reports": (
By.XPATH,
(MENU_CONTAINER_PATH + "//a[@id='menu_item_compliance_reports']")),
# Configure Menu
"menu.configure": (
By.XPATH,
(MENU_CONTAINER_PATH + "//a[@id='configure_menu']")),
"menu.host_groups": (
By.XPATH,
(MENU_CONTAINER_PATH + "//a[@id='menu_item_hostgroups']")),
"menu.discovery_rules": (
By.XPATH,
(MENU_CONTAINER_PATH + "//a[@id='menu_item_discovery_rules']")),
"menu.global_parameters": (
By.XPATH,
(MENU_CONTAINER_PATH + "//a[@id='menu_item_common_parameters']")),
"menu.environments": (
By.XPATH,
(MENU_CONTAINER_PATH +
"//li[contains(@class,'menu_tab_environments')]"
"/a[@id='menu_item_environments']")),
"menu.puppet_classes": (
By.XPATH,
(MENU_CONTAINER_PATH + "//a[@id='menu_item_puppetclasses']")),
"menu.smart_variables": (
By.XPATH,
(MENU_CONTAINER_PATH + "//a[@id='menu_item_variable_lookup_keys']")),
"menu.configure_groups": (
By.XPATH,
(MENU_CONTAINER_PATH + "//a[@id='menu_item_config_groups']")),
# Infrastructure Menu
"menu.infrastructure": (
By.XPATH,
(MENU_CONTAINER_PATH + "//a[@id='infrastructure_menu']")),
"menu.smart_proxies": (
By.XPATH,
(MENU_CONTAINER_PATH + "//a[@id='menu_item_smart_proxies']")),
"menu.compute_resources": (
By.XPATH,
(MENU_CONTAINER_PATH + "//a[@id='menu_item_compute_resources']")),
"menu.compute_profiles": (
By.XPATH,
(MENU_CONTAINER_PATH + "//a[@id='menu_item_compute_profiles']")),
"menu.subnets": (
By.XPATH,
(MENU_CONTAINER_PATH + "//a[@id='menu_item_subnets']")),
"menu.domains": (
By.XPATH,
(MENU_CONTAINER_PATH + "//a[@id='menu_item_domains']")),
# Access Insights menu
"menu.insights": (
By.XPATH,
(MENU_CONTAINER_PATH + "//a[@id='redhat_access_top_menu']")),
"insights.overview": (
By.XPATH,
(MENU_CONTAINER_PATH + "//a[@href='/redhat_access/insights']")),
"insights.rules": (
By.XPATH,
(MENU_CONTAINER_PATH +
"//a[@href='/redhat_access/insights/rules/']")),
"insights.systems": (
By.XPATH,
(MENU_CONTAINER_PATH +
"//a[@href='/redhat_access/insights/systems/']")),
"insights.manage": (
By.XPATH,
(MENU_CONTAINER_PATH +
"//a[@href='/redhat_access/insights/manage']")),
# Administer Menu
"menu.administer": (
By.XPATH,
(ADM_MENU_CONTAINER_PATH + "//a[@id='administer_menu']")),
"menu.ldap_auth": (
By.XPATH,
(ADM_MENU_CONTAINER_PATH + "//a[@id='menu_item_auth_source_ldaps']")),
"menu.users": (
By.XPATH,
(ADM_MENU_CONTAINER_PATH + "//a[@id='menu_item_users']")),
"menu.user_groups": (
By.XPATH,
(ADM_MENU_CONTAINER_PATH + "//a[@id='menu_item_usergroups']")),
"menu.roles": (
By.XPATH,
(ADM_MENU_CONTAINER_PATH + "//a[@id='menu_item_roles']")),
"menu.bookmarks": (
By.XPATH,
(ADM_MENU_CONTAINER_PATH + "//a[@id='menu_item_bookmarks']")),
"menu.settings": (
By.XPATH,
(ADM_MENU_CONTAINER_PATH + "//a[@id='menu_item_settings']")),
"menu.about": (
By.XPATH,
(ADM_MENU_CONTAINER_PATH + "//a[@id='menu_item_about_index']")),
# Account Menu
"menu.account": (By.XPATH, "//a[@id='account_menu']"),
"menu.sign_out": (By.XPATH, "//a[@id='menu_item_logout']"),
"menu.my_account": (By.XPATH, "//a[@id='menu_item_my_account']"),
# Common Locators for Orgs and Locations
"menu.any_context": (
By.XPATH,
(MENU_CONTAINER_PATH + "//li[contains(@class,'org-switcher')]/a")),
# Updated to current_text as the fetched text can also be org+loc
"menu.current_text": (
By.XPATH,
(MENU_CONTAINER_PATH + "//li[contains(@class,'org-switcher')]/a")),
"menu.fetch_org": (
By.XPATH,
(MENU_CONTAINER_PATH + "//li[contains(@class, 'org-menu')]/a")),
"menu.fetch_loc": (
By.XPATH,
(MENU_CONTAINER_PATH + "//li[contains(@class, 'loc-menu')]/a")),
# Orgs
"org.manage_org": (
By.XPATH,
(MENU_CONTAINER_PATH +
"//a[@class='manage-menu' and contains(@href, 'organizations')]")),
"org.nav_current_org": (
By.XPATH,
("(" + MENU_CONTAINER_PATH +
"//li[contains(@class,'org-switcher')]"
"//li/a[@data-toggle='dropdown'])[1]")),
"org.select_org": (
By.XPATH,
(MENU_CONTAINER_PATH +
"//a[@href='/organizations/clear']/../../li/a[contains(.,'%s')]|"
"//div[contains(@style,'static') or contains(@style,'fixed')]"
"//a[@href='/organizations/clear']/../../li/a"
"/span[contains(@data-original-title, '%s')]")),
# Locations
"loc.manage_loc": (
By.XPATH,
(MENU_CONTAINER_PATH +
"//a[@class='manage-menu' and contains(@href, 'locations')]")),
"loc.nav_current_loc": (
By.XPATH,
("(" + MENU_CONTAINER_PATH +
"//li[contains(@class,'org-switcher')]"
"//li/a[@data-toggle='dropdown'])[2]")),
"loc.select_loc": (
By.XPATH,
(MENU_CONTAINER_PATH +
"//a[@href='/locations/clear']/../../li/a[contains(.,'%s')]|"
"//div[contains(@style,'static') or contains(@style,'fixed')]"
"//a[@href='/locations/clear']/../../li/a"
"/span[contains(@data-original-title, '%s')]"))
})
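# Hedged usage sketch (not part of the original module): UI code typically unpacks
# these (strategy, value) pairs into Selenium lookups. "browser" below is an assumed
# selenium webdriver instance, shown only for illustration:
#     strategy, value = menu_locators['menu.hosts']
#     browser.find_element(strategy, value).click()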
| elyezer/robottelo | robottelo/ui/locators/menu.py | Python | gpl-3.0 | 10,740 | 0 |
"""
Public exercise API.
"""
from twisted.protocols import amp
from txampext.errors import Error
class UnknownExercise(Error):
"""The exercise was not recognized.
"""
class GetExercises(amp.Command):
"""
Gets the identifiers and titles of some exercises.
"""
arguments = [
(b"solved", amp.Boolean())
]
response = [
(b"exercises", amp.AmpList([
(b"identifier", amp.String()),
(b"title", amp.Unicode())
]))
]
class GetExerciseDetails(amp.Command):
"""
    Gets the details of a particular exercise.
"""
arguments = [
(b"identifier", amp.String())
]
response = [
(b"title", amp.Unicode()),
(b"description", amp.Unicode()),
(b"solved", amp.Boolean())
]
errors = dict([
UnknownExercise.asAMP()
])
class NotifySolved(amp.Command):
"""Notify the client that they have solved an exercise.
"""
arguments = [
(b"identifier", amp.String()),
(b"title", amp.Unicode())
]
response = []
requiresAnswer = False
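# Hedged client-side sketch (not part of the original module): with a connected
# twisted AMP protocol instance ("proto" is an assumption), these commands are
# invoked via callRemote and answer with the dictionaries declared above, e.g.:
#     d = proto.callRemote(GetExercises, solved=False)
#     d.addCallback(lambda box: [e["identifier"] for e in box["exercises"]])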
| crypto101/clarent | clarent/exercise.py | Python | isc | 1,105 | 0.002715 |
from django.db.models.fields import Field
from django.db.models.sql.expressions import SQLEvaluator
from django.utils.translation import ugettext_lazy as _
from django.contrib.gis import forms
from django.contrib.gis.db.models.constants import GIS_LOOKUPS
from django.contrib.gis.db.models.lookups import GISLookup
from django.contrib.gis.db.models.proxy import GeometryProxy
from django.contrib.gis.geometry.backend import Geometry, GeometryException
from django.utils import six
# Local cache of the spatial_ref_sys table, which holds SRID data for each
# spatial database alias. This cache exists so that the database isn't queried
# for SRID info each time a distance query is constructed.
_srid_cache = {}
def get_srid_info(srid, connection):
"""
Returns the units, unit name, and spheroid WKT associated with the
given SRID from the `spatial_ref_sys` (or equivalent) spatial database
table for the given database connection. These results are cached.
"""
global _srid_cache
try:
# The SpatialRefSys model for the spatial backend.
SpatialRefSys = connection.ops.spatial_ref_sys()
except NotImplementedError:
# No `spatial_ref_sys` table in spatial backend (e.g., MySQL).
return None, None, None
if connection.alias not in _srid_cache:
# Initialize SRID dictionary for database if it doesn't exist.
_srid_cache[connection.alias] = {}
if srid not in _srid_cache[connection.alias]:
# Use `SpatialRefSys` model to query for spatial reference info.
sr = SpatialRefSys.objects.using(connection.alias).get(srid=srid)
units, units_name = sr.units
spheroid = SpatialRefSys.get_spheroid(sr.wkt)
_srid_cache[connection.alias][srid] = (units, units_name, spheroid)
return _srid_cache[connection.alias][srid]
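# Hedged usage sketch (not part of the original module): a caller holding a valid
# spatial database "connection" (an assumption here) can resolve SRID metadata
# through the cache above, e.g.:
#     units, units_name, spheroid = get_srid_info(4326, connection)
#     if units_name and units_name.lower() in GeometryField.geodetic_units:
#         pass  # geographic (degree-based) rather than projected coordinates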
class GeometryField(Field):
"The base GIS field -- maps to the OpenGIS Specification Geometry type."
# The OpenGIS Geometry name.
geom_type = 'GEOMETRY'
form_class = forms.GeometryField
# Geodetic units.
geodetic_units = ('decimal degree', 'degree')
description = _("The base GIS field -- maps to the OpenGIS Specification Geometry type.")
def __init__(self, verbose_name=None, srid=4326, spatial_index=True, dim=2,
geography=False, **kwargs):
"""
The initialization function for geometry fields. Takes the following
as keyword arguments:
srid:
The spatial reference system identifier, an OGC standard.
Defaults to 4326 (WGS84).
spatial_index:
Indicates whether to create a spatial index. Defaults to True.
Set this instead of 'db_index' for geographic fields since index
creation is different for geometry columns.
dim:
The number of dimensions for this geometry. Defaults to 2.
extent:
Customize the extent, in a 4-tuple of WGS 84 coordinates, for the
geometry field entry in the `USER_SDO_GEOM_METADATA` table. Defaults
to (-180.0, -90.0, 180.0, 90.0).
tolerance:
Define the tolerance, in meters, to use for the geometry field
entry in the `USER_SDO_GEOM_METADATA` table. Defaults to 0.05.
"""
# Setting the index flag with the value of the `spatial_index` keyword.
self.spatial_index = spatial_index
# Setting the SRID and getting the units. Unit information must be
# easily available in the field instance for distance queries.
self.srid = srid
# Setting the dimension of the geometry field.
self.dim = dim
# Setting the verbose_name keyword argument with the positional
# first parameter, so this works like normal fields.
kwargs['verbose_name'] = verbose_name
# Is this a geography rather than a geometry column?
self.geography = geography
# Oracle-specific private attributes for creating the entry in
# `USER_SDO_GEOM_METADATA`
self._extent = kwargs.pop('extent', (-180.0, -90.0, 180.0, 90.0))
self._tolerance = kwargs.pop('tolerance', 0.05)
super(GeometryField, self).__init__(**kwargs)
def deconstruct(self):
name, path, args, kwargs = super(GeometryField, self).deconstruct()
# Always include SRID for less fragility; include others if they're
# not the default values.
kwargs['srid'] = self.srid
if self.dim != 2:
kwargs['dim'] = self.dim
if self.spatial_index is not True:
kwargs['spatial_index'] = self.spatial_index
if self.geography is not False:
kwargs['geography'] = self.geography
return name, path, args, kwargs
# The following functions are used to get the units, their name, and
# the spheroid corresponding to the SRID of the GeometryField.
def _get_srid_info(self, connection):
# Get attributes from `get_srid_info`.
self._units, self._units_name, self._spheroid = get_srid_info(self.srid, connection)
def spheroid(self, connection):
if not hasattr(self, '_spheroid'):
self._get_srid_info(connection)
return self._spheroid
def units(self, connection):
if not hasattr(self, '_units'):
self._get_srid_info(connection)
return self._units
def units_name(self, connection):
if not hasattr(self, '_units_name'):
self._get_srid_info(connection)
return self._units_name
### Routines specific to GeometryField ###
def geodetic(self, connection):
"""
Returns true if this field's SRID corresponds with a coordinate
system that uses non-projected units (e.g., latitude/longitude).
"""
return self.units_name(connection).lower() in self.geodetic_units
def get_distance(self, value, lookup_type, connection):
"""
Returns a distance number in units of the field. For example, if
`D(km=1)` was passed in and the units of the field were in meters,
then 1000 would be returned.
"""
return connection.ops.get_distance(self, value, lookup_type)
def get_prep_value(self, value):
"""
Spatial lookup values are either a parameter that is (or may be
converted to) a geometry, or a sequence of lookup values that
        begins with a geometry. This routine will set up the geometry
value properly, and preserve any other lookup parameters before
returning to the caller.
"""
value = super(GeometryField, self).get_prep_value(value)
if isinstance(value, SQLEvaluator):
return value
elif isinstance(value, (tuple, list)):
geom = value[0]
seq_value = True
else:
geom = value
seq_value = False
# When the input is not a GEOS geometry, attempt to construct one
# from the given string input.
if isinstance(geom, Geometry):
pass
elif isinstance(geom, (bytes, six.string_types)) or hasattr(geom, '__geo_interface__'):
try:
geom = Geometry(geom)
except GeometryException:
raise ValueError('Could not create geometry from lookup value.')
else:
raise ValueError('Cannot use object with type %s for a geometry lookup parameter.' % type(geom).__name__)
# Assigning the SRID value.
geom.srid = self.get_srid(geom)
if seq_value:
lookup_val = [geom]
lookup_val.extend(value[1:])
return tuple(lookup_val)
else:
return geom
def get_srid(self, geom):
"""
Returns the default SRID for the given geometry, taking into account
the SRID set for the field. For example, if the input geometry
has no SRID, then that of the field will be returned.
"""
gsrid = geom.srid # SRID of given geometry.
if gsrid is None or self.srid == -1 or (gsrid == -1 and self.srid != -1):
return self.srid
else:
return gsrid
### Routines overloaded from Field ###
def contribute_to_class(self, cls, name, **kwargs):
super(GeometryField, self).contribute_to_class(cls, name, **kwargs)
# Setup for lazy-instantiated Geometry object.
setattr(cls, self.attname, GeometryProxy(Geometry, self))
def db_type(self, connection):
return connection.ops.geo_db_type(self)
def formfield(self, **kwargs):
defaults = {'form_class': self.form_class,
'geom_type': self.geom_type,
'srid': self.srid,
}
defaults.update(kwargs)
if (self.dim > 2 and 'widget' not in kwargs and
not getattr(defaults['form_class'].widget, 'supports_3d', False)):
defaults['widget'] = forms.Textarea
return super(GeometryField, self).formfield(**defaults)
def get_db_prep_lookup(self, lookup_type, value, connection, prepared=False):
"""
Prepare for the database lookup, and return any spatial parameters
necessary for the query. This includes wrapping any geometry
parameters with a backend-specific adapter and formatting any distance
parameters into the correct units for the coordinate system of the
field.
"""
if lookup_type in connection.ops.gis_terms:
# special case for isnull lookup
if lookup_type == 'isnull':
return []
# Populating the parameters list, and wrapping the Geometry
# with the Adapter of the spatial backend.
if isinstance(value, (tuple, list)):
params = [connection.ops.Adapter(value[0])]
if lookup_type in connection.ops.distance_functions:
# Getting the distance parameter in the units of the field.
params += self.get_distance(value[1:], lookup_type, connection)
elif lookup_type in connection.ops.truncate_params:
# Lookup is one where SQL parameters aren't needed from the
# given lookup value.
pass
else:
params += value[1:]
elif isinstance(value, SQLEvaluator):
params = []
else:
params = [connection.ops.Adapter(value)]
return params
else:
raise ValueError('%s is not a valid spatial lookup for %s.' %
(lookup_type, self.__class__.__name__))
def get_prep_lookup(self, lookup_type, value):
if lookup_type == 'isnull':
return bool(value)
else:
return self.get_prep_value(value)
def get_db_prep_save(self, value, connection):
"Prepares the value for saving in the database."
if value is None:
return None
else:
return connection.ops.Adapter(self.get_prep_value(value))
def get_placeholder(self, value, connection):
"""
Returns the placeholder for the geometry column for the
given value.
"""
return connection.ops.get_geom_placeholder(self, value)
for lookup_name in GIS_LOOKUPS:
lookup = type(lookup_name, (GISLookup,), {'lookup_name': lookup_name})
GeometryField.register_lookup(lookup)
# The OpenGIS Geometry Type Fields
class PointField(GeometryField):
geom_type = 'POINT'
form_class = forms.PointField
description = _("Point")
class LineStringField(GeometryField):
geom_type = 'LINESTRING'
form_class = forms.LineStringField
description = _("Line string")
class PolygonField(GeometryField):
geom_type = 'POLYGON'
form_class = forms.PolygonField
description = _("Polygon")
class MultiPointField(GeometryField):
geom_type = 'MULTIPOINT'
form_class = forms.MultiPointField
description = _("Multi-point")
class MultiLineStringField(GeometryField):
geom_type = 'MULTILINESTRING'
form_class = forms.MultiLineStringField
description = _("Multi-line string")
class MultiPolygonField(GeometryField):
geom_type = 'MULTIPOLYGON'
form_class = forms.MultiPolygonField
description = _("Multi polygon")
class GeometryCollectionField(GeometryField):
geom_type = 'GEOMETRYCOLLECTION'
form_class = forms.GeometryCollectionField
description = _("Geometry collection")
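# Hedged usage sketch (not part of the original module): in a GeoDjango model these
# fields are declared like ordinary Django fields; "City" is a hypothetical model
# used only for illustration.
#     from django.contrib.gis.db import models
#     class City(models.Model):
#         name = models.CharField(max_length=50)
#         location = models.PointField(srid=4326, spatial_index=True)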
| lecaoquochung/ddnb.django | django/contrib/gis/db/models/fields.py | Python | bsd-3-clause | 12,573 | 0.000954 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# TODO:
# Ability to set CPU/Memory reservations
try:
import json
except ImportError:
import simplejson as json
HAS_PYSPHERE = False
try:
from pysphere import VIServer, VIProperty, MORTypes
from pysphere.resources import VimService_services as VI
from pysphere.vi_task import VITask
from pysphere import VIException, VIApiException, FaultTypes
HAS_PYSPHERE = True
except ImportError:
pass
import ssl
DOCUMENTATION = '''
---
module: vsphere_guest
short_description: Create/delete/manage a guest VM through VMware vSphere.
description:
- Create/delete/reconfigure a guest VM through VMware vSphere. This module has a dependency on pysphere >= 1.7
version_added: "1.6"
options:
vcenter_hostname:
description:
- The hostname of the vcenter server the module will connect to, to create the guest.
required: true
default: null
aliases: []
validate_certs:
description:
- Validate SSL certs. Note, if running on python without SSLContext
support (typically, python < 2.7.9) you will have to set this to C(no)
as pysphere does not support validating certificates on older python.
Prior to 2.1, this module would always validate on python >= 2.7.9 and
never validate on python <= 2.7.8.
required: false
default: yes
choices: ['yes', 'no']
version_added: 2.1
guest:
description:
- The virtual server name you wish to manage.
required: true
username:
description:
- Username to connect to vcenter as.
required: true
default: null
password:
description:
- Password of the user to connect to vcenter as.
required: true
default: null
resource_pool:
description:
- The name of the resource_pool to create the VM in.
required: false
default: None
cluster:
description:
- The name of the cluster to create the VM in. By default this is derived from the host you tell the module to build the guest on.
required: false
default: None
esxi:
description:
- Dictionary which includes datacenter and hostname on which the VM should be created. For standalone ESXi hosts, ha-datacenter should be used as the datacenter name
required: false
default: null
state:
description:
- Indicate desired state of the vm. 'reconfigured' only applies changes to 'memory_mb' and 'num_cpus' in vm_hardware parameter, and only when hot-plugging is enabled for the guest.
default: present
choices: ['present', 'powered_off', 'absent', 'powered_on', 'restarted', 'reconfigured']
from_template:
version_added: "1.9"
description:
- Specifies if the VM should be deployed from a template (mutually exclusive with 'state' parameter). No guest customization changes to hardware such as CPU, RAM, NICs or Disks can be applied when launching from template.
default: no
choices: ['yes', 'no']
template_src:
version_added: "1.9"
description:
- Name of the source template to deploy from
default: None
snapshot_to_clone:
description:
- A string that when specified, will create a linked clone copy of the VM. Snapshot must already be taken in vCenter.
version_added: "2.0"
required: false
default: none
power_on_after_clone:
description:
- Specifies if the VM should be powered on after the clone.
required: false
default: yes
choices: ['yes', 'no']
vm_disk:
description:
      - A key, value list of disks and their sizes and which datastore to keep them in.
required: false
default: null
vm_hardware:
description:
- A key, value list of VM config settings. Must include ['memory_mb', 'num_cpus', 'osid', 'scsi'].
required: false
default: null
vm_nic:
description:
- A key, value list of nics, their types and what network to put them on.
required: false
default: null
vm_extra_config:
description:
- A key, value pair of any extra values you want set or changed in the vmx file of the VM. Useful to set advanced options on the VM.
required: false
default: null
vm_hw_version:
description:
- Desired hardware version identifier (for example, "vmx-08" for vms that needs to be managed with vSphere Client). Note that changing hardware version of existing vm is not supported.
required: false
default: null
version_added: "1.7"
vmware_guest_facts:
description:
- Gather facts from vCenter on a particular VM
required: false
default: null
force:
description:
- Boolean. Allows you to run commands which may alter the running state of a guest. Also used to reconfigure and destroy.
default: "no"
choices: [ "yes", "no" ]
notes:
- This module should run from a system that can access vSphere directly.
Either by using local_action, or using delegate_to.
author: "Richard Hoop (@rhoop) <wrhoop@gmail.com>"
requirements:
- "python >= 2.6"
- pysphere
'''
EXAMPLES = '''
# Create a new VM on an ESX server
# Returns changed = False when the VM already exists
# Returns changed = True and adds ansible_facts from the new VM
# State will set the power status of a guest upon creation. Use powered_on to create and boot.
# Options ['state', 'vm_extra_config', 'vm_disk', 'vm_nic', 'vm_hardware', 'esxi'] are required together
# Note: vm_floppy support added in 2.0
- vsphere_guest:
vcenter_hostname: vcenter.mydomain.local
username: myuser
password: mypass
guest: newvm001
state: powered_on
vm_extra_config:
vcpu.hotadd: yes
mem.hotadd: yes
notes: This is a test VM
folder: MyFolder
vm_disk:
disk1:
size_gb: 10
type: thin
datastore: storage001
# VMs can be put into folders. The value given here is either the full path
# to the folder (e.g. production/customerA/lamp) or just the last component
# of the path (e.g. lamp):
folder: production/customerA/lamp
vm_nic:
nic1:
type: vmxnet3
network: VM Network
network_type: standard
nic2:
type: vmxnet3
network: dvSwitch Network
network_type: dvs
vm_hardware:
memory_mb: 2048
num_cpus: 2
osid: centos64Guest
scsi: paravirtual
vm_cdrom:
type: "iso"
iso_path: "DatastoreName/cd-image.iso"
vm_floppy:
type: "image"
image_path: "DatastoreName/floppy-image.flp"
esxi:
datacenter: MyDatacenter
hostname: esx001.mydomain.local
# Reconfigure the CPU and Memory on the newly created VM
# Will return the changes made
- vsphere_guest:
vcenter_hostname: vcenter.mydomain.local
username: myuser
password: mypass
guest: newvm001
state: reconfigured
vm_extra_config:
vcpu.hotadd: yes
mem.hotadd: yes
notes: This is a test VM
vm_disk:
disk1:
size_gb: 10
type: thin
datastore: storage001
vm_nic:
nic1:
type: vmxnet3
network: VM Network
network_type: standard
vm_hardware:
memory_mb: 4096
num_cpus: 4
osid: centos64Guest
scsi: paravirtual
esxi:
datacenter: MyDatacenter
hostname: esx001.mydomain.local
# Deploy a guest from a template
- vsphere_guest:
vcenter_hostname: vcenter.mydomain.local
username: myuser
password: mypass
guest: newvm001
from_template: yes
template_src: centosTemplate
cluster: MainCluster
resource_pool: "/Resources"
vm_extra_config:
folder: MyFolder
# Task to gather facts from a vSphere cluster only if the system is a VMWare guest
- vsphere_guest:
vcenter_hostname: vcenter.mydomain.local
username: myuser
password: mypass
guest: newvm001
vmware_guest_facts: yes
# Typical output of a vsphere_facts run on a guest
# If VMware tools is not installed, ipaddresses will return None
- hw_eth0:
- addresstype: "assigned"
label: "Network adapter 1"
macaddress: "00:22:33:33:44:55"
macaddress_dash: "00-22-33-33-44-55"
ipaddresses: ['192.0.2.100', '2001:DB8:56ff:feac:4d8a']
summary: "VM Network"
hw_guest_full_name: "newvm001"
hw_guest_id: "rhel6_64Guest"
hw_memtotal_mb: 2048
hw_name: "centos64Guest"
hw_power_status: "POWERED ON",
hw_processor_count: 2
hw_product_uuid: "ef50bac8-2845-40ff-81d9-675315501dac"
hw_power_status will be one of the following values:
- POWERED ON
- POWERED OFF
- SUSPENDED
- POWERING ON
- POWERING OFF
- SUSPENDING
- RESETTING
- BLOCKED ON MSG
- REVERTING TO SNAPSHOT
- UNKNOWN
as seen in the VMPowerState-Class of PySphere: http://git.io/vlwOq
# Remove a vm from vSphere
# The VM must be powered_off or you need to use force to force a shutdown
- vsphere_guest:
vcenter_hostname: vcenter.mydomain.local
username: myuser
password: mypass
guest: newvm001
state: absent
force: yes
'''
def add_scsi_controller(module, s, config, devices, type="paravirtual", bus_num=0, disk_ctrl_key=1):
# add a scsi controller
scsi_ctrl_spec = config.new_deviceChange()
scsi_ctrl_spec.set_element_operation('add')
if type == "lsi":
# For RHEL5
scsi_ctrl = VI.ns0.VirtualLsiLogicController_Def("scsi_ctrl").pyclass()
elif type == "paravirtual":
# For RHEL6
scsi_ctrl = VI.ns0.ParaVirtualSCSIController_Def("scsi_ctrl").pyclass()
elif type == "lsi_sas":
scsi_ctrl = VI.ns0.VirtualLsiLogicSASController_Def(
"scsi_ctrl").pyclass()
elif type == "bus_logic":
scsi_ctrl = VI.ns0.VirtualBusLogicController_Def("scsi_ctrl").pyclass()
else:
s.disconnect()
module.fail_json(
msg="Error adding scsi controller to vm spec. No scsi controller"
" type of: %s" % (type))
scsi_ctrl.set_element_busNumber(int(bus_num))
scsi_ctrl.set_element_key(int(disk_ctrl_key))
scsi_ctrl.set_element_sharedBus("noSharing")
scsi_ctrl_spec.set_element_device(scsi_ctrl)
# Add the scsi controller to the VM spec.
devices.append(scsi_ctrl_spec)
return disk_ctrl_key
def add_disk(module, s, config_target, config, devices, datastore, type="thin", size=200000, disk_ctrl_key=1, disk_number=0, key=0):
# add a vmdk disk
# Verify the datastore exists
datastore_name, ds = find_datastore(module, s, datastore, config_target)
# create a new disk - file based - for the vm
disk_spec = config.new_deviceChange()
disk_spec.set_element_fileOperation("create")
disk_spec.set_element_operation("add")
disk_ctlr = VI.ns0.VirtualDisk_Def("disk_ctlr").pyclass()
disk_backing = VI.ns0.VirtualDiskFlatVer2BackingInfo_Def(
"disk_backing").pyclass()
disk_backing.set_element_fileName(datastore_name)
disk_backing.set_element_diskMode("persistent")
if type != "thick":
disk_backing.set_element_thinProvisioned(1)
disk_ctlr.set_element_key(key)
disk_ctlr.set_element_controllerKey(int(disk_ctrl_key))
disk_ctlr.set_element_unitNumber(int(disk_number))
disk_ctlr.set_element_backing(disk_backing)
disk_ctlr.set_element_capacityInKB(int(size))
disk_spec.set_element_device(disk_ctlr)
devices.append(disk_spec)
def add_cdrom(module, s, config_target, config, devices, default_devs, type="client", vm_cd_iso_path=None):
# Add a cd-rom
# Make sure the datastore exists.
if vm_cd_iso_path:
iso_location = vm_cd_iso_path.split('/', 1)
datastore, ds = find_datastore(
module, s, iso_location[0], config_target)
iso_path = iso_location[1]
# find ide controller
ide_ctlr = None
for dev in default_devs:
if dev.typecode.type[1] == "VirtualIDEController":
ide_ctlr = dev
# add a cdrom based on a physical device
if ide_ctlr:
cd_spec = config.new_deviceChange()
cd_spec.set_element_operation('add')
cd_ctrl = VI.ns0.VirtualCdrom_Def("cd_ctrl").pyclass()
if type == "iso":
iso = VI.ns0.VirtualCdromIsoBackingInfo_Def("iso").pyclass()
ds_ref = iso.new_datastore(ds)
ds_ref.set_attribute_type(ds.get_attribute_type())
iso.set_element_datastore(ds_ref)
iso.set_element_fileName("%s %s" % (datastore, iso_path))
cd_ctrl.set_element_backing(iso)
cd_ctrl.set_element_key(20)
cd_ctrl.set_element_controllerKey(ide_ctlr.get_element_key())
cd_ctrl.set_element_unitNumber(0)
cd_spec.set_element_device(cd_ctrl)
elif type == "client":
client = VI.ns0.VirtualCdromRemoteAtapiBackingInfo_Def(
"client").pyclass()
client.set_element_deviceName("")
cd_ctrl.set_element_backing(client)
cd_ctrl.set_element_key(20)
cd_ctrl.set_element_controllerKey(ide_ctlr.get_element_key())
cd_ctrl.set_element_unitNumber(0)
cd_spec.set_element_device(cd_ctrl)
else:
s.disconnect()
module.fail_json(
msg="Error adding cdrom of type %s to vm spec. "
" cdrom type can either be iso or client" % (type))
devices.append(cd_spec)
def add_floppy(module, s, config_target, config, devices, default_devs, type="image", vm_floppy_image_path=None):
# Add a floppy
# Make sure the datastore exists.
if vm_floppy_image_path:
image_location = vm_floppy_image_path.split('/', 1)
datastore, ds = find_datastore(
module, s, image_location[0], config_target)
image_path = image_location[1]
floppy_spec = config.new_deviceChange()
floppy_spec.set_element_operation('add')
floppy_ctrl = VI.ns0.VirtualFloppy_Def("floppy_ctrl").pyclass()
if type == "image":
image = VI.ns0.VirtualFloppyImageBackingInfo_Def("image").pyclass()
ds_ref = image.new_datastore(ds)
ds_ref.set_attribute_type(ds.get_attribute_type())
image.set_element_datastore(ds_ref)
image.set_element_fileName("%s %s" % (datastore, image_path))
floppy_ctrl.set_element_backing(image)
floppy_ctrl.set_element_key(3)
floppy_spec.set_element_device(floppy_ctrl)
elif type == "client":
client = VI.ns0.VirtualFloppyRemoteDeviceBackingInfo_Def(
"client").pyclass()
client.set_element_deviceName("/dev/fd0")
floppy_ctrl.set_element_backing(client)
floppy_ctrl.set_element_key(3)
floppy_spec.set_element_device(floppy_ctrl)
else:
s.disconnect()
module.fail_json(
msg="Error adding floppy of type %s to vm spec. "
" floppy type can either be image or client" % (type))
devices.append(floppy_spec)
def add_nic(module, s, nfmor, config, devices, nic_type="vmxnet3", network_name="VM Network", network_type="standard"):
# add a NIC
# Different network card types are: "VirtualE1000",
# "VirtualE1000e","VirtualPCNet32", "VirtualVmxnet", "VirtualNmxnet2",
# "VirtualVmxnet3"
nic_spec = config.new_deviceChange()
nic_spec.set_element_operation("add")
if nic_type == "e1000":
nic_ctlr = VI.ns0.VirtualE1000_Def("nic_ctlr").pyclass()
elif nic_type == "e1000e":
nic_ctlr = VI.ns0.VirtualE1000e_Def("nic_ctlr").pyclass()
elif nic_type == "pcnet32":
nic_ctlr = VI.ns0.VirtualPCNet32_Def("nic_ctlr").pyclass()
elif nic_type == "vmxnet":
nic_ctlr = VI.ns0.VirtualVmxnet_Def("nic_ctlr").pyclass()
elif nic_type == "vmxnet2":
nic_ctlr = VI.ns0.VirtualVmxnet2_Def("nic_ctlr").pyclass()
elif nic_type == "vmxnet3":
nic_ctlr = VI.ns0.VirtualVmxnet3_Def("nic_ctlr").pyclass()
else:
s.disconnect()
module.fail_json(
msg="Error adding nic to vm spec. No nic type of: %s" %
(nic_type))
if network_type == "standard":
nic_backing = VI.ns0.VirtualEthernetCardNetworkBackingInfo_Def(
"nic_backing").pyclass()
nic_backing.set_element_deviceName(network_name)
elif network_type == "dvs":
# Get the portgroup key
portgroupKey = find_portgroup_key(module, s, nfmor, network_name)
# Get the dvswitch uuid
dvswitch_uuid = find_dvswitch_uuid(module, s, nfmor, portgroupKey)
nic_backing_port = VI.ns0.DistributedVirtualSwitchPortConnection_Def(
"nic_backing_port").pyclass()
nic_backing_port.set_element_switchUuid(dvswitch_uuid)
nic_backing_port.set_element_portgroupKey(portgroupKey)
nic_backing = VI.ns0.VirtualEthernetCardDistributedVirtualPortBackingInfo_Def(
"nic_backing").pyclass()
nic_backing.set_element_port(nic_backing_port)
else:
s.disconnect()
module.fail_json(
msg="Error adding nic backing to vm spec. No network type of:"
" %s" % (network_type))
nic_ctlr.set_element_addressType("generated")
nic_ctlr.set_element_backing(nic_backing)
nic_ctlr.set_element_key(4)
nic_spec.set_element_device(nic_ctlr)
devices.append(nic_spec)
def find_datastore(module, s, datastore, config_target):
# Verify the datastore exists and put it in brackets if it does.
ds = None
if config_target:
for d in config_target.Datastore:
if (d.Datastore.Accessible and
(datastore and d.Datastore.Name == datastore)
or (not datastore)):
ds = d.Datastore.Datastore
datastore = d.Datastore.Name
break
else:
for ds_mor, ds_name in server.get_datastores().items():
ds_props = VIProperty(s, ds_mor)
if (ds_props.summary.accessible and (datastore and ds_name == datastore)
or (not datastore)):
ds = ds_mor
datastore = ds_name
if not ds:
s.disconnect()
module.fail_json(msg="Datastore: %s does not appear to exist" %
(datastore))
datastore_name = "[%s]" % datastore
return datastore_name, ds
def find_portgroup_key(module, s, nfmor, network_name):
# Find a portgroups key given the portgroup name.
# Grab all the distributed virtual portgroup's names and key's.
dvpg_mors = s._retrieve_properties_traversal(
property_names=['name', 'key'],
from_node=nfmor, obj_type='DistributedVirtualPortgroup')
# Get the correct portgroup managed object.
dvpg_mor = None
for dvpg in dvpg_mors:
if dvpg_mor:
break
for p in dvpg.PropSet:
if p.Name == "name" and p.Val == network_name:
dvpg_mor = dvpg
if dvpg_mor:
break
# If dvpg_mor is empty we didn't find the named portgroup.
if dvpg_mor is None:
s.disconnect()
module.fail_json(
msg="Could not find the distributed virtual portgroup named"
" %s" % network_name)
# Get the portgroup key
portgroupKey = None
for p in dvpg_mor.PropSet:
if p.Name == "key":
portgroupKey = p.Val
return portgroupKey
def find_dvswitch_uuid(module, s, nfmor, portgroupKey):
# Find a dvswitch's uuid given a portgroup key.
# Function searches all dvswitches in the datacenter to find the switch
# that has the portgroup key.
# Grab the dvswitch uuid and portgroup properties
dvswitch_mors = s._retrieve_properties_traversal(
property_names=['uuid', 'portgroup'],
from_node=nfmor, obj_type='DistributedVirtualSwitch')
dvswitch_mor = None
# Get the dvswitches managed object
for dvswitch in dvswitch_mors:
if dvswitch_mor:
break
for p in dvswitch.PropSet:
if p.Name == "portgroup":
pg_mors = p.Val.ManagedObjectReference
for pg_mor in pg_mors:
if dvswitch_mor:
break
key_mor = s._get_object_properties(
pg_mor, property_names=['key'])
for key in key_mor.PropSet:
if key.Val == portgroupKey:
dvswitch_mor = dvswitch
# Get the switches uuid
dvswitch_uuid = None
for p in dvswitch_mor.PropSet:
if p.Name == "uuid":
dvswitch_uuid = p.Val
return dvswitch_uuid
def spec_singleton(spec, request, vm):
if not spec:
_this = request.new__this(vm._mor)
_this.set_attribute_type(vm._mor.get_attribute_type())
request.set_element__this(_this)
spec = request.new_spec()
return spec
def vmdisk_id(vm, current_datastore_name):
id_list = []
for vm_disk in vm._disks:
if current_datastore_name in vm_disk['descriptor']:
id_list.append(vm_disk['device']['key'])
return id_list
def deploy_template(vsphere_client, guest, resource_pool, template_src, esxi, module, cluster_name, snapshot_to_clone, power_on_after_clone, vm_extra_config):
vmTemplate = vsphere_client.get_vm_by_name(template_src)
vmTarget = None
if esxi:
datacenter = esxi['datacenter']
esxi_hostname = esxi['hostname']
# Datacenter managed object reference
dclist = [k for k,
v in vsphere_client.get_datacenters().items() if v == datacenter]
if dclist:
dcmor=dclist[0]
else:
vsphere_client.disconnect()
module.fail_json(msg="Cannot find datacenter named: %s" % datacenter)
dcprops = VIProperty(vsphere_client, dcmor)
# hostFolder managed reference
hfmor = dcprops.hostFolder._obj
        # Grab the computeResource name and host properties
crmors = vsphere_client._retrieve_properties_traversal(
property_names=['name', 'host'],
from_node=hfmor,
obj_type='ComputeResource')
# Grab the host managed object reference of the esxi_hostname
try:
hostmor = [k for k,
v in vsphere_client.get_hosts().items() if v == esxi_hostname][0]
except IndexError, e:
vsphere_client.disconnect()
module.fail_json(msg="Cannot find esx host named: %s" % esxi_hostname)
# Grab the computeResource managed object reference of the host we are
# creating the VM on.
crmor = None
for cr in crmors:
if crmor:
break
for p in cr.PropSet:
if p.Name == "host":
for h in p.Val.get_element_ManagedObjectReference():
if h == hostmor:
crmor = cr.Obj
break
if crmor:
break
crprops = VIProperty(vsphere_client, crmor)
rpmor = crprops.resourcePool._obj
elif resource_pool:
try:
cluster = [k for k,
v in vsphere_client.get_clusters().items() if v == cluster_name][0] if cluster_name else None
except IndexError, e:
vsphere_client.disconnect()
module.fail_json(msg="Cannot find Cluster named: %s" %
cluster_name)
try:
rpmor = [k for k, v in vsphere_client.get_resource_pools(
from_mor=cluster).items()
if v == resource_pool][0]
except IndexError, e:
vsphere_client.disconnect()
module.fail_json(msg="Cannot find Resource Pool named: %s" %
resource_pool)
else:
module.fail_json(msg="You need to specify either esxi:[datacenter,hostname] or [cluster,resource_pool]")
try:
vmTarget = vsphere_client.get_vm_by_name(guest)
except Exception:
pass
if not vmTemplate.is_powered_off():
module.fail_json(
msg="Source %s must be powered off" % template_src
)
try:
if not vmTarget:
cloneArgs = dict(resourcepool=rpmor, power_on=power_on_after_clone)
if snapshot_to_clone is not None:
#check if snapshot_to_clone is specified, Create a Linked Clone instead of a full clone.
cloneArgs["linked"] = True
cloneArgs["snapshot"] = snapshot_to_clone
if vm_extra_config.get("folder") is not None:
# if a folder is specified, clone the VM into it
cloneArgs["folder"] = vm_extra_config.get("folder")
vmTemplate.clone(guest, **cloneArgs)
changed = True
else:
changed = False
vsphere_client.disconnect()
module.exit_json(changed=changed)
except Exception as e:
module.fail_json(
msg="Could not clone selected machine: %s" % e
)
# example from https://github.com/kalazzerx/pysphere/blob/master/examples/pysphere_create_disk_and_add_to_vm.py
# was used.
def update_disks(vsphere_client, vm, module, vm_disk, changes):
request = VI.ReconfigVM_TaskRequestMsg()
changed = False
for cnf_disk in vm_disk:
disk_id = re.sub("disk", "", cnf_disk)
found = False
for dev_key in vm._devices:
if vm._devices[dev_key]['type'] == 'VirtualDisk':
hdd_id = vm._devices[dev_key]['label'].split()[2]
if disk_id == hdd_id:
found = True
continue
if not found:
it = VI.ReconfigVM_TaskRequestMsg()
_this = request.new__this(vm._mor)
_this.set_attribute_type(vm._mor.get_attribute_type())
request.set_element__this(_this)
spec = request.new_spec()
dc = spec.new_deviceChange()
dc.Operation = "add"
dc.FileOperation = "create"
hd = VI.ns0.VirtualDisk_Def("hd").pyclass()
hd.Key = -100
hd.UnitNumber = int(disk_id)
hd.CapacityInKB = int(vm_disk[cnf_disk]['size_gb']) * 1024 * 1024
hd.ControllerKey = 1000
# module.fail_json(msg="peos : %s" % vm_disk[cnf_disk])
backing = VI.ns0.VirtualDiskFlatVer2BackingInfo_Def("backing").pyclass()
backing.FileName = "[%s]" % vm_disk[cnf_disk]['datastore']
backing.DiskMode = "persistent"
backing.Split = False
backing.WriteThrough = False
backing.ThinProvisioned = False
backing.EagerlyScrub = False
hd.Backing = backing
dc.Device = hd
spec.DeviceChange = [dc]
request.set_element_spec(spec)
ret = vsphere_client._proxy.ReconfigVM_Task(request)._returnval
# Wait for the task to finish
task = VITask(ret, vsphere_client)
status = task.wait_for_state([task.STATE_SUCCESS,
task.STATE_ERROR])
if status == task.STATE_SUCCESS:
changed = True
changes[cnf_disk] = vm_disk[cnf_disk]
elif status == task.STATE_ERROR:
module.fail_json(
msg="Error reconfiguring vm: %s, [%s]" % (
task.get_error_message(),
vm_disk[cnf_disk]))
return changed, changes
def reconfigure_vm(vsphere_client, vm, module, esxi, resource_pool, cluster_name, guest, vm_extra_config, vm_hardware, vm_disk, vm_nic, state, force):
spec = None
changed = False
changes = {}
request = None
shutdown = False
poweron = vm.is_powered_on()
memoryHotAddEnabled = bool(vm.properties.config.memoryHotAddEnabled)
cpuHotAddEnabled = bool(vm.properties.config.cpuHotAddEnabled)
cpuHotRemoveEnabled = bool(vm.properties.config.cpuHotRemoveEnabled)
changed, changes = update_disks(vsphere_client, vm,
module, vm_disk, changes)
request = VI.ReconfigVM_TaskRequestMsg()
# Change Memory
if 'memory_mb' in vm_hardware:
if int(vm_hardware['memory_mb']) != vm.properties.config.hardware.memoryMB:
spec = spec_singleton(spec, request, vm)
if vm.is_powered_on():
if force:
# No hot add but force
if not memoryHotAddEnabled:
shutdown = True
elif int(vm_hardware['memory_mb']) < vm.properties.config.hardware.memoryMB:
shutdown = True
else:
# Fail on no hot add and no force
if not memoryHotAddEnabled:
module.fail_json(
msg="memoryHotAdd is not enabled. force is "
"required for shutdown")
# Fail on no force and memory shrink
elif int(vm_hardware['memory_mb']) < vm.properties.config.hardware.memoryMB:
module.fail_json(
msg="Cannot lower memory on a live VM. force is "
"required for shutdown")
# set the new RAM size
spec.set_element_memoryMB(int(vm_hardware['memory_mb']))
changes['memory'] = vm_hardware['memory_mb']
# ===( Reconfigure Network )====#
if vm_nic:
changed = reconfigure_net(vsphere_client, vm, module, esxi, resource_pool, guest, vm_nic, cluster_name)
    # ====( Config CPU )====#
if 'num_cpus' in vm_hardware:
if int(vm_hardware['num_cpus']) != vm.properties.config.hardware.numCPU:
spec = spec_singleton(spec, request, vm)
if vm.is_powered_on():
if force:
# No hot add but force
if not cpuHotAddEnabled:
shutdown = True
elif int(vm_hardware['num_cpus']) < vm.properties.config.hardware.numCPU:
if not cpuHotRemoveEnabled:
shutdown = True
else:
# Fail on no hot add and no force
if not cpuHotAddEnabled:
module.fail_json(
msg="cpuHotAdd is not enabled. force is "
"required for shutdown")
# Fail on no force and cpu shrink without hot remove
elif int(vm_hardware['num_cpus']) < vm.properties.config.hardware.numCPU:
if not cpuHotRemoveEnabled:
module.fail_json(
msg="Cannot lower CPU on a live VM without "
"cpuHotRemove. force is required for shutdown")
spec.set_element_numCPUs(int(vm_hardware['num_cpus']))
changes['cpu'] = vm_hardware['num_cpus']
if len(changes):
if shutdown and vm.is_powered_on():
try:
vm.power_off(sync_run=True)
vm.get_status()
except Exception, e:
module.fail_json(
msg='Failed to shutdown vm %s: %s' % (guest, e)
)
request.set_element_spec(spec)
ret = vsphere_client._proxy.ReconfigVM_Task(request)._returnval
# Wait for the task to finish
task = VITask(ret, vsphere_client)
status = task.wait_for_state([task.STATE_SUCCESS, task.STATE_ERROR])
if status == task.STATE_SUCCESS:
changed = True
elif status == task.STATE_ERROR:
module.fail_json(
msg="Error reconfiguring vm: %s" % task.get_error_message())
if vm.is_powered_off() and poweron:
try:
vm.power_on(sync_run=True)
except Exception, e:
module.fail_json(
msg='Failed to power on vm %s : %s' % (guest, e)
)
vsphere_client.disconnect()
if changed:
module.exit_json(changed=True, changes=changes)
module.exit_json(changed=False)
def reconfigure_net(vsphere_client, vm, module, esxi, resource_pool, guest, vm_nic, cluster_name=None):
s = vsphere_client
nics = {}
request = VI.ReconfigVM_TaskRequestMsg()
_this = request.new__this(vm._mor)
_this.set_attribute_type(vm._mor.get_attribute_type())
request.set_element__this(_this)
nic_changes = []
datacenter = esxi['datacenter']
# Datacenter managed object reference
dclist = [k for k,
v in vsphere_client.get_datacenters().items() if v == datacenter]
if dclist:
dcmor=dclist[0]
else:
vsphere_client.disconnect()
module.fail_json(msg="Cannot find datacenter named: %s" % datacenter)
dcprops = VIProperty(vsphere_client, dcmor)
nfmor = dcprops.networkFolder._obj
for k,v in vm_nic.iteritems():
nicNum = k[len(k) -1]
if vm_nic[k]['network_type'] == 'dvs':
portgroupKey = find_portgroup_key(module, s, nfmor, vm_nic[k]['network'])
todvs = True
elif vm_nic[k]['network_type'] == 'standard':
todvs = False
# Detect cards that need to be changed and network type (and act accordingly)
for dev in vm.properties.config.hardware.device:
if dev._type in ["VirtualE1000", "VirtualE1000e",
"VirtualPCNet32", "VirtualVmxnet",
"VirtualNmxnet2", "VirtualVmxnet3"]:
devNum = dev.deviceInfo.label[len(dev.deviceInfo.label) - 1]
if devNum == nicNum:
fromdvs = dev.deviceInfo.summary.split(':')[0] == 'DVSwitch'
if todvs and fromdvs:
if dev.backing.port._obj.get_element_portgroupKey() != portgroupKey:
nics[k] = (dev, portgroupKey, 1)
elif fromdvs and not todvs:
nics[k] = (dev, '', 2)
elif not fromdvs and todvs:
nics[k] = (dev, portgroupKey, 3)
elif not fromdvs and not todvs:
if dev.backing._obj.get_element_deviceName() != vm_nic[k]['network']:
nics[k] = (dev, '', 2)
else:
pass
else:
module.exit_json()
if len(nics) > 0:
for nic, obj in nics.iteritems():
"""
1,2 and 3 are used to mark which action should be taken
1 = from a distributed switch to a distributed switch
2 = to a standard switch
3 = to a distributed switch
"""
dev = obj[0]
pgKey = obj[1]
dvsKey = obj[2]
if dvsKey == 1:
dev.backing.port._obj.set_element_portgroupKey(pgKey)
dev.backing.port._obj.set_element_portKey('')
if dvsKey == 3:
dvswitch_uuid = find_dvswitch_uuid(module, s, nfmor, pgKey)
nic_backing_port = VI.ns0.DistributedVirtualSwitchPortConnection_Def(
"nic_backing_port").pyclass()
nic_backing_port.set_element_switchUuid(dvswitch_uuid)
nic_backing_port.set_element_portgroupKey(pgKey)
nic_backing_port.set_element_portKey('')
nic_backing = VI.ns0.VirtualEthernetCardDistributedVirtualPortBackingInfo_Def(
"nic_backing").pyclass()
nic_backing.set_element_port(nic_backing_port)
dev._obj.set_element_backing(nic_backing)
if dvsKey == 2:
nic_backing = VI.ns0.VirtualEthernetCardNetworkBackingInfo_Def(
"nic_backing").pyclass()
nic_backing.set_element_deviceName(vm_nic[nic]['network'])
dev._obj.set_element_backing(nic_backing)
for nic, obj in nics.iteritems():
dev = obj[0]
spec = request.new_spec()
nic_change = spec.new_deviceChange()
nic_change.set_element_device(dev._obj)
nic_change.set_element_operation("edit")
nic_changes.append(nic_change)
spec.set_element_deviceChange(nic_changes)
request.set_element_spec(spec)
ret = vsphere_client._proxy.ReconfigVM_Task(request)._returnval
task = VITask(ret, vsphere_client)
status = task.wait_for_state([task.STATE_SUCCESS, task.STATE_ERROR])
if status == task.STATE_SUCCESS:
return(True)
elif status == task.STATE_ERROR:
module.fail_json(msg="Could not change network %s" % task.get_error_message())
elif len(nics) == 0:
return(False)
def _build_folder_tree(nodes, parent):
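    """Arrange the flat folder records (dicts with id/name/parent) into a nested tree keyed by folder name."""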
tree = {}
for node in nodes:
if node['parent'] == parent:
tree[node['name']] = dict.copy(node)
tree[node['name']]['subfolders'] = _build_folder_tree(nodes, node['id'])
del tree[node['name']]['parent']
return tree
def _find_path_in_tree(tree, path):
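    """Descend the tree built by _build_folder_tree following the path components; return the matching node or None."""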
for name, o in tree.iteritems():
if name == path[0]:
if len(path) == 1:
return o
else:
return _find_path_in_tree(o['subfolders'], path[1:])
return None
def _get_folderid_for_path(vsphere_client, datacenter, path):
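    """Resolve a 'parent/child/...' folder path beneath the datacenter's 'vm' folder to its managed object reference, if any."""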
content = vsphere_client._retrieve_properties_traversal(property_names=['name', 'parent'], obj_type=MORTypes.Folder)
if not content: return {}
node_list = [
{
'id': o.Obj,
'name': o.PropSet[0].Val,
'parent': (o.PropSet[1].Val if len(o.PropSet) > 1 else None)
} for o in content
]
tree = _build_folder_tree(node_list, datacenter)
tree = _find_path_in_tree(tree, ['vm'])['subfolders']
folder = _find_path_in_tree(tree, path.split('/'))
return folder['id'] if folder else None
def create_vm(vsphere_client, module, esxi, resource_pool, cluster_name, guest, vm_extra_config, vm_hardware, vm_disk, vm_nic, vm_hw_version, state):
datacenter = esxi['datacenter']
esxi_hostname = esxi['hostname']
# Datacenter managed object reference
dclist = [k for k,
v in vsphere_client.get_datacenters().items() if v == datacenter]
if dclist:
dcmor=dclist[0]
else:
vsphere_client.disconnect()
module.fail_json(msg="Cannot find datacenter named: %s" % datacenter)
dcprops = VIProperty(vsphere_client, dcmor)
# hostFolder managed reference
hfmor = dcprops.hostFolder._obj
# virtualmachineFolder managed object reference
if vm_extra_config.get('folder'):
# try to find the folder by its full path, e.g. 'production/customerA/lamp'
vmfmor = _get_folderid_for_path(vsphere_client, dcmor, vm_extra_config.get('folder'))
# try the legacy behaviour of just matching the folder name, so 'lamp' alone matches 'production/customerA/lamp'
if vmfmor is None:
for mor, name in vsphere_client._get_managed_objects(MORTypes.Folder).iteritems():
if name == vm_extra_config['folder']:
vmfmor = mor
# if neither of strategies worked, bail out
if vmfmor is None:
vsphere_client.disconnect()
module.fail_json(msg="Cannot find folder named: %s" % vm_extra_config['folder'])
else:
vmfmor = dcprops.vmFolder._obj
# networkFolder managed object reference
nfmor = dcprops.networkFolder._obj
    # Grab the computeResource name and host properties
crmors = vsphere_client._retrieve_properties_traversal(
property_names=['name', 'host'],
from_node=hfmor,
obj_type='ComputeResource')
# Grab the host managed object reference of the esxi_hostname
try:
hostmor = [k for k,
v in vsphere_client.get_hosts().items() if v == esxi_hostname][0]
except IndexError, e:
vsphere_client.disconnect()
module.fail_json(msg="Cannot find esx host named: %s" % esxi_hostname)
    # Grab the computeResource managed object reference of the host we are
# creating the VM on.
crmor = None
for cr in crmors:
if crmor:
break
for p in cr.PropSet:
if p.Name == "host":
for h in p.Val.get_element_ManagedObjectReference():
if h == hostmor:
crmor = cr.Obj
break
if crmor:
break
crprops = VIProperty(vsphere_client, crmor)
# Get resource pool managed reference
# Requires that a cluster name be specified.
if resource_pool:
try:
cluster = [k for k,
v in vsphere_client.get_clusters().items() if v == cluster_name][0] if cluster_name else None
except IndexError, e:
vsphere_client.disconnect()
module.fail_json(msg="Cannot find Cluster named: %s" %
cluster_name)
try:
rpmor = [k for k, v in vsphere_client.get_resource_pools(
from_mor=cluster).items()
if v == resource_pool][0]
except IndexError, e:
vsphere_client.disconnect()
module.fail_json(msg="Cannot find Resource Pool named: %s" %
resource_pool)
else:
rpmor = crprops.resourcePool._obj
# CREATE VM CONFIGURATION
# get config target
request = VI.QueryConfigTargetRequestMsg()
_this = request.new__this(crprops.environmentBrowser._obj)
_this.set_attribute_type(
crprops.environmentBrowser._obj.get_attribute_type())
request.set_element__this(_this)
h = request.new_host(hostmor)
h.set_attribute_type(hostmor.get_attribute_type())
request.set_element_host(h)
config_target = vsphere_client._proxy.QueryConfigTarget(request)._returnval
# get default devices
request = VI.QueryConfigOptionRequestMsg()
_this = request.new__this(crprops.environmentBrowser._obj)
_this.set_attribute_type(
crprops.environmentBrowser._obj.get_attribute_type())
request.set_element__this(_this)
h = request.new_host(hostmor)
h.set_attribute_type(hostmor.get_attribute_type())
request.set_element_host(h)
config_option = vsphere_client._proxy.QueryConfigOption(request)._returnval
default_devs = config_option.DefaultDevice
# add parameters to the create vm task
create_vm_request = VI.CreateVM_TaskRequestMsg()
config = create_vm_request.new_config()
if vm_hw_version:
config.set_element_version(vm_hw_version)
vmfiles = config.new_files()
datastore_name, ds = find_datastore(
module, vsphere_client, vm_disk['disk1']['datastore'], config_target)
vmfiles.set_element_vmPathName(datastore_name)
config.set_element_files(vmfiles)
config.set_element_name(guest)
if 'notes' in vm_extra_config:
config.set_element_annotation(vm_extra_config['notes'])
config.set_element_memoryMB(int(vm_hardware['memory_mb']))
config.set_element_numCPUs(int(vm_hardware['num_cpus']))
config.set_element_guestId(vm_hardware['osid'])
devices = []
# Attach all the hardware we want to the VM spec.
# Add a scsi controller to the VM spec.
disk_ctrl_key = add_scsi_controller(
module, vsphere_client, config, devices, vm_hardware['scsi'])
if vm_disk:
disk_num = 0
disk_key = 0
for disk in sorted(vm_disk.iterkeys()):
try:
datastore = vm_disk[disk]['datastore']
except KeyError:
vsphere_client.disconnect()
module.fail_json(
msg="Error on %s definition. datastore needs to be"
" specified." % disk)
try:
disksize = int(vm_disk[disk]['size_gb'])
                # Convert the disk size from gigabytes to kilobytes
disksize = disksize * 1024 * 1024
except (KeyError, ValueError):
vsphere_client.disconnect()
module.fail_json(msg="Error on %s definition. size needs to be specified as an integer." % disk)
try:
disktype = vm_disk[disk]['type']
except KeyError:
vsphere_client.disconnect()
module.fail_json(
msg="Error on %s definition. type needs to be"
" specified." % disk)
# Add the disk to the VM spec.
add_disk(
module, vsphere_client, config_target, config,
devices, datastore, disktype, disksize, disk_ctrl_key,
disk_num, disk_key)
disk_num = disk_num + 1
disk_key = disk_key + 1
if 'vm_cdrom' in vm_hardware:
cdrom_iso_path = None
cdrom_type = None
try:
cdrom_type = vm_hardware['vm_cdrom']['type']
except KeyError:
vsphere_client.disconnect()
module.fail_json(
msg="Error on %s definition. cdrom type needs to be"
" specified." % vm_hardware['vm_cdrom'])
if cdrom_type == 'iso':
try:
cdrom_iso_path = vm_hardware['vm_cdrom']['iso_path']
except KeyError:
vsphere_client.disconnect()
module.fail_json(
msg="Error on %s definition. cdrom iso_path needs"
" to be specified." % vm_hardware['vm_cdrom'])
# Add a CD-ROM device to the VM.
add_cdrom(module, vsphere_client, config_target, config, devices,
default_devs, cdrom_type, cdrom_iso_path)
if 'vm_floppy' in vm_hardware:
floppy_image_path = None
floppy_type = None
try:
floppy_type = vm_hardware['vm_floppy']['type']
except KeyError:
vsphere_client.disconnect()
module.fail_json(
msg="Error on %s definition. floppy type needs to be"
" specified." % vm_hardware['vm_floppy'])
if floppy_type == 'image':
try:
floppy_image_path = vm_hardware['vm_floppy']['image_path']
except KeyError:
vsphere_client.disconnect()
module.fail_json(
msg="Error on %s definition. floppy image_path needs"
" to be specified." % vm_hardware['vm_floppy'])
# Add a floppy to the VM.
add_floppy(module, vsphere_client, config_target, config, devices,
default_devs, floppy_type, floppy_image_path)
if vm_nic:
for nic in sorted(vm_nic.iterkeys()):
try:
nictype = vm_nic[nic]['type']
except KeyError:
vsphere_client.disconnect()
module.fail_json(
msg="Error on %s definition. type needs to be "
" specified." % nic)
try:
network = vm_nic[nic]['network']
except KeyError:
vsphere_client.disconnect()
module.fail_json(
msg="Error on %s definition. network needs to be "
" specified." % nic)
try:
network_type = vm_nic[nic]['network_type']
except KeyError:
vsphere_client.disconnect()
module.fail_json(
msg="Error on %s definition. network_type needs to be "
" specified." % nic)
# Add the nic to the VM spec.
add_nic(module, vsphere_client, nfmor, config, devices,
nictype, network, network_type)
config.set_element_deviceChange(devices)
create_vm_request.set_element_config(config)
folder_mor = create_vm_request.new__this(vmfmor)
folder_mor.set_attribute_type(vmfmor.get_attribute_type())
create_vm_request.set_element__this(folder_mor)
rp_mor = create_vm_request.new_pool(rpmor)
rp_mor.set_attribute_type(rpmor.get_attribute_type())
create_vm_request.set_element_pool(rp_mor)
host_mor = create_vm_request.new_host(hostmor)
host_mor.set_attribute_type(hostmor.get_attribute_type())
create_vm_request.set_element_host(host_mor)
# CREATE THE VM
taskmor = vsphere_client._proxy.CreateVM_Task(create_vm_request)._returnval
task = VITask(taskmor, vsphere_client)
task.wait_for_state([task.STATE_SUCCESS, task.STATE_ERROR])
if task.get_state() == task.STATE_ERROR:
vsphere_client.disconnect()
module.fail_json(msg="Error creating vm: %s" %
task.get_error_message())
else:
# We always need to get the vm because we are going to gather facts
vm = vsphere_client.get_vm_by_name(guest)
# VM was created. If there is any extra config options specified, set
# them here , disconnect from vcenter, then exit.
if vm_extra_config:
vm.set_extra_config(vm_extra_config)
# Power on the VM if it was requested
power_state(vm, state, True)
vmfacts=gather_facts(vm)
vsphere_client.disconnect()
module.exit_json(
ansible_facts=vmfacts,
changed=True,
changes="Created VM %s" % guest)
def delete_vm(vsphere_client, module, guest, vm, force):
try:
if vm.is_powered_on():
if force:
try:
vm.power_off(sync_run=True)
vm.get_status()
except Exception, e:
module.fail_json(
msg='Failed to shutdown vm %s: %s' % (guest, e))
else:
module.fail_json(
                    msg='You must either shut the vm down first or '
                    'use force')
# Invoke Destroy_Task
request = VI.Destroy_TaskRequestMsg()
_this = request.new__this(vm._mor)
_this.set_attribute_type(vm._mor.get_attribute_type())
request.set_element__this(_this)
ret = vsphere_client._proxy.Destroy_Task(request)._returnval
task = VITask(ret, vsphere_client)
# Wait for the task to finish
status = task.wait_for_state(
[task.STATE_SUCCESS, task.STATE_ERROR])
if status == task.STATE_ERROR:
vsphere_client.disconnect()
module.fail_json(msg="Error removing vm: %s %s" %
task.get_error_message())
module.exit_json(changed=True, changes="VM %s deleted" % guest)
except Exception, e:
module.fail_json(
msg='Failed to delete vm %s : %s' % (guest, e))
def power_state(vm, state, force):
"""
    Correctly set the power status for a VM determined by the current and
    requested states. force forces the transition even when the VM is in a
    transitional or suspended power state.
"""
power_status = vm.get_status()
check_status = ' '.join(state.split("_")).upper()
# Need Force
if not force and power_status in [
'SUSPENDED', 'POWERING ON',
'RESETTING', 'BLOCKED ON MSG'
]:
return "VM is in %s power state. Force is required!" % power_status
# State is already true
if power_status == check_status:
return False
else:
try:
if state == 'powered_off':
vm.power_off(sync_run=True)
elif state == 'powered_on':
vm.power_on(sync_run=True)
elif state == 'restarted':
if power_status in ('POWERED ON', 'POWERING ON', 'RESETTING'):
vm.reset(sync_run=False)
else:
return "Cannot restart VM in the current state %s" \
% power_status
return True
except Exception, e:
return e
return False
def gather_facts(vm):
"""
Gather facts for VM directly from vsphere.
"""
vm.get_properties()
facts = {
'module_hw': True,
'hw_name': vm.properties.name,
'hw_power_status': vm.get_status(),
'hw_guest_full_name': vm.properties.config.guestFullName,
'hw_guest_id': vm.properties.config.guestId,
'hw_product_uuid': vm.properties.config.uuid,
'hw_processor_count': vm.properties.config.hardware.numCPU,
'hw_memtotal_mb': vm.properties.config.hardware.memoryMB,
'hw_interfaces':[],
}
netInfo = vm.get_property('net')
netDict = {}
if netInfo:
for net in netInfo:
netDict[net['mac_address']] = net['ip_addresses']
ifidx = 0
for entry in vm.properties.config.hardware.device:
if not hasattr(entry, 'macAddress'):
continue
factname = 'hw_eth' + str(ifidx)
facts[factname] = {
'addresstype': entry.addressType,
'label': entry.deviceInfo.label,
'macaddress': entry.macAddress,
'ipaddresses': netDict.get(entry.macAddress, None),
'macaddress_dash': entry.macAddress.replace(':', '-'),
'summary': entry.deviceInfo.summary,
}
facts['hw_interfaces'].append('eth'+str(ifidx))
ifidx += 1
return facts
class DefaultVMConfig(object):
"""
Shallow and deep dict comparison for interfaces
"""
def __init__(self, check_dict, interface_dict):
self.check_dict, self.interface_dict = check_dict, interface_dict
self.set_current, self.set_past = set(
check_dict.keys()), set(interface_dict.keys())
self.intersect = self.set_current.intersection(self.set_past)
self.recursive_missing = None
def shallow_diff(self):
return self.set_past - self.intersect
def recursive_diff(self):
if not self.recursive_missing:
self.recursive_missing = []
for key, value in self.interface_dict.items():
if isinstance(value, dict):
for k, v in value.items():
if k in self.check_dict[key]:
if not isinstance(self.check_dict[key][k], v):
try:
if v == int:
self.check_dict[key][k] = int(self.check_dict[key][k])
elif v == basestring:
self.check_dict[key][k] = str(self.check_dict[key][k])
else:
raise ValueError
except ValueError:
self.recursive_missing.append((k, v))
else:
self.recursive_missing.append((k, v))
return self.recursive_missing
def config_check(name, passed, default, module):
"""
Checks that the dict passed for VM configuration matches the required
interface declared at the top of __main__
"""
diff = DefaultVMConfig(passed, default)
if len(diff.shallow_diff()):
module.fail_json(
msg="Missing required key/pair [%s]. %s must contain %s" %
(', '.join(diff.shallow_diff()), name, default))
if diff.recursive_diff():
module.fail_json(
msg="Config mismatch for %s on %s" %
(name, diff.recursive_diff()))
return True
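# Illustration only (values are hypothetical): main() below builds the proto_*
# interface dicts and validates the user-supplied params with config_check, e.g.
#
#   config_check("vm_disk",
#                {"disk1": {"datastore": "ds1", "size_gb": 20, "type": "thin"}},
#                proto_vm_disk, module)
#
# A missing key fails with "Missing required key/pair ...", while a value that
# cannot be coerced to the declared type fails with "Config mismatch ...".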
def main():
vm = None
proto_vm_hardware = {
'memory_mb': int,
'num_cpus': int,
'scsi': basestring,
'osid': basestring
}
proto_vm_disk = {
'disk1': {
'datastore': basestring,
'size_gb': int,
'type': basestring
}
}
proto_vm_nic = {
'nic1': {
'type': basestring,
'network': basestring,
'network_type': basestring
}
}
proto_esxi = {
'datacenter': basestring,
'hostname': basestring
}
module = AnsibleModule(
argument_spec=dict(
vcenter_hostname=dict(required=True, type='str'),
username=dict(required=True, type='str'),
password=dict(required=True, type='str', no_log=True),
state=dict(
required=False,
choices=[
'powered_on',
'powered_off',
'present',
'absent',
'restarted',
'reconfigured'
],
default='present'),
vmware_guest_facts=dict(required=False, type='bool'),
from_template=dict(required=False, type='bool'),
template_src=dict(required=False, type='str'),
snapshot_to_clone=dict(required=False, default=None, type='str'),
guest=dict(required=True, type='str'),
vm_disk=dict(required=False, type='dict', default={}),
vm_nic=dict(required=False, type='dict', default={}),
vm_hardware=dict(required=False, type='dict', default={}),
vm_extra_config=dict(required=False, type='dict', default={}),
vm_hw_version=dict(required=False, default=None, type='str'),
resource_pool=dict(required=False, default=None, type='str'),
cluster=dict(required=False, default=None, type='str'),
force=dict(required=False, type='bool', default=False),
esxi=dict(required=False, type='dict', default={}),
validate_certs=dict(required=False, type='bool', default=True),
power_on_after_clone=dict(required=False, type='bool', default=True)
),
supports_check_mode=False,
mutually_exclusive=[['state', 'vmware_guest_facts'],['state', 'from_template']],
required_together=[
['state', 'force'],
[
'state',
'vm_disk',
'vm_nic',
'vm_hardware',
'esxi'
],
['from_template', 'template_src'],
],
)
if not HAS_PYSPHERE:
module.fail_json(msg='pysphere module required')
vcenter_hostname = module.params['vcenter_hostname']
username = module.params['username']
password = module.params['password']
vmware_guest_facts = module.params['vmware_guest_facts']
state = module.params['state']
guest = module.params['guest']
force = module.params['force']
vm_disk = module.params['vm_disk']
vm_nic = module.params['vm_nic']
vm_hardware = module.params['vm_hardware']
vm_extra_config = module.params['vm_extra_config']
vm_hw_version = module.params['vm_hw_version']
esxi = module.params['esxi']
resource_pool = module.params['resource_pool']
cluster = module.params['cluster']
template_src = module.params['template_src']
from_template = module.params['from_template']
snapshot_to_clone = module.params['snapshot_to_clone']
power_on_after_clone = module.params['power_on_after_clone']
validate_certs = module.params['validate_certs']
# CONNECT TO THE SERVER
viserver = VIServer()
if validate_certs and not hasattr(ssl, 'SSLContext') and not vcenter_hostname.startswith('http://'):
module.fail_json(msg='pysphere does not support verifying certificates with python < 2.7.9. Either update python or set validate_certs=False on the task')
try:
viserver.connect(vcenter_hostname, username, password)
except ssl.SSLError as sslerr:
if '[SSL: CERTIFICATE_VERIFY_FAILED]' in sslerr.strerror:
if not validate_certs:
default_context = ssl._create_default_https_context
ssl._create_default_https_context = ssl._create_unverified_context
viserver.connect(vcenter_hostname, username, password)
else:
module.fail_json(msg='Unable to validate the certificate of the vcenter host %s' % vcenter_hostname)
else:
raise
except VIApiException, err:
module.fail_json(msg="Cannot connect to %s: %s" %
(vcenter_hostname, err))
# Check if the VM exists before continuing
try:
vm = viserver.get_vm_by_name(guest)
except Exception:
pass
if vm:
# Run for facts only
if vmware_guest_facts:
try:
module.exit_json(ansible_facts=gather_facts(vm))
except Exception, e:
module.fail_json(
msg="Fact gather failed with exception %s" % e)
# Power Changes
elif state in ['powered_on', 'powered_off', 'restarted']:
state_result = power_state(vm, state, force)
# Failure
if isinstance(state_result, basestring):
module.fail_json(msg=state_result)
else:
module.exit_json(changed=state_result)
# Just check if there
elif state == 'present':
module.exit_json(changed=False)
# Fail on reconfig without params
elif state == 'reconfigured':
reconfigure_vm(
vsphere_client=viserver,
vm=vm,
module=module,
esxi=esxi,
resource_pool=resource_pool,
cluster_name=cluster,
guest=guest,
vm_extra_config=vm_extra_config,
vm_hardware=vm_hardware,
vm_disk=vm_disk,
vm_nic=vm_nic,
state=state,
force=force
)
elif state == 'absent':
delete_vm(
vsphere_client=viserver,
module=module,
guest=guest,
vm=vm,
force=force)
# VM doesn't exist
else:
# Fail for fact gather task
if vmware_guest_facts:
module.fail_json(
msg="No such VM %s. Fact gathering requires an existing vm"
% guest)
elif from_template:
deploy_template(
vsphere_client=viserver,
esxi=esxi,
resource_pool=resource_pool,
guest=guest,
template_src=template_src,
module=module,
cluster_name=cluster,
snapshot_to_clone=snapshot_to_clone,
power_on_after_clone=power_on_after_clone,
vm_extra_config=vm_extra_config
)
if state in ['restarted', 'reconfigured']:
module.fail_json(
msg="No such VM %s. States ["
"restarted, reconfigured] required an existing VM" % guest)
elif state == 'absent':
module.exit_json(changed=False, msg="vm %s not present" % guest)
# Create the VM
elif state in ['present', 'powered_off', 'powered_on']:
# Check the guest_config
config_check("vm_disk", vm_disk, proto_vm_disk, module)
config_check("vm_nic", vm_nic, proto_vm_nic, module)
config_check("vm_hardware", vm_hardware, proto_vm_hardware, module)
config_check("esxi", esxi, proto_esxi, module)
create_vm(
vsphere_client=viserver,
module=module,
esxi=esxi,
resource_pool=resource_pool,
cluster_name=cluster,
guest=guest,
vm_extra_config=vm_extra_config,
vm_hardware=vm_hardware,
vm_disk=vm_disk,
vm_nic=vm_nic,
vm_hw_version=vm_hw_version,
state=state
)
viserver.disconnect()
module.exit_json(
changed=False,
vcenter=vcenter_hostname)
# this is magic, see lib/ansible/module_common.py
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
| jjshoe/ansible-modules-core | cloud/vmware/vsphere_guest.py | Python | gpl-3.0 | 65,239 | 0.001456 |
#!/usr/bin/env python
# Copyright 2015 Luminal, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pkg_resources
__version__ = pkg_resources.resource_string(__name__, 'VERSION')
| 3stack-software/credsmash | credsmash/__init__.py | Python | apache-2.0 | 686 | 0 |
# -*- coding: utf-8 -*-
#
# This file is part of pywebmachine released under the MIT license.
# See the NOTICE for more information.
class Resource(object):
def __init__(self, req, rsp):
pass
def allowed_methods(self, req, rsp):
return ["GET", "HEAD"]
def allow_missing_post(self, req, rsp):
return False
def auth_required(self, req, rsp):
return True
def charsets_provided(self, req, rsp):
"""\
return [("iso-8859-1", lambda x: x)]
Returning None prevents the character set negotiation
logic.
"""
return None
def content_types_accepted(self, req, rsp):
return None
def content_types_provided(self, req, rsp):
return [
("text/html", self.to_html)
]
def created_location(self, req, rsp):
return None
def delete_completed(self, req, rsp):
return True
def delete_resource(self, req, rsp):
return False
def encodings_provided(self, req, rsp):
"""\
return [("identity", lambda x: x)]
Returning None prevents the encoding negotiation logic.
"""
return None
def expires(self, req, rsp):
return None
def finish_request(self, req, rsp):
return True
def forbidden(self, req, rsp):
return False
def generate_etag(self, req, rsp):
return None
def is_authorized(self, req, rsp):
return True
def is_conflict(self, req, rsp):
return False
def known_content_type(self, req, rsp):
return True
def known_methods(self, req, rsp):
return set([
"GET", "HEAD", "POST", "PUT", "DELETE",
"TRACE", "CONNECT", "OPTIONS"
])
def languages_provided(self, req, rsp):
"""\
return ["en", "es", "en-gb"]
returning None short circuits the language negotiation
"""
return None
def last_modified(self, req, rsp):
return None
def malformed_request(self, req, rsp):
return False
def moved_permanently(self, req, rsp):
return False
def moved_temporarily(self, req, rsp):
return False
def multiple_choices(self, req, rsp):
return False
def options(self, req, rsp):
return []
def ping(self, req, rsp):
return True
def post_is_create(self, req, rsp):
return False
def previously_existed(self, req, rsp):
return False
def process_post(self, req, rsp):
return False
def resource_exists(self, req, rsp):
return True
def service_available(self, req, rsp):
return True
def uri_too_long(self, req, rsp):
return False
def valid_content_headers(self, req, rsp):
return True
def valid_entity_length(self, req, rsp):
return True
def variances(self, req, rsp):
return []
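
# --- Illustration only ---------------------------------------------------
# A minimal sketch of a concrete resource built on the class above. The
# subclass name and the HTML payload are hypothetical and not part of
# pywebmachine; the point is that the handlers referenced from
# content_types_provided() (such as to_html here) are expected to be
# defined on the subclass and to produce the response body.
class HelloResource(Resource):

    def content_types_provided(self, req, rsp):
        return [("text/html", self.to_html)]

    def to_html(self, req, rsp):
        return "<html><body>Hello, world!</body></html>"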
| benoitc/pywebmachine | pywebmachine/resource.py | Python | mit | 3,011 | 0.004982 |
# -*- coding: utf-8 -*-
#
# RedPipe documentation build configuration file, created by
# sphinx-quickstart on Wed Apr 19 13:22:45 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
import os
import sys
from os import path
ROOTDIR = path.abspath(os.path.dirname(os.path.dirname(__file__)))
sys.path.insert(0, ROOTDIR)
import redpipe # noqa
extensions = [
'alabaster',
'sphinx.ext.autodoc',
'sphinx.ext.intersphinx',
'sphinx.ext.viewcode',
'sphinx.ext.napoleon',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'RedPipe'
copyright = u'2017, John Loehrer'
author = u'John Loehrer'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = redpipe.__version__
# The full version, including alpha/beta/rc tags.
release = redpipe.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
html_theme_options = {
'logo': 'redpipe-logo.gif',
'github_banner': True,
'github_user': '72squared',
'github_repo': 'redpipe',
'travis_button': True,
'analytics_id': 'UA-98626018-1',
}
html_sidebars = {
'**': [
'about.html',
'navigation.html',
'relations.html',
'searchbox.html',
]
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'RedPipedoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'RedPipe.tex', u'%s Documentation' % project,
u'John Loehrer', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, project, u'%s Documentation' % project,
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, project, u'%s Documentation' % project,
author, project, 'making redis pipelines easy in python',
'Miscellaneous'),
]
suppress_warnings = ['image.nonlocal_uri']
| 72squared/redpipe | docs/conf.py | Python | mit | 5,400 | 0 |
#!/usr/bin/env python3
__all__ = [
'get_client', 'Client', 'ThriftServer', 'Struct', 'BadEnum', 'Error',
'ApplicationError', 'TransportError', 'SSLPolicy',
]
try:
from thrift.py3.client import get_client, Client
except ImportError:
__all__.remove('Client')
__all__.remove('get_client')
try:
from thrift.py3.server import ThriftServer, SSLPolicy
except ImportError:
__all__.remove('ThriftServer')
__all__.remove('SSLPolicy')
try:
from thrift.py3.types import Struct, BadEnum
except ImportError:
__all__.remove('Struct')
__all__.remove('BadEnum')
try:
from thrift.py3.exceptions import Error, ApplicationError, TransportError
except ImportError:
__all__.remove('Error')
__all__.remove('ApplicationError')
__all__.remove('TransportError')
| SergeyMakarenko/fbthrift | thrift/lib/py3/__init__.py | Python | apache-2.0 | 800 | 0 |
#!/usr/local/bin/python3 -u
"""
Author: Oliver Ratzesberger <https://github.com/fxstein>
Copyright: Copyright (C) 2016 Oliver Ratzesberger
License: Apache License, Version 2.0
"""
# Make sure we have access to SentientHome commons
import os
import sys
try:
sys.path.append(os.path.dirname(os.path.abspath(__file__)) + '/..')
except:
exit(1)
import time
from cement.core import hook
def process_event(app, event_type, event):
app.log.debug('process_event() Event: %s %s' %
(event_type, event), __name__)
try:
if event_type == 'isy' and event['Event.node'] is not None:
# Lookup name for easy rules coding
nodename = app.isy._nodedict[event['Event.node']]['name']
app.log.warn('ISY Node Event: %s %s: %s' %
(event['Event.node'], nodename, event), __name__)
if nodename == 'Master - Lights' and\
event['Event.control'] == 'DON':
app.log.error('Auto Off for: %s %s' %
(event['Event.node'], nodename), __name__)
time.sleep(5)
app.isy[event['Event.node']].off()
except Exception as e:
app.log.error(e)
# if event_type == 'isy' and event['Event.node'] == '24 0 93 1':
# app.log.warn('!!!!!!!!!!FOUNTAIN!!!!!!!!!!!')
# elif etype == 'isy' and event['Event.node'] == '29 14 86 1':
# app.log.debug('!!!!!!!!!!LIVING - WINDOW - OUTLET!!!!!!!!!!!')
# elif etype == 'isy' and state['control'] == 'DON':
# app.log.debug('Node: %s TURNED ON!!!!!!!!!!!!!!!!' %
# event['Event.node'])
# elif etype == 'isy' and state['control'] == 'ST':
# app.log.debug('Node: %s SET TARGET!!!!!!!!!!!!!!!' %
# event['Event.node'])
#
# if etype == 'ubnt.mfi.sensor':
# # Slow test workload for async task
# app.log.debug('mFi Sensor event: %s' % event)
# # log.debug('Pause for 10 sec')
# # yield from asyncio.sleep(10)
# # log.debug('Back from sleep')
#
# # Test mFi Sensor rule
# if etype == 'ubnt.mfi.sensor' and event['label'] == 'Well.Well.Pump':
# if event['amps'] < 21 and event['amps'] > 15:
# # Turn off the well pump for set amount of time
# app.log.info('!!!!!!!! WELL PUMP SAVER ACTION !!!!!!!!!')
#
# # First put pump to sleep
# well_pump = app.isy.get_node("Well - Well Pump")
# if well_pump:
# well_pump.off()
# # yield from asyncio.sleep(2)
# # well_pump.off()
# #
# # # Then schedule wakeup at a later time
# # yield from asyncio.sleep(900)
# # well_pump.on()
# # yield from asyncio.sleep(2)
# # well_pump.on()
def load(app):
hook.register('process_event', process_event)
    app.log.info('Successful Rules Plugin registration', __name__)
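# Illustration (hypothetical caller, not part of this plugin): once load() has
# registered the hook, the SentientHome engine is expected to dispatch events
# through cement's hook mechanism, roughly:
#
#   for res in hook.run('process_event', app, event_type, event):
#       pass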
#
# Do nothing
# (syntax check)
#
if __name__ == "__main__":
import __main__
print(__main__.__file__)
print("syntax ok")
exit(0)
| fxstein/SentientHome | rules/plugin.rules.py | Python | apache-2.0 | 3,196 | 0.000626 |
#!/usr/bin/env python2
##
## We define Instrution as two types "Computing instruction" and "Control Transfer instruction"
## for computing instruction
## "NAME" : [ Operand_Number , [ Formula_that_modify_reg ], [ FLAG_reg_modified]]
## for control transfter instruciton
## "NAME" : [ Operand_Number , [ Formula_that_modify_reg ], [ DST_Addr_on_condition]]
##
from capstone import *
from expression import Exp
from semantic import Semantic
from copy import deepcopy
class X86:
FLAG = ["CF", "PF", "AF", "ZF", "SF", "TF", "IF", "DF", "OF"]
regs64 = ["rax", "rbx", "rcx", "rdx", "rsi", "rdi", "rbp", "rsp", "r8", "r9", "r10", "r11", "r12",
"r13", "r14", "r15", "cs", "ds", "es", "fs", "gs", "ss"]
regs32 = ["eax", "ebx", "ecx", "edx", "cs", "ds", "es", "fs", "gs", "ss", "esi", "edi", "ebp", "esp", "eip"]
Tregs64 = {
"eax" : ["rax $ 0 : 31", "rax = ( rax $ 32 : 63 ) # eax", 32],
"ax" : ["rax $ 0 : 15", "rax = ( rax $ 16 : 63 ) # ax", 16],
"ah" : ["rax $ 8 : 15", "rax = ( rax $ 16 : 63 ) # ah # ( rax $ 0 : 7 )", 8],
"al" : ["rax $ 0 : 7", "rax = ( rax $ 8 : 63 ) # al", 8],
"ebx" : ["rbx $ 0 : 31", "rbx = ( rbx $ 32 : 63 ) # ebx", 32],
"bx" : ["rbx $ 0 : 15", "rbx = ( rbx $ 16 : 63 ) # bx", 16],
"bh" : ["rbx $ 8 : 15", "rbx = ( rbx $ 16 : 63 ) # bh # ( rbx $ 0 : 7 )", 8],
"bl" : ["rbx $ 0 : 7", "rbx = ( rbx $ 8 : 63 ) # bl", 8],
"ecx" : ["rcx $ 0 : 31", "rcx = ( rcx $ 32 : 63 ) # ecx", 32],
"cx" : ["rcx $ 0 : 15", "rcx = ( rcx $ 16 : 63 ) # cx", 16],
"ch" : ["rcx $ 8 : 15", "rcx = ( rcx $ 16 : 63 ) # ch # ( rcx $ 0 : 7 )", 8],
"cl" : ["rcx $ 0 : 7", "rcx = ( rcx $ 8 : 63 ) # cl", 8],
"edx" : ["rdx $ 0 : 31", "rdx = ( rdx $ 32 : 63 ) # edx", 32],
"dx" : ["rdx $ 0 : 15", "rdx = ( rdx $ 16 : 63 ) # dx", 16],
"dh" : ["rdx $ 8 : 15", "rdx = ( rdx $ 16 : 63 ) # dh # ( rdx $ 0 : 7 )", 8],
"dl" : ["rdx $ 0 : 7", "rdx = ( rdx $ 8 : 63 ) # dl", 8],
}
Tregs32 = {
"ax" : ["eax $ 0 : 15", "eax = ( eax $ 16 : 31 ) # ax", 16],
"ah" : ["eax $ 8 : 15", "eax = ( eax $ 16 : 31 ) # ah # ( eax $ 0 : 7 )", 8],
"al" : ["eax $ 0 : 7", "eax = ( eax $ 8 : 31 ) # al", 8],
"bx" : ["ebx $ 0 : 15", "ebx = ( ebx $ 16 : 31 ) # bx", 16],
"bh" : ["ebx $ 8 : 15", "ebx = ( ebx $ 16 : 31 ) # bh # ( ebx $ 0 : 7 )", 8],
"bl" : ["ebx $ 0 : 7", "ebx = ( ebx $ 8 : 31 ) # bl", 8],
"cx" : ["ecx $ 0 : 15", "ecx = ( ecx $ 16 : 31 ) # cx", 16],
"ch" : ["ecx $ 8 : 15", "ecx = ( ecx $ 16 : 31 ) # ch # ( ecx $ 0 : 7 )", 8],
"cl" : ["ecx $ 0 : 7", "ecx = ( ecx $ 8 : 31 ) # cl", 8],
"dx" : ["edx $ 0 : 15", "edx = ( edx $ 16 : 31 ) # dx", 16],
"dh" : ["edx $ 8 : 15", "edx = ( edx $ 16 : 31 ) # dh # ( edx $ 0 : 7 )", 8],
"dl" : ["edx $ 0 : 7", "edx = ( edx $ 8 : 31 ) # dl", 8],
}
# Instructions that modifty the execution path
Control = ["ret", "iret", "int", "into", "enter", "leave", "call", "jmp", "ja", "jae", "jb", "jbe", "jc", "je","jnc", "jne", "jnp", "jp", "jg", "jge", "jl", "jle", "jno", "jns", "jo", "js"]
insn = {
# data transfer
"mov": [2, ["operand1 = operand2"], []],
"cmove": [2, ["operand1 = ( ZF == 1 ) ? operand2 : operand1"], []],
"cmovne": [2, ["operand1 = ( ZF == 0 ) ? operand2 : operand1"], []],
"cmova": [2, ["operand1 = ( ( ZF == 0 ) & ( CF == 0 ) ) ? operand2 : operand1"], []],
"cmovae": [2, ["operand1 = ( CF == 0 ) ? operand2 : operand1"], []],
"cmovb": [2, ["operand1 = ( CF == 1 ) ? operand2 : operand1"], []],
"cmovbe": [2, ["operand1 = ( ( ZF == 1 ) | ( CF == 1 ) ) ? operand2 : operand1"], []],
"cmovg": [2, ["operand1 = ( ( ZF == 0 ) & ( SF == OF ) ) ? operand2 : operand1"], []],
"cmovge": [2, ["operand1 = ( SF == OF ) ? operand2 : operand1"], []],
"cmovl": [2, ["operand1 = ( SF != OF ) ? operand2 : operand1"], []],
"cmovle": [2, ["operand1 = ( ( ZF == 1 ) & ( SF != OF ) ) ? operand2 : operand1"], []],
"cmovs": [2, ["operand1 = ( SF == 1 ) ? operand2 : operand1"], []],
"cmovp": [2, ["operand1 = ( PF == 1 ) ? operand2 : operand1"], []],
"push": [1, ["* ssp = operand1"], []],
"pop": [1, ["operand1 = * ssp"], []],
#"movsx": [2, ["operand1 = operand2 > 0 ? operand2 : operand2 & 0xffffffffffffffff"], []],
#"movzx": [2, ["operand1 = 0 & operand2"], []],
# flag control instuctions
"stc": [0, [], ["CF = 1"]],
"clc": [0, [], ["CF = 0"]],
"cmc": [0, [], ["CF = ~ CF"]],
"cld": [0, [], ["DF = 0"]],
"std": [0, [], ["DF = 1"]],
"sti": [0, [], ["IF = 1"]],
"cli": [0, [], ["IF = 0"]],
# arithmetic
"xchg": [2, ["FIXME"], []],
"cmp": [2, ["temp = operand1 - operand2"], ["CF", "OF", "SF", "ZF", "AF", "PF"]],
"add": [2, ["operand1 = operand1 + operand2"], ["OF", "SF", "ZF", "AF", "CF", "PF"]],
"adc": [2, ["operand1 = operand1 + operand2 + CF"], ["OF", "SF", "ZF", "AF", "CF", "PF"]],
"sub": [2, ["operand1 = operand1 - operand2"], ["OF", "SF", "ZF", "AF", "CF", "PF"]],
"sbb": [2, ["operand1 = operand1 - operand2 - CF"], ["OF", "SF", "ZF", "AF", "CF", "PF"]],
"inc": [1, ["operand1 = operand1 + 1"], ["OF", "SF", "ZF", "AF", "PF"]],
"dec": [1, ["operand1 = operand1 - 1"], ["OF", "SF", "ZF", "AF", "PF"]],
"neg": [1, ["operand1 = - operand1"], ["CF", "OF", "SF", "ZF", "AF", "PF"]],
# control transfer
"ret": [1, [], ["* ssp"]],
"call": [1, [], ["* operand1"]],
"jmp": [1, [], ["* operand1"]],
"ja": [1, [], ["( ( CF == 0 ) & ( ZF == 0 ) ) ? * operand1 : 0"]],
"jae": [1, [], ["CF == 0 ? * operand1 : 0"]],
"jb": [1, [] , ["CF == 1 ? * operand1 : 0"]],
"jbe": [1, [] , ["( ( CF == 1 ) | ( ZF == 1 ) ) ? * operand1 : 0"]],
"jc": [1, [], ["CF == 1 ? * operand1 : 0"]],
"je": [1, [], ["ZF == 1 ? * operand1 : 0"]],
"jnc": [1, [], ["CF == 0 ? * operand1 : 0"]],
"jne": [1, [], ["ZF == 0 ? * operand1 : 0"]],
"jnp": [1, [], ["PF == 0 ? * operand1 : 0"]],
"jp": [1, [], ["PF == 1 ? * operand1 : 0"]],
"jg": [1, [], ["( ( ZF == 0 ) & ( SF == OF ) ) ? * operand1 : 0"]],
"jge": [1, [], ["SF == OF ? * operand1 : 0"]],
"jl": [1, [], ["SF != OF ? * operand1 : 0"]],
"jle": [1, [], ["( ( ZF == 1 ) | ( SF != OF ) ) ? * operand1 : 0"]],
"jno": [1, [], ["OF == 0 ? * operand1 : 0"]],
"jns": [1, [], ["SF == 0 ? * operand1 : 0"]],
"jo": [1, [], ["OF == 1 ? * operand1 : 0"]],
"js": [1, [], ["SF == 1 ? * operand1 : 0"]],
# logic
"and": [2, ["operand1 = operand1 & operand2"], ["CF = 0", "OF = 0", "SF", "ZF", "PF"]],
"or": [2, ["operand1 = operand1 | operand2"], ["CF = 0", "OF = 0", "SF", "ZF", "PF"]],
"xor": [2, ["operand1 = operand1 ^ operand2"], ["CF = 0","OF = 0", "SF", "ZF", "PF"]],
"not": [1, ["operand1 = ~ operand1"], []],
"test": [2, ["temp = operand1 & operand2"], ["OF = 0", "CF = 0", "SF", "ZF", "PF"]],
# segment
# others
"lea": [2, ["operand1 = & operand2"], []],
"nop": [0, [], []]
}
class ROPParserX86:
def __init__(self, gadgets, mode):
self.gadgets = gadgets
self.addrs = dict()
self.mode = mode
self.aligned = 0
self.memLoc = []
self.writeMem = {}
if mode == CS_MODE_32:
self.regs = X86.regs32 + X86.FLAG
self.Tregs = X86.Tregs32
self.aligned = 4
self.default = 32
self.sp = "esp"
self.ip = "eip"
else:
self.regs = X86.regs64 + X86.FLAG
self.Tregs = X86.Tregs64
self.aligned = 8
self.default = 64
self.sp = "rsp"
self.ip = "rip"
for k, v in X86.insn.items():
for i, s in enumerate(v[1]):
v[1][i] = s.replace("ssp", self.sp)
for i, s in enumerate(v[2]):
v[2][i] = s.replace("ssp", self.sp)
X86.insn.update({k:v})
def parse(self):
formulas = []
for gadget in self.gadgets:
self.memLoc = []
self.writeMem = {}
regs = {self.sp : Exp(self.sp)}
regs = self.parseInst(regs, gadget["insns"], 0)
if len(regs) == 0:
# gadget cannot parsed
continue
formulas.append(Semantic(regs, gadget["vaddr"], self.memLoc, self.writeMem))
self.addrs.update({hex(gadget["vaddr"]).replace("L",""):gadget["insns"]})
print "================================="
print "Unique gadgets parsed ", len(formulas)
return formulas
def parseInst(self, regs, insts, i):
if i >= len(insts):
return regs
prefix = insts[i]["mnemonic"]
op_str = insts[i]["op_str"].replace("*", " * ")
if prefix not in X86.insn.keys():
# unsupported ins
return {}
ins = X86.insn.get(prefix)
if prefix in X86.Control:
# control transfer ins, end of gadget
if prefix in ["ret", "call"]:
operand1 = None
operand1 = Exp.parseOperand(op_str.split(", ")[0], regs, self.Tregs)
dst = Exp.parseExp(ins[2][0].split())
if operand1 is None:
dst = dst.binding({"operand1":Exp.ExpL(Exp.defaultLength,0)})
else:
dst = dst.binding({"operand1":operand1})
dst = dst.binding(regs)
regs.update({self.ip : dst})
# only ret inst modifies stackpointer
if prefix == "ret":
ssp = regs[self.sp]
ssp = Exp(ssp, "+", Exp(self.aligned))
if operand1 is not None:
ssp = Exp(ssp, "+", operand1)
regs.update({ self.sp :ssp})
return regs
# handle jmp
operand1 = Exp.parseOperand(op_str.split(" ")[0], regs, self.Tregs)
dst = Exp.parseExp(ins[2][0].split())
dst = dst.binding({"operand1":operand1})
dst = dst.binding(regs)
regs.update({self.ip : dst})
return regs
else:
# computing ins
operand1 = None
operand2 = None
operands = {self.sp :regs[self.sp]}
for flag in X86.FLAG:
if flag in regs.keys():
operands.update({flag:regs[flag]})
# handle special cases
if ins[0] == 1:
operand1 = Exp.parseOperand(op_str.split(", ")[0], regs, self.Tregs)
if operand1 is None:
return []
operands.update({"operand1":operand1})
elif ins[0] == 2:
operand1 = Exp.parseOperand(op_str.split(", ")[0], regs, self.Tregs)
operand2 = Exp.parseOperand(op_str.split(", ")[1], regs, self.Tregs)
if operand1 is None or operand2 is None:
return []
operands.update({"operand1":operand1})
operands.update({"operand2":operand2})
if prefix != "lea" and "ptr" in op_str and (operand1.getCategory() == 3 or operand2.getCategory() == 3):
if prefix not in ["cmp", "test", "push"] and "ptr" in op_str.split(", ")[0]:
self.memLoc.append(operand1)
self.writeMem.update({str(operand1):operand1})
else:
self.memLoc.append(operand1 if operand1.getCategory() == 3 else operand2)
# contruct insn operation
if len(ins[1]) > 0:
if prefix == "lea":
reg = op_str.split(", ")[0]
addr = Exp.parseExp(op_str.split("[")[1][:-1].split())
addr = addr.binding(regs)
addr.length = Exp.defaultLength
regs.update({reg:addr})
return self.parseInst(regs, insts, i+1)
if prefix == "xchg":
op1k = op_str.split(", ")[0]
op2k = op_str.split(", ")[1]
op1v = None
op2v = None
if op2k in self.Tregs:
# subpart of register
temp = Exp.parse(self.Tregs[op2k][1], {op2k:operands["operand1"]})
for k, v in temp.items():
v.length = Exp.defaultLength
op2k = k
op2v = v
elif op2k in self.regs:
# register
operands["operand1"].length = Exp.defaultLength
op2v = operands["operand1"]
else:
# mem
op2k = str(operands["operand2"])
op2v = operands["operand1"]
if op1k in self.Tregs:
temp = Exp.parse(self.Tregs[op1k][1], {op1k:operands["operand2"]})
for k, v in temp.items():
v.length = Exp.defaultLength
op1k = k
op1v = v
elif op1k in self.regs:
operands["operand2"].length = Exp.defaultLength
op1v = operands["operand2"]
else:
op1k = str(operands["operand1"])
op1v = operands["operand2"]
regs.update({op1k:op1v})
regs.update({op2k:op2v})
return self.parseInst(regs, insts, i+1)
exps = Exp.parse(ins[1][0], operands)
for reg, val in exps.items():
# handle special case of xor, op1 == op2 clear the register
if prefix == "xor" and op_str.split(", ")[0] == op_str.split(", ")[1]:
val = Exp.ExpL(val.length, 0)
# temp variable, no need to assign
if reg == "temp":
val.length = max(operand1.length, operand2.length)
continue
if "*" in reg:
# this can only be push inst
val.length = Exp.defaultLength
regs.update({"[ " + str(regs[self.sp]) + " ]":val})
continue
dst = Exp.parseOperand(op_str.split(", ")[0], {}, {})
if str(dst) in self.regs:
# general purpose reg
val.length = Exp.defaultLength
regs.update({str(dst):val})
elif str(dst) in self.Tregs:
# subpart of GPRs
temp = Exp.parse(self.Tregs[str(dst)][1], {})
for k, v in temp.items():
v = v.binding(regs)
v = v.binding({str(dst):val})
v.length = Exp.defaultLength
regs.update({k:v})
else:
# mem
regs.update({str(operands["operand1"]):val})
if prefix == "push":
regs.update({self.sp :Exp(regs[self.sp], "+", Exp(self.aligned))})
if prefix == "pop":
regs.update({self.sp :Exp(regs[self.sp], "-", Exp(self.aligned))})
# evaluate flag regs base on exp
if len(ins[2]) != 0:
for flag in ins[2]:
tokens = flag.split()
if len(tokens) == 1:
for k, v in exps.items():
exp = Exp(v, tokens[0][:-1])
exp.length = 1
regs.update({tokens[0]:exp})
else:
f = Exp.parse(flag, {})
for k,v in f.items():
# "CF = 1"
v.length = 1
regs.update({tokens[0]:v})
return self.parseInst(regs, insts, i+1)
if __name__ == '__main__':
binarys = [b"\x8d\x4c\x32\x08\x01\xd8\x81\xc6\x34\x12\x00\x00\xc3",
b"\xbb\x01\x00\x00\x00\x29\xd8\x83\xf8\x01\x0f\x84\x0f\xf9\x01\x00\x5a\xc3"]
gadgets = []
md = Cs(CS_ARCH_X86, CS_MODE_32)
md.detail = True
for binary in binarys:
gadget = []
for decode in md.disasm(binary, 0x1000):
inst = {}
inst.update({"mnemonic": decode.mnemonic})
inst.update({"op_str": decode.op_str})
inst.update({"vaddr": decode.address})
gadget.append(inst)
gadgets.append(gadget)
p = ROPParserX86(gadgets, CS_MODE_32)
formulas = p.parse()
| XiaofanZhang/ROPgadget | ropgadget/ropparse/arch/parserx86.py | Python | gpl-2.0 | 17,183 | 0.013967 |
from typing_extensions import Protocol
# noinspection PyPropertyDefinition
class UserLike(Protocol):
@property
def is_active(self) -> bool: ...
@property
def is_authenticated(self) -> bool: ...
@property
def is_anonymous(self) -> bool: ...
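# Illustration (not part of sipa): UserLike is a structural protocol, so any
# object exposing these three read-only attributes satisfies it for static
# type checking, e.g. a hypothetical stub:
#
#   class AnonymousStub:
#       is_active = False
#       is_authenticated = False
#       is_anonymous = True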
| MarauderXtreme/sipa | sipa/backends/types.py | Python | mit | 268 | 0 |
import re
from threading import Thread
import time
from django.core.management.base import BaseCommand
import requests
from mittab.apps.tab.models import Round, TabSettings
from mittab.apps.tab.management.commands import utils
class Command(BaseCommand):
help = "Load test the tournament, connecting via localhost and hitting the server"
def add_arguments(self, parser):
parser.add_argument(
"--host",
dest="host",
help="The hostname of the server to hit",
nargs="?",
default="localhost:8000")
parser.add_argument(
"--connections",
dest="connections",
help="The number of concurrent connections to open",
nargs="?",
default=10,
type=int)
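    # Example invocation (hypothetical host/values); Django exposes the command
    # under this module's file name:
    #   python manage.py load_test --host localhost:8000 --connections 20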
def handle(self, *args, **options):
cur_round = TabSettings.get("cur_round") - 1
host = options["host"]
csrf_threads = []
rounds = Round.objects.filter(round_number=cur_round, victor=Round.NONE)
for round_obj in rounds:
judge = round_obj.chair
csrf_threads.append(GetCsrfThread(host, judge.ballot_code, round_obj))
num_errors = 0
while csrf_threads:
cur_csrf_threads = []
for _ in range(min(len(csrf_threads), options["connections"])):
cur_csrf_threads.append(csrf_threads.pop())
for thr in cur_csrf_threads:
thr.start()
for thr in cur_csrf_threads:
thr.join()
result_threads = []
for thr in cur_csrf_threads:
                csrf_token, thread_errors = thr.result
                num_errors += thread_errors
if csrf_token is None:
print("no csrf token")
result_thread = SubmitResultThread(
thr.host,
thr.ballot_code,
csrf_token,
thr.round_obj)
result_threads.append(result_thread)
for thr in result_threads:
thr.start()
for thr in result_threads:
thr.join()
for thr in result_threads:
num_errors += thr.num_errors
print("Done with one batch! Sleeping!")
time.sleep(2)
print("Done!")
print("Total errors: %s" % num_errors)
class SubmitResultThread(Thread):
MAX_ERRORS = 10
def __init__(self, host, ballot_code, csrf_token, round_obj):
super(SubmitResultThread, self).__init__()
self.host = host
self.ballot_code = ballot_code
self.csrf_token = csrf_token
self.round_obj = round_obj
self.num_errors = 0
self.resp = None
def run(self):
self.resp = self.get_resp()
def get_resp(self):
if self.num_errors >= self.MAX_ERRORS:
return None
result = utils.generate_random_results(self.round_obj, self.ballot_code)
result["csrfmiddlewaretoken"] = self.csrf_token
resp = requests.post("http://%s/e_ballots/%s/" % (self.host, self.ballot_code),
result,
cookies={"csrftoken": self.csrf_token})
if resp.status_code > 299:
self.num_errors += 1
return self.get_resp()
else:
return resp.text
class GetCsrfThread(Thread):
REGEX = "name=\"csrfmiddlewaretoken\" value=\"([^\"]+)\""
MAX_ERRORS = 10
def __init__(self, host, ballot_code, round_obj):
super(GetCsrfThread, self).__init__()
self.num_errors = 0
self.host = host
self.ballot_code = ballot_code
self.round_obj = round_obj
self.result = (None, None)
def run(self):
resp = self.get_resp()
if resp is None:
self.result = (None, self.num_errors)
else:
csrf = re.search(self.REGEX, resp).group(1)
self.result = (csrf, self.num_errors)
def get_resp(self):
if self.num_errors >= self.MAX_ERRORS:
return None
resp = requests.get("http://%s/e_ballots/%s" % (self.host, self.ballot_code))
if resp.status_code > 299:
self.num_errors += 1
return self.get_resp()
else:
return resp.text
| jolynch/mit-tab | mittab/apps/tab/management/commands/load_test.py | Python | mit | 4,345 | 0.001611 |
#!/usr/bin/env python
import json
import requests
from requests.auth import HTTPBasicAuth
import urlparse
import time
class PEACInfo:
def __init__(self, url, method):
self.url = url
self.method = method
self.headers = {
'accept': 'application/json',
'Content-Type': 'application/json'
}
LOCATION_INFO = PEACInfo('/service/locations.json', 'GET')
DEVICES_INFO = PEACInfo('/service/locations/%(locationId)s/devices.json', 'GET')
CONTROLS_INFO = PEACInfo('/service/devices/%(deviceId)s/controls.json', 'GET')
UPDATE_INFO = PEACInfo('/service/controls/update.json', 'PUT')
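# Illustration: each PEACInfo.url is a %-style template that _PEAC_request()
# below fills from its url_args parameter, e.g. for DEVICES_INFO:
#   '/service/locations/%(locationId)s/devices.json' % {'locationId': 83}
# (83 is a hypothetical location id, matching the commented example at the
# bottom of this file.)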
class PEAC(object):
def __init__(self, server, user, password, proxies={}):
self.server = server
self.user = user
self.password = password
self.proxies = proxies
def _make_url(self, peacinfo):
urlparts = list(urlparse.urlparse(self.server + peacinfo.url))
return urlparse.urlunparse(urlparts)
def _PEAC_request(self, peacinfo, payload=None, url_args=dict()):
url = self._make_url(peacinfo)
if payload:
resp = requests.request(peacinfo.method, url % url_args, data=json.dumps(payload), headers=peacinfo.headers, auth=HTTPBasicAuth(self.user, self.password), proxies=self.proxies)
else:
resp = requests.request(peacinfo.method, url % url_args, headers=peacinfo.headers, auth=HTTPBasicAuth(self.user, self.password), proxies=self.proxies)
return resp
def list_locations(self):
'''
        This request retrieves all locations.
Request Type: GET
Parameters: none
Response: JSON array with Location Objects.
'''
return self._PEAC_request(LOCATION_INFO).json()
def list_devices(self, location_id):
'''
This requests gets the list of devices in location location_id
Request Type: GET
Parameters: locationId, the id retrieved by the previous call to locations.json
Response: JSON Array of Device objects.
'''
return self._PEAC_request(DEVICES_INFO, url_args=dict(locationId=location_id)).json()
def get_device_info(self, device_id):
'''
Retrieves the controls associated with device deviceId.
Request Type: GET
Parameters: deviceId, the id retrieved from the device.json call.
Response: JSON Array of Control objects.
'''
return self._PEAC_request(CONTROLS_INFO, url_args=dict(deviceId=device_id)).json()
def update_control(self, controlId, numval):
'''
Updates the control value. This call is used to 'press' a button.
Method: PUT
Params: JSON Control Update Object
Response: Control object
'''
# import pdb; pdb.set_trace()
return self._PEAC_request(UPDATE_INFO, payload=dict(id=controlId, numVal=numval)).json()
def test_credentials(self):
'''
Tests credentials against PEAC server
'''
return self._PEAC_request(LOCATION_INFO).status_code == 200
def test_server_responses():
import os
peac = PEAC('http://localhost:8000', os.environ['PEAC_USER'], os.environ['PEAC_PASSWORD'])
# print peac.list_locations()
# print peac.list_devices(83)
# print peac.get_device_info(1955)
print peac.update_control(5000,0)
if __name__ == '__main__':
test_server_responses()
| OSUrobotics/peac_bridge | src/peac_bridge/peac_client.py | Python | bsd-3-clause | 3,420 | 0.003509 |
from abc import abstractmethod, ABCMeta
import numpy as np
from lolopy.loloserver import get_java_gateway
from lolopy.utils import send_feature_array, send_1D_array
from sklearn.base import BaseEstimator, RegressorMixin, ClassifierMixin, is_regressor
from sklearn.exceptions import NotFittedError
__all__ = ['RandomForestRegressor', 'RandomForestClassifier', 'ExtraRandomTreesRegressor', 'ExtraRandomTreesClassifier']
class BaseLoloLearner(BaseEstimator, metaclass=ABCMeta):
"""Base object for all leaners that use Lolo.
Contains logic for starting the JVM gateway, and the fit operations.
It is only necessary to implement the `_make_learner` object and create an `__init__` function
to adapt a learner from the Lolo library for use in lolopy.
The logic for making predictions (i.e., `predict` and `predict_proba`) is specific to whether the learner
is a classification or regression model.
In lolo, learners are not specific to a regression or classification problem and the type of problem is determined
when fitting data is provided to the algorithm.
In contrast, Scikit-learn learners for regression or classification problems are different classes.
We have implemented `BaseLoloRegressor` and `BaseLoloClassifier` abstract classes to make it easier to create
a classification or regression version of a Lolo base class.
The pattern for creating a scikit-learn compatible learner is to first implement the `_make_learner` and `__init__`
operations in a special "Mixin" class that inherits from `BaseLoloLearner`, and then create a regression- or
classification-specific class that inherits from both `BaseClassifier` or `BaseRegressor` and your new "Mixin".
See the RandomForest models as an example of this approach.
"""
def __init__(self):
self.gateway = get_java_gateway()
# Create a placeholder for the model
self.model_ = None
self._num_outputs = None
self._compress_level = 9
self.feature_importances_ = None
def __getstate__(self):
# Get the current state
try:
state = super(BaseLoloLearner, self).__getstate__()
except AttributeError:
state = self.__dict__.copy()
# Delete the gateway data
del state['gateway']
# If there is a model set, replace it with the JVM copy
if self.model_ is not None:
state['model_'] = self.gateway.jvm.io.citrine.lolo.util.LoloPyDataLoader.serializeObject(self.model_,
self._compress_level)
return state
def __setstate__(self, state):
# Unpickle the object
super(BaseLoloLearner, self).__setstate__(state)
# Get a pointer to the gateway
self.gateway = get_java_gateway()
# If needed, load the model into memory
if state['model_'] is not None:
bytes = state.pop('model_')
self.model_ = self.gateway.jvm.io.citrine.lolo.util.LoloPyDataLoader.deserializeObject(bytes)
def fit(self, X, y, weights=None):
# Instantiate the JVM object
learner = self._make_learner()
# Determine the number of outputs
y_shape = np.asarray(y).shape
if len(y_shape) == 1:
self._num_outputs = 1
elif len(y_shape) == 2:
self._num_outputs = y.shape[1]
else:
raise ValueError("Output array must be either 1- or 2-dimensional")
# Convert all of the training data to Java arrays
train_data, weights_java = self._convert_train_data(X, y, weights)
assert train_data.length() == len(X), "Array copy failed"
assert train_data.head()._1().length() == len(X[0]), "Wrong number of features"
assert weights_java.length() == len(X), "Weights copy failed"
# Train the model
result = learner.train(train_data, self.gateway.jvm.scala.Some(weights_java))
# Unlink the training data, which is no longer needed (to save memory)
self.gateway.detach(train_data)
self.gateway.detach(weights_java)
# Get the model out
self.model_ = result.getModel()
# Store the feature importances
feature_importances_java = result.getFeatureImportance().get()
feature_importances_bytes = self.gateway.jvm.io.citrine.lolo.util.LoloPyDataLoader.send1DArray(feature_importances_java)
self.feature_importances_ = np.frombuffer(feature_importances_bytes, 'float')
return self
@abstractmethod
def _make_learner(self):
"""Instantiate the learner used by Lolo to train a model
Returns:
(JavaObject) A lolo "Learner" object, which can be used to train a model"""
pass
def clear_model(self):
"""Utility operation for deleting model from JVM when no longer needed"""
if self.model_ is not None:
self.gateway.detach(self.model_)
self.model_ = None
def _convert_train_data(self, X, y, weights=None):
"""Convert the training data to a form accepted by Lolo
Args:
X (ndarray): Input variables
y (ndarray): Output variables
            weights (ndarray): Weights for each sample
Returns
train_data (JavaObject): Pointer to the training data in Java
"""
# Make some default weights
if weights is None:
weights = np.ones(len(y))
# Convert y and w to float64 or int32 with native ordering
y = np.array(y, dtype=np.float64 if is_regressor(self) else np.int32)
weights = np.array(weights, dtype=np.float64)
# Convert X, y, and w to Java Objects
X_java = send_feature_array(self.gateway, X)
if self._num_outputs == 1:
y_java = send_1D_array(self.gateway, y, is_regressor(self))
else:
y_java = send_feature_array(self.gateway, y)
assert y_java.length() == len(y) == len(X)
w_java = send_1D_array(self.gateway, weights, True)
assert w_java.length() == len(weights)
return self.gateway.jvm.io.citrine.lolo.util.LoloPyDataLoader.zipTrainingData(X_java, y_java), w_java
def _convert_run_data(self, X):
"""Convert the data to be run by the model
Args:
X (ndarray): Input data
Returns:
(JavaObject): Pointer to run data in Java
"""
if not isinstance(X, np.ndarray):
X = np.array(X)
return self.gateway.jvm.io.citrine.lolo.util.LoloPyDataLoader.getFeatureArray(X.tobytes(), X.shape[1], False)
def get_importance_scores(self, X):
"""Get the importance scores for each entry in the training set for each prediction
Args:
X (ndarray): Inputs for each entry to be assessed
"""
pred_result = self._get_prediction_result(X)
y_import_bytes = self.gateway.jvm.io.citrine.lolo.util.LoloPyDataLoader.getImportanceScores(pred_result)
y_import = np.frombuffer(y_import_bytes, 'float').reshape(len(X), -1)
return y_import
def _get_prediction_result(self, X):
"""Get the PredictionResult from the lolo JVM
The PredictionResult class holds methods that will generate the expected predictions, uncertainty intervals, etc
Args:
X (ndarray): Input features for each entry
Returns:
(JavaObject): Prediction result produced by evaluating the model
"""
# Check that the model is fitted
if self.model_ is None:
raise NotFittedError()
# Convert the data to Java
X_java = self._convert_run_data(X)
# Get the PredictionResult
pred_result = self.model_.transform(X_java)
# Unlink the run data, which is no longer needed (to save memory)
self.gateway.detach(X_java)
return pred_result
class BaseLoloRegressor(BaseLoloLearner, RegressorMixin):
"""Abstract class for models that produce regression models.
As written, this allows for both single-task and multi-task models.
Implements the predict operation."""
def predict(self, X, return_std = False, return_cov_matrix = False):
"""
Apply the model to a matrix of inputs, producing predictions and optionally some measure of uncertainty
Args:
X (ndarray): Input array
return_std (bool): if True, return the standard deviations along with the predictions
return_cov_matrix (bool): If True, return the covariance matrix along with the predictions
Returns
Sequence of predictions OR
(Sequence of predictions, Sequence of standard deviations) OR
(Sequence of predictions, Sequence of covariance matrices).
Each prediction and standard deviation is a float (for single-output learners) or an array (for multi-output learners).
Each covariance matrix entry is a (# outputs x # outputs) matrix.
"""
if return_std and return_cov_matrix:
raise ValueError("Only one of return_std or return_cov_matrix can be True")
# Start the prediction process
pred_result = self._get_prediction_result(X)
# Pull out the expected values
if self._num_outputs == 1:
y_pred_byte = self.gateway.jvm.io.citrine.lolo.util.LoloPyDataLoader.getRegressionExpected(pred_result)
y_pred = np.frombuffer(y_pred_byte, dtype='float') # Lolo gives a byte array back
else:
y_pred_byte = self.gateway.jvm.io.citrine.lolo.util.LoloPyDataLoader.getMultiRegressionExpected(pred_result)
y_pred = np.frombuffer(y_pred_byte, dtype='float').reshape(-1, self._num_outputs)
if return_std:
y_std = self._get_std(X, pred_result)
return y_pred, y_std
if return_cov_matrix:
corr_matrix = self._get_corr_matrix(X, pred_result)
y_std = self._get_std(X, pred_result).reshape(-1, self._num_outputs)
sigma_sq_matrix = np.array([np.outer(y_std[i, :], y_std[i, :]) for i in range(len(X))])
# both sigma_squared and correlation matrices have size (# predictions, # outputs, # outputs).
# They are multiplied term-by-term to produce the covariance matrix.
cov_matrix = sigma_sq_matrix * corr_matrix
return y_pred, cov_matrix
# Get the expected values
return y_pred
def _get_std(self, X, pred_result):
# TODO: This part fails on Windows because the NativeSystemBLAS is not found. Fix that
if self._num_outputs == 1:
y_std_bytes = self.gateway.jvm.io.citrine.lolo.util.LoloPyDataLoader.getRegressionUncertainty(pred_result)
return np.frombuffer(y_std_bytes, 'float')
else:
y_std_bytes = self.gateway.jvm.io.citrine.lolo.util.LoloPyDataLoader.getMultiRegressionUncertainty(pred_result)
return np.frombuffer(y_std_bytes, 'float').reshape(-1, self._num_outputs)
def _get_corr_matrix(self, X, pred_result):
num_predictions = len(X)
corr_matrix = np.zeros((num_predictions, self._num_outputs, self._num_outputs))
idx = np.arange(self._num_outputs)
corr_matrix[:, idx, idx] = 1.0
for i in range(self._num_outputs - 1):
for j in range(i + 1, self._num_outputs):
rho_bytes = self.gateway.jvm.io.citrine.lolo.util.LoloPyDataLoader.getRegressionCorrelation(pred_result, i, j)
rho = np.frombuffer(rho_bytes, 'float')
corr_matrix[:, i, j] = rho
corr_matrix[:, j, i] = rho
return corr_matrix
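# --- Editor's illustrative sketch (not part of lolopy) -----------------------
# The covariance assembly in BaseLoloRegressor.predict multiplies, element by
# element, a (num_predictions, num_outputs, num_outputs) stack of
# sigma_i * sigma_j outer products with the matching correlation stack. The
# numbers below are made up purely to show the shape bookkeeping.
def _example_covariance_from_std_and_corr():
    y_std = np.array([[0.5, 2.0]])                # 1 prediction, 2 outputs
    corr = np.array([[[1.0, 0.3], [0.3, 1.0]]])   # shape (1, 2, 2)
    sigma_sq = np.array([np.outer(s, s) for s in y_std])
    return sigma_sq * corr                        # element-wise product gives the covariance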
class BaseLoloClassifier(BaseLoloLearner, ClassifierMixin):
"""Base class for classification models
    Implements a fit operation that additionally stores the number of classes,
    and provides the predict/predict_proba methods"""
def fit(self, X, y, weights=None):
# Get the number of classes
self.n_classes_ = len(set(y))
return super(BaseLoloClassifier, self).fit(X, y, weights)
def predict(self, X):
pred_result = self._get_prediction_result(X)
# Pull out the expected values
y_pred_byte = self.gateway.jvm.io.citrine.lolo.util.LoloPyDataLoader.getClassifierExpected(pred_result)
y_pred = np.frombuffer(y_pred_byte, dtype=np.int32) # Lolo gives a byte array back
return y_pred
def predict_proba(self, X):
pred_result = self._get_prediction_result(X)
# Copy over the class probabilities
probs_byte = self.gateway.jvm.io.citrine.lolo.util.LoloPyDataLoader.getClassifierProbabilities(pred_result,
self.n_classes_)
probs = np.frombuffer(probs_byte, dtype='float').reshape(-1, self.n_classes_)
return probs
class RandomForestMixin(BaseLoloLearner):
"""Random Forest base class
Implements the _make_learner operation and the __init__ function with options specific to the RandomForest
class in Lolo"""
def __init__(self, num_trees=-1, use_jackknife=True, bias_learner=None,
leaf_learner=None, subset_strategy="auto", min_leaf_instances=1,
max_depth=2**30, uncertainty_calibration=False, randomize_pivot_location=False,
randomly_rotate_features=False, random_seed=None):
"""Initialize the RandomForest
Args:
num_trees (int): Number of trees to use in the forest (default of -1 sets the number of trees to the number of training rows)
use_jackknife (bool): Whether to use jackknife based variance estimates
bias_learner (BaseLoloLearner): Algorithm used to model bias (default: no model)
leaf_learner (BaseLoloLearner): Learner used at each leaf of the random forest (default: GuessTheMean)
subset_strategy (Union[string,int,float]): Strategy used to determine number of features used at each split
Available options:
"auto": Use the default for lolo (all features for regression, sqrt for classification)
"log2": Use the base 2 log of the number of features
"sqrt": Use the square root of the number of features
integer: Set the number of features explicitly
float: Use a certain fraction of the features
            min_leaf_instances (int): Minimum number of training instances per leaf
max_depth (int): Maximum depth to which to allow the decision trees to grow
uncertainty_calibration (bool): whether to re-calibrate the predicted uncertainty based on out-of-bag residuals
randomize_pivot_location (bool): whether to draw pivots randomly or always select the midpoint
randomly_rotate_features (bool): whether to randomly rotate real features for each tree in the forest
random_seed (int): random number generator seed used for nondeterministic functionality
"""
super(RandomForestMixin, self).__init__()
# Store the variables
self.num_trees = num_trees
self.use_jackknife = use_jackknife
self.subset_strategy = subset_strategy
self.bias_learner = bias_learner
self.leaf_learner = leaf_learner
self.min_leaf_instances = min_leaf_instances
self.max_depth = max_depth
self.uncertainty_calibration = uncertainty_calibration
self.randomize_pivot_location = randomize_pivot_location
self.randomly_rotate_features = randomly_rotate_features
self.random_seed = random_seed
def _make_learner(self):
rng = self.gateway.jvm.scala.util.Random(self.random_seed) if self.random_seed else self.gateway.jvm.scala.util.Random()
        # TODO: Figure out a more succinct way of dealing with optional arguments/Option values
# TODO: that ^^, please
learner = self.gateway.jvm.io.citrine.lolo.learners.RandomForest(
self.num_trees, self.use_jackknife,
getattr(self.gateway.jvm.io.citrine.lolo.learners.RandomForest,
"$lessinit$greater$default$3")() if self.bias_learner is None
else self.gateway.jvm.scala.Some(self.bias_learner._make_learner()),
getattr(self.gateway.jvm.io.citrine.lolo.learners.RandomForest,
"$lessinit$greater$default$4")() if self.leaf_learner is None
else self.gateway.jvm.scala.Some(self.leaf_learner._make_learner()),
self.subset_strategy,
self.min_leaf_instances,
self.max_depth,
self.uncertainty_calibration,
self.randomize_pivot_location,
self.randomly_rotate_features,
rng
)
return learner
class RandomForestRegressor(BaseLoloRegressor, RandomForestMixin):
"""Random Forest model used for regression"""
class RandomForestClassifier(BaseLoloClassifier, RandomForestMixin):
"""Random Forest model used for classification"""
class ExtraRandomTreesMixIn(BaseLoloLearner):
"""Extra Random Trees base class
Implements the _make_learner operation and the __init__ function with options specific to the ExtraRandomTrees
class in Lolo"""
def __init__(self, num_trees=-1, use_jackknife=False, bias_learner=None,
leaf_learner=None, subset_strategy="auto", min_leaf_instances=1,
max_depth=2**30, uncertainty_calibration=False, disable_bootstrap=True,
randomly_rotate_features=False, random_seed=None):
"""Initialize the ExtraRandomTrees ensemble
Args:
num_trees (int): Number of trees to use in the forest (default of -1 sets the number of trees to the number of training rows)
use_jackknife (bool): Whether to use jackknife based variance estimates (default: False)
bias_learner (BaseLoloLearner): Algorithm used to model bias (default: no model)
leaf_learner (BaseLoloLearner): Learner used at each leaf of the random forest (default: GuessTheMean)
subset_strategy (Union[string,int,float]): Strategy used to determine number of features used at each split
Available options:
"auto": Use the default for lolo (all features for regression; classification not supported)
"log2": Use the base 2 log of the number of features
"sqrt": Use the square root of the number of features
integer: Set the number of features explicitly
float: Use a certain fraction of the features
            min_leaf_instances (int): Minimum number of training instances per leaf
max_depth (int): Maximum depth to which to allow the decision trees to grow
uncertainty_calibration (bool): whether to re-calibrate the predicted uncertainty based on out-of-bag residuals
            disable_bootstrap (bool): whether to disable bootstrapping (default: True)
            randomly_rotate_features (bool): whether to randomly rotate real features for each tree in the forest
            random_seed (int): random number generator seed used for nondeterministic functionality
"""
super(ExtraRandomTreesMixIn, self).__init__()
# Store the variables
self.num_trees = num_trees
self.use_jackknife = use_jackknife
self.bias_learner = bias_learner
self.leaf_learner = leaf_learner
self.subset_strategy = subset_strategy
self.min_leaf_instances = min_leaf_instances
self.max_depth = max_depth
        self.uncertainty_calibration = uncertainty_calibration
        self.disable_bootstrap = disable_bootstrap
self.randomly_rotate_features = randomly_rotate_features
self.random_seed = random_seed
def _make_learner(self):
        # TODO: Figure out a more succinct way of dealing with optional arguments/Option values
# TODO: that ^^, please
rng = self.gateway.jvm.scala.util.Random(self.random_seed) if self.random_seed else self.gateway.jvm.scala.util.Random()
learner = self.gateway.jvm.io.citrine.lolo.learners.ExtraRandomTrees(
self.num_trees, self.use_jackknife,
getattr(self.gateway.jvm.io.citrine.lolo.learners.ExtraRandomTrees,
"$lessinit$greater$default$3")() if self.bias_learner is None
else self.gateway.jvm.scala.Some(self.bias_learner._make_learner()),
getattr(self.gateway.jvm.io.citrine.lolo.learners.ExtraRandomTrees,
"$lessinit$greater$default$4")() if self.leaf_learner is None
else self.gateway.jvm.scala.Some(self.leaf_learner._make_learner()),
self.subset_strategy,
self.min_leaf_instances,
self.max_depth,
self.uncertainty_calibration,
self.disable_bootstrap,
self.randomly_rotate_features,
rng
)
return learner
class ExtraRandomTreesRegressor(BaseLoloRegressor, ExtraRandomTreesMixIn):
    """Extra random trees model used for regression"""
class ExtraRandomTreesClassifier(BaseLoloClassifier, ExtraRandomTreesMixIn):
    """Extra random trees model used for classification"""
class RegressionTreeLearner(BaseLoloRegressor):
"""Regression tree learner, based on the RandomTree algorithm."""
def __init__(self, num_features=-1, max_depth=30, min_leaf_instances=1, leaf_learner=None, random_seed=None):
"""Initialize the learner
Args:
num_features (int): Number of features to consider at each split (-1 to consider all features)
max_depth (int): Maximum depth of the regression tree
            min_leaf_instances (int): Minimum number of instances per leaf
            leaf_learner (BaseLoloLearner): Learner to use on the leaves
            random_seed (int): random number generator seed used for nondeterministic functionality
"""
super(RegressionTreeLearner, self).__init__()
self.num_features = num_features
self.max_depth = max_depth
self.min_leaf_instances = min_leaf_instances
self.leaf_learner = leaf_learner
self.random_seed = random_seed
def _make_learner(self):
if self.leaf_learner is None:
# pull out default learner
leaf_learner = getattr(
self.gateway.jvm.io.citrine.lolo.trees.regression.RegressionTreeLearner,
"$lessinit$greater$default$4"
)()
else:
leaf_learner = self.gateway.jvm.scala.Some(self.leaf_learner._make_learner())
# pull out default splitter
splitter = getattr(
self.gateway.jvm.io.citrine.lolo.trees.regression.RegressionTreeLearner,
"$lessinit$greater$default$5"
)()
rng = self.gateway.jvm.scala.util.Random() if self.random_seed is None else self.gateway.jvm.scala.util.Random(self.random_seed)
return self.gateway.jvm.io.citrine.lolo.trees.regression.RegressionTreeLearner(
self.num_features, self.max_depth, self.min_leaf_instances,
leaf_learner, splitter, rng
)
class LinearRegression(BaseLoloRegressor):
"""Linear ridge regression with an :math:`L_2` penalty"""
def __init__(self, reg_param=None, fit_intercept=True):
"""Initialize the regressor"""
super(LinearRegression, self).__init__()
self.reg_param = reg_param
self.fit_intercept = fit_intercept
def _make_learner(self):
return self.gateway.jvm.io.citrine.lolo.linear.LinearRegressionLearner(
getattr(self.gateway.jvm.io.citrine.lolo.linear.LinearRegressionLearner,
"$lessinit$greater$default$1")() if self.reg_param is None
else self.gateway.jvm.scala.Some(float(self.reg_param)),
self.fit_intercept
)
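# Editor's hedged usage sketch: assumes the lolopy JVM gateway is available and
# uses random arrays as stand-ins for real training data; the keyword arguments
# simply mirror the constructors defined above.
if __name__ == "__main__":
    X_demo = np.random.uniform(size=(64, 3))
    y_demo = X_demo.sum(axis=1)
    rf = RandomForestRegressor(num_trees=32)
    rf.fit(X_demo, y_demo)
    y_pred, y_std = rf.predict(X_demo, return_std=True)
    print(y_pred[:3], y_std[:3])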
| CitrineInformatics/lolo | python/lolopy/learners.py | Python | apache-2.0 | 23,987 | 0.004586 |
"""Time offset classes for use with cftime.datetime objects"""
# The offset classes and mechanisms for generating time ranges defined in
# this module were copied/adapted from those defined in pandas. See in
# particular the objects and methods defined in pandas.tseries.offsets
# and pandas.core.indexes.datetimes.
# For reference, here is a copy of the pandas copyright notice:
# (c) 2011-2012, Lambda Foundry, Inc. and PyData Development Team
# All rights reserved.
# Copyright (c) 2008-2011 AQR Capital Management, LLC
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of the copyright holder nor the names of any
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import re
from datetime import timedelta
from distutils.version import LooseVersion
from functools import partial
from typing import ClassVar, Optional
import numpy as np
from ..core.pdcompat import count_not_none
from .cftimeindex import CFTimeIndex, _parse_iso8601_with_reso
from .times import format_cftime_datetime
def get_date_type(calendar):
"""Return the cftime date type for a given calendar name."""
try:
import cftime
except ImportError:
raise ImportError("cftime is required for dates with non-standard calendars")
else:
calendars = {
"noleap": cftime.DatetimeNoLeap,
"360_day": cftime.Datetime360Day,
"365_day": cftime.DatetimeNoLeap,
"366_day": cftime.DatetimeAllLeap,
"gregorian": cftime.DatetimeGregorian,
"proleptic_gregorian": cftime.DatetimeProlepticGregorian,
"julian": cftime.DatetimeJulian,
"all_leap": cftime.DatetimeAllLeap,
"standard": cftime.DatetimeGregorian,
}
return calendars[calendar]
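# Editor sketch: a calendar alias resolves to a cftime date type, which is then
# called like a constructor. Assumes cftime is installed; the date is arbitrary.
def _example_get_date_type():
    date_type = get_date_type("noleap")
    return date_type(2001, 2, 28)  # a cftime.DatetimeNoLeap instance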
class BaseCFTimeOffset:
_freq: ClassVar[Optional[str]] = None
_day_option: ClassVar[Optional[str]] = None
def __init__(self, n=1):
if not isinstance(n, int):
raise TypeError(
"The provided multiple 'n' must be an integer. "
"Instead a value of type {!r} was provided.".format(type(n))
)
self.n = n
def rule_code(self):
return self._freq
def __eq__(self, other):
return self.n == other.n and self.rule_code() == other.rule_code()
def __ne__(self, other):
return not self == other
def __add__(self, other):
return self.__apply__(other)
def __sub__(self, other):
import cftime
if isinstance(other, cftime.datetime):
raise TypeError("Cannot subtract a cftime.datetime " "from a time offset.")
elif type(other) == type(self):
return type(self)(self.n - other.n)
else:
return NotImplemented
def __mul__(self, other):
return type(self)(n=other * self.n)
def __neg__(self):
return self * -1
def __rmul__(self, other):
return self.__mul__(other)
def __radd__(self, other):
return self.__add__(other)
def __rsub__(self, other):
if isinstance(other, BaseCFTimeOffset) and type(self) != type(other):
raise TypeError("Cannot subtract cftime offsets of differing " "types")
return -self + other
def __apply__(self):
return NotImplemented
def onOffset(self, date):
"""Check if the given date is in the set of possible dates created
using a length-one version of this offset class."""
test_date = (self + date) - self
return date == test_date
def rollforward(self, date):
if self.onOffset(date):
return date
else:
return date + type(self)()
def rollback(self, date):
if self.onOffset(date):
return date
else:
return date - type(self)()
def __str__(self):
return "<{}: n={}>".format(type(self).__name__, self.n)
def __repr__(self):
return str(self)
def _get_offset_day(self, other):
# subclass must implement `_day_option`; calling from the base class
# will raise NotImplementedError.
return _get_day_of_month(other, self._day_option)
def _get_day_of_month(other, day_option):
"""Find the day in `other`'s month that satisfies a BaseCFTimeOffset's
onOffset policy, as described by the `day_option` argument.
Parameters
----------
other : cftime.datetime
day_option : 'start', 'end'
'start': returns 1
'end': returns last day of the month
Returns
-------
day_of_month : int
"""
if day_option == "start":
return 1
elif day_option == "end":
days_in_month = _days_in_month(other)
return days_in_month
elif day_option is None:
# Note: unlike `_shift_month`, _get_day_of_month does not
# allow day_option = None
raise NotImplementedError()
else:
raise ValueError(day_option)
def _days_in_month(date):
"""The number of days in the month of the given date"""
if date.month == 12:
reference = type(date)(date.year + 1, 1, 1)
else:
reference = type(date)(date.year, date.month + 1, 1)
return (reference - timedelta(days=1)).day
def _adjust_n_months(other_day, n, reference_day):
"""Adjust the number of times a monthly offset is applied based
on the day of a given date, and the reference day provided.
"""
if n > 0 and other_day < reference_day:
n = n - 1
elif n <= 0 and other_day > reference_day:
n = n + 1
return n
def _adjust_n_years(other, n, month, reference_day):
"""Adjust the number of times an annual offset is applied based on
another date, and the reference day provided"""
if n > 0:
if other.month < month or (other.month == month and other.day < reference_day):
n -= 1
else:
if other.month > month or (other.month == month and other.day > reference_day):
n += 1
return n
def _shift_month(date, months, day_option="start"):
"""Shift the date to a month start or end a given number of months away.
"""
import cftime
delta_year = (date.month + months) // 12
month = (date.month + months) % 12
if month == 0:
month = 12
delta_year = delta_year - 1
year = date.year + delta_year
if day_option == "start":
day = 1
elif day_option == "end":
reference = type(date)(year, month, 1)
day = _days_in_month(reference)
else:
raise ValueError(day_option)
if LooseVersion(cftime.__version__) < LooseVersion("1.0.4"):
# dayofwk=-1 is required to update the dayofwk and dayofyr attributes of
# the returned date object in versions of cftime between 1.0.2 and
# 1.0.3.4. It can be removed for versions of cftime greater than
# 1.0.3.4.
return date.replace(year=year, month=month, day=day, dayofwk=-1)
else:
return date.replace(year=year, month=month, day=day)
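# Editor sketch: with day_option="end" the shifted date lands on the last day of
# the target month, which depends on the calendar. Assumes cftime is installed;
# the date is illustrative.
def _example_shift_month():
    import cftime
    date = cftime.DatetimeNoLeap(2000, 1, 31)
    return _shift_month(date, 1, "end")  # 2000-02-28 in a noleap calendar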
def roll_qtrday(other, n, month, day_option, modby=3):
"""Possibly increment or decrement the number of periods to shift
based on rollforward/rollbackward conventions.
Parameters
----------
other : cftime.datetime
n : number of periods to increment, before adjusting for rolling
month : int reference month giving the first month of the year
day_option : 'start', 'end'
The convention to use in finding the day in a given month against
which to compare for rollforward/rollbackward decisions.
modby : int 3 for quarters, 12 for years
Returns
-------
n : int number of periods to increment
See Also
--------
_get_day_of_month : Find the day in a month provided an offset.
"""
months_since = other.month % modby - month % modby
if n > 0:
if months_since < 0 or (
months_since == 0 and other.day < _get_day_of_month(other, day_option)
):
# pretend to roll back if on same month but
# before compare_day
n -= 1
else:
if months_since > 0 or (
months_since == 0 and other.day > _get_day_of_month(other, day_option)
):
# make sure to roll forward, so negate
n += 1
return n
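# Editor sketch: roll_qtrday reduces a positive n when `other` falls before the
# anchor month of its quarter. The date and anchor month are illustrative;
# assumes cftime is installed.
def _example_roll_qtrday():
    import cftime
    before_anchor = cftime.DatetimeGregorian(2000, 1, 15)
    return roll_qtrday(before_anchor, n=1, month=2, day_option="start")  # -> 0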
def _validate_month(month, default_month):
if month is None:
result_month = default_month
else:
result_month = month
if not isinstance(result_month, int):
raise TypeError(
"'self.month' must be an integer value between 1 "
"and 12. Instead, it was set to a value of "
"{!r}".format(result_month)
)
elif not (1 <= result_month <= 12):
raise ValueError(
"'self.month' must be an integer value between 1 "
"and 12. Instead, it was set to a value of "
"{!r}".format(result_month)
)
return result_month
class MonthBegin(BaseCFTimeOffset):
_freq = "MS"
def __apply__(self, other):
n = _adjust_n_months(other.day, self.n, 1)
return _shift_month(other, n, "start")
def onOffset(self, date):
"""Check if the given date is in the set of possible dates created
using a length-one version of this offset class."""
return date.day == 1
class MonthEnd(BaseCFTimeOffset):
_freq = "M"
def __apply__(self, other):
n = _adjust_n_months(other.day, self.n, _days_in_month(other))
return _shift_month(other, n, "end")
def onOffset(self, date):
"""Check if the given date is in the set of possible dates created
using a length-one version of this offset class."""
return date.day == _days_in_month(date)
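# Editor sketch: adding the offsets above to a cftime date, and rolling back to
# the previous month start. Assumes cftime is installed; dates are illustrative.
def _example_month_offsets():
    import cftime
    d = cftime.DatetimeGregorian(2000, 1, 15)
    assert d + MonthBegin() == cftime.DatetimeGregorian(2000, 2, 1)
    assert d + MonthEnd() == cftime.DatetimeGregorian(2000, 1, 31)
    return MonthBegin().rollback(d)  # 2000-01-01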
_MONTH_ABBREVIATIONS = {
1: "JAN",
2: "FEB",
3: "MAR",
4: "APR",
5: "MAY",
6: "JUN",
7: "JUL",
8: "AUG",
9: "SEP",
10: "OCT",
11: "NOV",
12: "DEC",
}
class QuarterOffset(BaseCFTimeOffset):
"""Quarter representation copied off of pandas/tseries/offsets.py
"""
_freq: ClassVar[str]
_default_month: ClassVar[int]
def __init__(self, n=1, month=None):
BaseCFTimeOffset.__init__(self, n)
self.month = _validate_month(month, self._default_month)
def __apply__(self, other):
# months_since: find the calendar quarter containing other.month,
# e.g. if other.month == 8, the calendar quarter is [Jul, Aug, Sep].
# Then find the month in that quarter containing an onOffset date for
# self. `months_since` is the number of months to shift other.month
# to get to this on-offset month.
months_since = other.month % 3 - self.month % 3
qtrs = roll_qtrday(
other, self.n, self.month, day_option=self._day_option, modby=3
)
months = qtrs * 3 - months_since
return _shift_month(other, months, self._day_option)
def onOffset(self, date):
"""Check if the given date is in the set of possible dates created
using a length-one version of this offset class."""
mod_month = (date.month - self.month) % 3
return mod_month == 0 and date.day == self._get_offset_day(date)
def __sub__(self, other):
import cftime
if isinstance(other, cftime.datetime):
raise TypeError("Cannot subtract cftime.datetime from offset.")
elif type(other) == type(self) and other.month == self.month:
return type(self)(self.n - other.n, month=self.month)
else:
return NotImplemented
def __mul__(self, other):
return type(self)(n=other * self.n, month=self.month)
def rule_code(self):
return "{}-{}".format(self._freq, _MONTH_ABBREVIATIONS[self.month])
def __str__(self):
return "<{}: n={}, month={}>".format(type(self).__name__, self.n, self.month)
class QuarterBegin(QuarterOffset):
# When converting a string to an offset, pandas converts
# 'QS' to a QuarterBegin offset starting in the month of
# January. When creating a QuarterBegin offset directly
# from the constructor, however, the default month is March.
# We follow that behavior here.
_default_month = 3
_freq = "QS"
_day_option = "start"
def rollforward(self, date):
"""Roll date forward to nearest start of quarter"""
if self.onOffset(date):
return date
else:
return date + QuarterBegin(month=self.month)
def rollback(self, date):
"""Roll date backward to nearest start of quarter"""
if self.onOffset(date):
return date
else:
return date - QuarterBegin(month=self.month)
class QuarterEnd(QuarterOffset):
# When converting a string to an offset, pandas converts
# 'Q' to a QuarterEnd offset starting in the month of
# December. When creating a QuarterEnd offset directly
# from the constructor, however, the default month is March.
# We follow that behavior here.
_default_month = 3
_freq = "Q"
_day_option = "end"
def rollforward(self, date):
"""Roll date forward to nearest end of quarter"""
if self.onOffset(date):
return date
else:
return date + QuarterEnd(month=self.month)
def rollback(self, date):
"""Roll date backward to nearest end of quarter"""
if self.onOffset(date):
return date
else:
return date - QuarterEnd(month=self.month)
class YearOffset(BaseCFTimeOffset):
_freq: ClassVar[str]
_day_option: ClassVar[str]
_default_month: ClassVar[int]
def __init__(self, n=1, month=None):
BaseCFTimeOffset.__init__(self, n)
self.month = _validate_month(month, self._default_month)
def __apply__(self, other):
reference_day = _get_day_of_month(other, self._day_option)
years = _adjust_n_years(other, self.n, self.month, reference_day)
months = years * 12 + (self.month - other.month)
return _shift_month(other, months, self._day_option)
def __sub__(self, other):
import cftime
if isinstance(other, cftime.datetime):
raise TypeError("Cannot subtract cftime.datetime from offset.")
elif type(other) == type(self) and other.month == self.month:
return type(self)(self.n - other.n, month=self.month)
else:
return NotImplemented
def __mul__(self, other):
return type(self)(n=other * self.n, month=self.month)
def rule_code(self):
return "{}-{}".format(self._freq, _MONTH_ABBREVIATIONS[self.month])
def __str__(self):
return "<{}: n={}, month={}>".format(type(self).__name__, self.n, self.month)
class YearBegin(YearOffset):
_freq = "AS"
_day_option = "start"
_default_month = 1
def onOffset(self, date):
"""Check if the given date is in the set of possible dates created
using a length-one version of this offset class."""
return date.day == 1 and date.month == self.month
def rollforward(self, date):
"""Roll date forward to nearest start of year"""
if self.onOffset(date):
return date
else:
return date + YearBegin(month=self.month)
def rollback(self, date):
"""Roll date backward to nearest start of year"""
if self.onOffset(date):
return date
else:
return date - YearBegin(month=self.month)
class YearEnd(YearOffset):
_freq = "A"
_day_option = "end"
_default_month = 12
def onOffset(self, date):
"""Check if the given date is in the set of possible dates created
using a length-one version of this offset class."""
return date.day == _days_in_month(date) and date.month == self.month
def rollforward(self, date):
"""Roll date forward to nearest end of year"""
if self.onOffset(date):
return date
else:
return date + YearEnd(month=self.month)
def rollback(self, date):
"""Roll date backward to nearest end of year"""
if self.onOffset(date):
return date
else:
return date - YearEnd(month=self.month)
class Day(BaseCFTimeOffset):
_freq = "D"
def as_timedelta(self):
return timedelta(days=self.n)
def __apply__(self, other):
return other + self.as_timedelta()
class Hour(BaseCFTimeOffset):
_freq = "H"
def as_timedelta(self):
return timedelta(hours=self.n)
def __apply__(self, other):
return other + self.as_timedelta()
class Minute(BaseCFTimeOffset):
_freq = "T"
def as_timedelta(self):
return timedelta(minutes=self.n)
def __apply__(self, other):
return other + self.as_timedelta()
class Second(BaseCFTimeOffset):
_freq = "S"
def as_timedelta(self):
return timedelta(seconds=self.n)
def __apply__(self, other):
return other + self.as_timedelta()
_FREQUENCIES = {
"A": YearEnd,
"AS": YearBegin,
"Y": YearEnd,
"YS": YearBegin,
"Q": partial(QuarterEnd, month=12),
"QS": partial(QuarterBegin, month=1),
"M": MonthEnd,
"MS": MonthBegin,
"D": Day,
"H": Hour,
"T": Minute,
"min": Minute,
"S": Second,
"AS-JAN": partial(YearBegin, month=1),
"AS-FEB": partial(YearBegin, month=2),
"AS-MAR": partial(YearBegin, month=3),
"AS-APR": partial(YearBegin, month=4),
"AS-MAY": partial(YearBegin, month=5),
"AS-JUN": partial(YearBegin, month=6),
"AS-JUL": partial(YearBegin, month=7),
"AS-AUG": partial(YearBegin, month=8),
"AS-SEP": partial(YearBegin, month=9),
"AS-OCT": partial(YearBegin, month=10),
"AS-NOV": partial(YearBegin, month=11),
"AS-DEC": partial(YearBegin, month=12),
"A-JAN": partial(YearEnd, month=1),
"A-FEB": partial(YearEnd, month=2),
"A-MAR": partial(YearEnd, month=3),
"A-APR": partial(YearEnd, month=4),
"A-MAY": partial(YearEnd, month=5),
"A-JUN": partial(YearEnd, month=6),
"A-JUL": partial(YearEnd, month=7),
"A-AUG": partial(YearEnd, month=8),
"A-SEP": partial(YearEnd, month=9),
"A-OCT": partial(YearEnd, month=10),
"A-NOV": partial(YearEnd, month=11),
"A-DEC": partial(YearEnd, month=12),
"QS-JAN": partial(QuarterBegin, month=1),
"QS-FEB": partial(QuarterBegin, month=2),
"QS-MAR": partial(QuarterBegin, month=3),
"QS-APR": partial(QuarterBegin, month=4),
"QS-MAY": partial(QuarterBegin, month=5),
"QS-JUN": partial(QuarterBegin, month=6),
"QS-JUL": partial(QuarterBegin, month=7),
"QS-AUG": partial(QuarterBegin, month=8),
"QS-SEP": partial(QuarterBegin, month=9),
"QS-OCT": partial(QuarterBegin, month=10),
"QS-NOV": partial(QuarterBegin, month=11),
"QS-DEC": partial(QuarterBegin, month=12),
"Q-JAN": partial(QuarterEnd, month=1),
"Q-FEB": partial(QuarterEnd, month=2),
"Q-MAR": partial(QuarterEnd, month=3),
"Q-APR": partial(QuarterEnd, month=4),
"Q-MAY": partial(QuarterEnd, month=5),
"Q-JUN": partial(QuarterEnd, month=6),
"Q-JUL": partial(QuarterEnd, month=7),
"Q-AUG": partial(QuarterEnd, month=8),
"Q-SEP": partial(QuarterEnd, month=9),
"Q-OCT": partial(QuarterEnd, month=10),
"Q-NOV": partial(QuarterEnd, month=11),
"Q-DEC": partial(QuarterEnd, month=12),
}
_FREQUENCY_CONDITION = "|".join(_FREQUENCIES.keys())
_PATTERN = fr"^((?P<multiple>\d+)|())(?P<freq>({_FREQUENCY_CONDITION}))$"
# pandas defines these offsets as "Tick" objects, which for instance have
# distinct behavior from monthly or longer frequencies in resample.
CFTIME_TICKS = (Day, Hour, Minute, Second)
def to_offset(freq):
"""Convert a frequency string to the appropriate subclass of
BaseCFTimeOffset."""
if isinstance(freq, BaseCFTimeOffset):
return freq
else:
try:
freq_data = re.match(_PATTERN, freq).groupdict()
except AttributeError:
raise ValueError("Invalid frequency string provided")
freq = freq_data["freq"]
multiples = freq_data["multiple"]
if multiples is None:
multiples = 1
else:
multiples = int(multiples)
return _FREQUENCIES[freq](n=multiples)
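# Editor sketch: frequency strings accepted by to_offset, following the
# _PATTERN regex and _FREQUENCIES table above.
def _example_to_offset():
    assert to_offset("5H") == Hour(n=5)
    assert to_offset("QS-NOV") == QuarterBegin(n=1, month=11)
    return to_offset("2AS-JUL")  # YearBegin(n=2, month=7)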
def to_cftime_datetime(date_str_or_date, calendar=None):
import cftime
if isinstance(date_str_or_date, str):
if calendar is None:
raise ValueError(
"If converting a string to a cftime.datetime object, "
"a calendar type must be provided"
)
date, _ = _parse_iso8601_with_reso(get_date_type(calendar), date_str_or_date)
return date
elif isinstance(date_str_or_date, cftime.datetime):
return date_str_or_date
else:
raise TypeError(
"date_str_or_date must be a string or a "
"subclass of cftime.datetime. Instead got "
"{!r}.".format(date_str_or_date)
)
def normalize_date(date):
"""Round datetime down to midnight."""
return date.replace(hour=0, minute=0, second=0, microsecond=0)
def _maybe_normalize_date(date, normalize):
"""Round datetime down to midnight if normalize is True."""
if normalize:
return normalize_date(date)
else:
return date
def _generate_linear_range(start, end, periods):
"""Generate an equally-spaced sequence of cftime.datetime objects between
and including two dates (whose length equals the number of periods)."""
import cftime
total_seconds = (end - start).total_seconds()
values = np.linspace(0.0, total_seconds, periods, endpoint=True)
units = "seconds since {}".format(format_cftime_datetime(start))
calendar = start.calendar
return cftime.num2date(
values, units=units, calendar=calendar, only_use_cftime_datetimes=True
)
def _generate_range(start, end, periods, offset):
"""Generate a regular range of cftime.datetime objects with a
given time offset.
Adapted from pandas.tseries.offsets.generate_range.
Parameters
----------
start : cftime.datetime, or None
Start of range
end : cftime.datetime, or None
End of range
periods : int, or None
Number of elements in the sequence
offset : BaseCFTimeOffset
An offset class designed for working with cftime.datetime objects
Returns
-------
A generator object
"""
if start:
start = offset.rollforward(start)
if end:
end = offset.rollback(end)
if periods is None and end < start:
end = None
periods = 0
if end is None:
end = start + (periods - 1) * offset
if start is None:
start = end - (periods - 1) * offset
current = start
if offset.n >= 0:
while current <= end:
yield current
next_date = current + offset
if next_date <= current:
raise ValueError(f"Offset {offset} did not increment date")
current = next_date
else:
while current >= end:
yield current
next_date = current + offset
if next_date >= current:
raise ValueError(f"Offset {offset} did not decrement date")
current = next_date
def cftime_range(
start=None,
end=None,
periods=None,
freq="D",
normalize=False,
name=None,
closed=None,
calendar="standard",
):
"""Return a fixed frequency CFTimeIndex.
Parameters
----------
start : str or cftime.datetime, optional
Left bound for generating dates.
end : str or cftime.datetime, optional
Right bound for generating dates.
periods : integer, optional
Number of periods to generate.
freq : str, default 'D', BaseCFTimeOffset, or None
Frequency strings can have multiples, e.g. '5H'.
normalize : bool, default False
Normalize start/end dates to midnight before generating date range.
name : str, default None
Name of the resulting index
closed : {None, 'left', 'right'}, optional
Make the interval closed with respect to the given frequency to the
'left', 'right', or both sides (None, the default).
calendar : str
Calendar type for the datetimes (default 'standard').
Returns
-------
CFTimeIndex
Notes
-----
This function is an analog of ``pandas.date_range`` for use in generating
sequences of ``cftime.datetime`` objects. It supports most of the
features of ``pandas.date_range`` (e.g. specifying how the index is
``closed`` on either side, or whether or not to ``normalize`` the start and
end bounds); however, there are some notable exceptions:
- You cannot specify a ``tz`` (time zone) argument.
- Start or end dates specified as partial-datetime strings must use the
`ISO-8601 format <https://en.wikipedia.org/wiki/ISO_8601>`_.
- It supports many, but not all, frequencies supported by
``pandas.date_range``. For example it does not currently support any of
the business-related, semi-monthly, or sub-second frequencies.
- Compound sub-monthly frequencies are not supported, e.g. '1H1min', as
these can easily be written in terms of the finest common resolution,
e.g. '61min'.
Valid simple frequency strings for use with ``cftime``-calendars include
any multiples of the following.
+--------+--------------------------+
| Alias | Description |
+========+==========================+
| A, Y | Year-end frequency |
+--------+--------------------------+
| AS, YS | Year-start frequency |
+--------+--------------------------+
| Q | Quarter-end frequency |
+--------+--------------------------+
| QS | Quarter-start frequency |
+--------+--------------------------+
| M | Month-end frequency |
+--------+--------------------------+
| MS | Month-start frequency |
+--------+--------------------------+
| D | Day frequency |
+--------+--------------------------+
| H | Hour frequency |
+--------+--------------------------+
| T, min | Minute frequency |
+--------+--------------------------+
| S | Second frequency |
+--------+--------------------------+
Any multiples of the following anchored offsets are also supported.
+----------+--------------------------------------------------------------------+
| Alias | Description |
+==========+====================================================================+
| A(S)-JAN | Annual frequency, anchored at the end (or beginning) of January |
+----------+--------------------------------------------------------------------+
| A(S)-FEB | Annual frequency, anchored at the end (or beginning) of February |
+----------+--------------------------------------------------------------------+
| A(S)-MAR | Annual frequency, anchored at the end (or beginning) of March |
+----------+--------------------------------------------------------------------+
| A(S)-APR | Annual frequency, anchored at the end (or beginning) of April |
+----------+--------------------------------------------------------------------+
| A(S)-MAY | Annual frequency, anchored at the end (or beginning) of May |
+----------+--------------------------------------------------------------------+
| A(S)-JUN | Annual frequency, anchored at the end (or beginning) of June |
+----------+--------------------------------------------------------------------+
| A(S)-JUL | Annual frequency, anchored at the end (or beginning) of July |
+----------+--------------------------------------------------------------------+
| A(S)-AUG | Annual frequency, anchored at the end (or beginning) of August |
+----------+--------------------------------------------------------------------+
| A(S)-SEP | Annual frequency, anchored at the end (or beginning) of September |
+----------+--------------------------------------------------------------------+
| A(S)-OCT | Annual frequency, anchored at the end (or beginning) of October |
+----------+--------------------------------------------------------------------+
| A(S)-NOV | Annual frequency, anchored at the end (or beginning) of November |
+----------+--------------------------------------------------------------------+
| A(S)-DEC | Annual frequency, anchored at the end (or beginning) of December |
+----------+--------------------------------------------------------------------+
| Q(S)-JAN | Quarter frequency, anchored at the end (or beginning) of January |
+----------+--------------------------------------------------------------------+
| Q(S)-FEB | Quarter frequency, anchored at the end (or beginning) of February |
+----------+--------------------------------------------------------------------+
| Q(S)-MAR | Quarter frequency, anchored at the end (or beginning) of March |
+----------+--------------------------------------------------------------------+
| Q(S)-APR | Quarter frequency, anchored at the end (or beginning) of April |
+----------+--------------------------------------------------------------------+
| Q(S)-MAY | Quarter frequency, anchored at the end (or beginning) of May |
+----------+--------------------------------------------------------------------+
| Q(S)-JUN | Quarter frequency, anchored at the end (or beginning) of June |
+----------+--------------------------------------------------------------------+
| Q(S)-JUL | Quarter frequency, anchored at the end (or beginning) of July |
+----------+--------------------------------------------------------------------+
| Q(S)-AUG | Quarter frequency, anchored at the end (or beginning) of August |
+----------+--------------------------------------------------------------------+
| Q(S)-SEP | Quarter frequency, anchored at the end (or beginning) of September |
+----------+--------------------------------------------------------------------+
| Q(S)-OCT | Quarter frequency, anchored at the end (or beginning) of October |
+----------+--------------------------------------------------------------------+
| Q(S)-NOV | Quarter frequency, anchored at the end (or beginning) of November |
+----------+--------------------------------------------------------------------+
| Q(S)-DEC | Quarter frequency, anchored at the end (or beginning) of December |
+----------+--------------------------------------------------------------------+
Finally, the following calendar aliases are supported.
+--------------------------------+---------------------------------------+
| Alias | Date type |
+================================+=======================================+
| standard, gregorian | ``cftime.DatetimeGregorian`` |
+--------------------------------+---------------------------------------+
| proleptic_gregorian | ``cftime.DatetimeProlepticGregorian`` |
+--------------------------------+---------------------------------------+
| noleap, 365_day | ``cftime.DatetimeNoLeap`` |
+--------------------------------+---------------------------------------+
| all_leap, 366_day | ``cftime.DatetimeAllLeap`` |
+--------------------------------+---------------------------------------+
| 360_day | ``cftime.Datetime360Day`` |
+--------------------------------+---------------------------------------+
| julian | ``cftime.DatetimeJulian`` |
+--------------------------------+---------------------------------------+
Examples
--------
This function returns a ``CFTimeIndex``, populated with ``cftime.datetime``
objects associated with the specified calendar type, e.g.
>>> xr.cftime_range(start="2000", periods=6, freq="2MS", calendar="noleap")
CFTimeIndex([2000-01-01 00:00:00, 2000-03-01 00:00:00, 2000-05-01 00:00:00,
2000-07-01 00:00:00, 2000-09-01 00:00:00, 2000-11-01 00:00:00],
dtype='object')
As in the standard pandas function, three of the ``start``, ``end``,
``periods``, or ``freq`` arguments must be specified at a given time, with
the other set to ``None``. See the `pandas documentation
<https://pandas.pydata.org/pandas-docs/stable/generated/pandas.date_range.html#pandas.date_range>`_
for more examples of the behavior of ``date_range`` with each of the
parameters.
See Also
--------
pandas.date_range
"""
# Adapted from pandas.core.indexes.datetimes._generate_range.
if count_not_none(start, end, periods, freq) != 3:
raise ValueError(
"Of the arguments 'start', 'end', 'periods', and 'freq', three "
"must be specified at a time."
)
if start is not None:
start = to_cftime_datetime(start, calendar)
start = _maybe_normalize_date(start, normalize)
if end is not None:
end = to_cftime_datetime(end, calendar)
end = _maybe_normalize_date(end, normalize)
if freq is None:
dates = _generate_linear_range(start, end, periods)
else:
offset = to_offset(freq)
dates = np.array(list(_generate_range(start, end, periods, offset)))
left_closed = False
right_closed = False
if closed is None:
left_closed = True
right_closed = True
elif closed == "left":
left_closed = True
elif closed == "right":
right_closed = True
else:
raise ValueError("Closed must be either 'left', 'right' or None")
if not left_closed and len(dates) and start is not None and dates[0] == start:
dates = dates[1:]
if not right_closed and len(dates) and end is not None and dates[-1] == end:
dates = dates[:-1]
return CFTimeIndex(dates, name=name)
| shoyer/xarray | xarray/coding/cftime_offsets.py | Python | apache-2.0 | 35,760 | 0.001091 |
# -*- coding: utf-8 -*-
"""QGIS Unit tests for QgsMapLayer
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'Nyall Dawson'
__date__ = '1/02/2017'
__copyright__ = 'Copyright 2017, The QGIS Project'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import qgis # NOQA
import tempfile
from qgis.core import (QgsReadWriteContext,
QgsVectorLayer,
QgsProject)
from qgis.testing import start_app, unittest
from qgis.PyQt.QtXml import QDomDocument
start_app()
class TestQgsMapLayer(unittest.TestCase):
def testUniqueId(self):
"""
Test that layers created quickly with same name get a unique ID
"""
# make 1000 layers quickly
layers = []
for i in range(1000):
layer = QgsVectorLayer(
'Point?crs=epsg:4326&field=name:string(20)',
'test',
'memory')
layers.append(layer)
# make sure all ids are unique
ids = set()
for l in layers:
self.assertFalse(l.id() in ids)
ids.add(l.id())
def copyLayerViaXmlReadWrite(self, source, dest):
# write to xml
doc = QDomDocument("testdoc")
elem = doc.createElement("maplayer")
self.assertTrue(source.writeLayerXml(elem, doc, QgsReadWriteContext()))
self.assertTrue(dest.readLayerXml(elem, QgsReadWriteContext()), QgsProject.instance())
def testGettersSetters(self):
# test auto refresh getters/setters
layer = QgsVectorLayer("Point?field=fldtxt:string",
"layer", "memory")
self.assertFalse(layer.hasAutoRefreshEnabled())
self.assertEqual(layer.autoRefreshInterval(), 0)
layer.setAutoRefreshInterval(5)
self.assertFalse(layer.hasAutoRefreshEnabled())
self.assertEqual(layer.autoRefreshInterval(), 5)
layer.setAutoRefreshEnabled(True)
self.assertTrue(layer.hasAutoRefreshEnabled())
self.assertEqual(layer.autoRefreshInterval(), 5)
layer.setAutoRefreshInterval(0) # should disable auto refresh
self.assertFalse(layer.hasAutoRefreshEnabled())
self.assertEqual(layer.autoRefreshInterval(), 0)
def testSaveRestoreAutoRefresh(self):
""" test saving/restoring auto refresh to xml """
layer = QgsVectorLayer("Point?field=fldtxt:string",
"layer", "memory")
layer2 = QgsVectorLayer("Point?field=fldtxt:string",
"layer", "memory")
self.copyLayerViaXmlReadWrite(layer, layer2)
self.assertFalse(layer2.hasAutoRefreshEnabled())
self.assertEqual(layer2.autoRefreshInterval(), 0)
layer.setAutoRefreshInterval(56)
self.copyLayerViaXmlReadWrite(layer, layer2)
self.assertFalse(layer2.hasAutoRefreshEnabled())
self.assertEqual(layer2.autoRefreshInterval(), 56)
layer.setAutoRefreshEnabled(True)
self.copyLayerViaXmlReadWrite(layer, layer2)
self.assertTrue(layer2.hasAutoRefreshEnabled())
self.assertEqual(layer2.autoRefreshInterval(), 56)
def testReadWriteMetadata(self):
layer = QgsVectorLayer("Point?field=fldtxt:string", "layer", "memory")
m = layer.metadata()
# Only abstract, more tests are done in test_qgslayermetadata.py
m.setAbstract('My abstract')
layer.setMetadata(m)
self.assertTrue(layer.metadata().abstract(), 'My abstract')
destination = tempfile.NamedTemporaryFile(suffix='.qmd').name
message, status = layer.saveNamedMetadata(destination)
self.assertTrue(status, message)
layer2 = QgsVectorLayer("Point?field=fldtxt:string", "layer", "memory")
message, status = layer2.loadNamedMetadata(destination)
self.assertTrue(status)
self.assertTrue(layer2.metadata().abstract(), 'My abstract')
if __name__ == '__main__':
unittest.main()
| stevenmizuno/QGIS | tests/src/python/test_qgsmaplayer.py | Python | gpl-2.0 | 4,223 | 0.000474 |
# This program is free software; you can redistribute it and/or modify
# it under the terms of the (LGPL) GNU Lesser General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Lesser General Public License for more details at
# ( http://www.gnu.org/licenses/lgpl.html ).
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# written by: Jeff Ortel ( jortel@redhat.com )
"""
The sax module contains a collection of classes that provide a
(D)ocument (O)bject (M)odel representation of an XML document.
The goal is to provide an easy, intuative interface for managing XML
documents. Although, the term, DOM, is used above, this model is
B{far} better.
XML namespaces in suds are represented using a (2) element tuple
containing the prefix and the URI. Eg: I{('tns', 'http://myns')}
"""
import suds
from suds import *
from suds.sax import *
from suds.sax.attribute import Attribute
from suds.sax.document import Document
from suds.sax.element import Element
from suds.sax.text import Text
from logging import getLogger
import sys
from xml.sax import make_parser, InputSource, ContentHandler
from xml.sax.handler import feature_external_ges
log = getLogger(__name__)
class Handler(ContentHandler):
""" sax hanlder """
def __init__(self):
self.nodes = [Document()]
def startElement(self, name, attrs):
top = self.top()
node = Element(unicode(name))
for a in attrs.getNames():
n = unicode(a)
v = unicode(attrs.getValue(a))
attribute = Attribute(n,v)
if self.mapPrefix(node, attribute):
continue
node.append(attribute)
node.charbuffer = []
top.append(node)
self.push(node)
def mapPrefix(self, node, attribute):
skip = False
if attribute.name == 'xmlns':
if len(attribute.value):
node.expns = unicode(attribute.value)
skip = True
elif attribute.prefix == 'xmlns':
prefix = attribute.name
node.nsprefixes[prefix] = unicode(attribute.value)
skip = True
return skip
def endElement(self, name):
name = unicode(name)
current = self.top()
if len(current.charbuffer):
current.text = Text(u''.join(current.charbuffer))
del current.charbuffer
if len(current):
current.trim()
if name == current.qname():
self.pop()
else:
raise Exception('malformed document')
def characters(self, content):
text = unicode(content)
node = self.top()
node.charbuffer.append(text)
def push(self, node):
self.nodes.append(node)
return node
def pop(self):
return self.nodes.pop()
def top(self):
return self.nodes[len(self.nodes)-1]
class Parser:
""" SAX Parser """
@classmethod
def saxparser(cls):
p = make_parser()
p.setFeature(feature_external_ges, 0)
h = Handler()
p.setContentHandler(h)
return (p, h)
def parse(self, file=None, string=None):
"""
SAX parse XML text.
@param file: Parse a python I{file-like} object.
@type file: I{file-like} object.
@param string: Parse string XML.
@type string: str
"""
timer = suds.metrics.Timer()
timer.start()
sax, handler = self.saxparser()
if file is not None:
sax.parse(file)
timer.stop()
suds.metrics.log.debug('sax (%s) duration: %s', file, timer)
return handler.nodes[0]
if string is not None:
source = InputSource(None)
source.setByteStream(suds.BytesIO(string))
sax.parse(source)
timer.stop()
suds.metrics.log.debug('%s\nsax duration: %s', string, timer)
return handler.nodes[0]
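# Editor's hedged usage sketch: parse a small XML document from a string and
# read back a child element. The root()/getChild()/text accessors assume the
# suds.sax Document/Element API used elsewhere in suds.
if __name__ == '__main__':
    doc = Parser().parse(string='<a xmlns="http://myns"><b>hi</b></a>')
    root = doc.root()
    log.info('parsed <%s> with child text "%s"', root.name, root.getChild('b').text)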
| sfriesel/suds | suds/sax/parser.py | Python | lgpl-3.0 | 4,378 | 0.000228 |
# -*- coding: utf-8 -*-
from model.group import Group
def test_add_group(app):
old_groups = app.group.get_group_list()
group = Group(name="hjhj", header="jhjh", footer="jhjjhhj")
app.group.create(group)
new_groups = app.group.get_group_list()
assert len(old_groups) + 1 == len(new_groups)
old_groups.append(group)
assert sorted(old_groups, key=Group.id_or_max) == sorted(new_groups, key=Group.id_or_max)
def test_add_empty_group(app):
old_groups = app.group.get_group_list()
group = Group(name="", header="", footer="")
app.group.create(group)
new_groups = app.group.get_group_list()
assert len(old_groups) + 1 == len(new_groups)
old_groups.append(group)
assert sorted(old_groups, key=Group.id_or_max) == sorted(new_groups, key=Group.id_or_max)
| alenasf/Pythontest | test/test_add_group.py | Python | apache-2.0 | 813 | 0.00492 |
#!/usr/bin/python
import random
import math
import bisect
############################Zipf Generater################################
# The library of numpy.random.zipf or scipy.stats.zipf only work when
# alph > 1
class ZipfGenerator:
def __init__(self, n, alpha):
# Calculate Zeta values from 1 to n:
tmp = [1. / (math.pow(float(i), alpha)) for i in range(1, n+1)]
zeta = reduce(lambda sums, x: sums + [sums[-1] + x], tmp, [0])
# Store the translation map:
self.distMap = [x / zeta[-1] for x in zeta]
def next(self):
# Take a uniform 0-1 pseudo-random value:
u = random.random()
# Translate the Zipf variable:
return bisect.bisect(self.distMap, u) - 1
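# Editor sketch: drawing a few ranks from the generator above. The parameters
# are illustrative; next() returns values in [0, n-1], biased toward small ranks.
if __name__ == "__main__":
    gen = ZipfGenerator(n=1000, alpha=0.8)
    print([gen.next() for _ in range(5)])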
######################################################################### | fengz10/ICN_SCM | Zipf.py | Python | gpl-2.0 | 821 | 0.015834 |
# The key to conditional filtering is how the comprehension is assembled: the if clause drops the non-string elements. For example, a list comprehension can replace a loop and build the list above in a single statement:
#>>> [x * x for x in range(1, 11)]
#[1, 4, 9, 16, 25, 36, 49, 64, 81, 100]
def toUppers(L):
    return [x.upper() for x in L if isinstance(x, str)]
print(toUppers(['Hello', 'world', 101]))
| quietcoolwu/learn-python3-master | imooc/10_3.py | Python | gpl-2.0 | 365 | 0.023166 |
import logging
import os
import sys
import threading
from queue import Empty, Queue
from PyQt5.QtGui import QIcon
from PyQt5.QtCore import pyqtSignal
from PyQt5.QtWidgets import QMessageBox
from PyQt5.QtCore import QMutex
from PyQt5.QtCore import QThread
from PyQt5.QtCore import QEvent
from PyQt5.QtWidgets import QTextEdit
from PyQt5.QtWidgets import QApplication
from PyQt5.QtWidgets import QFileDialog
from PyQt5.QtWidgets import QSystemTrayIcon
from sqlalchemy.orm.exc import NoResultFound
from osfsync.application.background import BackgroundHandler
from osfsync.client.osf import OSFClient
from osfsync.database import Session
from osfsync.database import drop_db
from osfsync.database.models import User
from osfsync.gui.qt.login import LoginScreen
from osfsync.gui.qt.menu import OSFSyncMenu
from osfsync.utils.log import remove_user_from_sentry_logs
from osfsync import settings
from osfsync.tasks.notifications import group_events, Level
from osfsync.utils.validators import validate_containing_folder
logger = logging.getLogger(__name__)
ON_WINDOWS = sys.platform == 'win32'
ON_MAC = sys.platform == 'darwin'
class QResizableMessageBox(QMessageBox):
QWIDGETSIZE_MAX = 16777215
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.setMouseTracking(True)
self.setSizeGripEnabled(True)
def event(self, e):
if e.type() in (QEvent.MouseMove, QEvent.MouseButtonPress):
self.setMaximumSize(self.QWIDGETSIZE_MAX, self.QWIDGETSIZE_MAX)
details_box = self.findChild(QTextEdit)
if details_box is not None:
details_box.setFixedSize(details_box.sizeHint())
return QMessageBox.event(self, e)
class OSFSyncQT(QSystemTrayIcon):
def __init__(self, application):
if ON_WINDOWS:
super().__init__(QIcon(':/tray_icon_win.png'), application)
else:
super().__init__(QIcon(':/tray_icon_mac.png'), application)
self._context_menu = OSFSyncMenu(self)
self.setContextMenu(self._context_menu)
self.show()
self.intervention_handler = SyncEventHandler()
self.notification_handler = SyncEventHandler()
# [ (signal, slot) ]
signal_slot_pairs = [
# preferences
# (self.preferences.ui.desktopNotifications.stateChanged, self.preferences.alerts_changed),
# (self.preferences.preferences_closed_signal, self.resume),
(self._context_menu.preferences.accountLogOutButton.clicked, self.logout),
(self.intervention_handler.notify_signal, self.on_intervention),
(self.notification_handler.notify_signal, self.on_notification),
]
for signal, slot in signal_slot_pairs:
signal.connect(slot)
def ensure_folder(self, user):
containing_folder = os.path.dirname(user.folder or '')
while not validate_containing_folder(containing_folder):
logger.warning('Invalid containing folder: "{}"'.format(containing_folder))
res = QFileDialog.getExistingDirectory(caption='Choose where to place OSF folder')
if not res:
# Do not accept an empty string (dialog box dismissed without selection)
# FIXME: This fixes overt errors, but user gets folder picker endlessly until they select a folder
continue
else:
containing_folder = os.path.abspath(res)
with Session() as session:
user.folder = os.path.join(containing_folder, 'OSF')
os.makedirs(user.folder, exist_ok=True)
session.add(user)
session.commit()
def start(self):
logger.debug('Start in main called.')
self.hide()
user = LoginScreen().get_user()
if user is None:
return False
self.ensure_folder(user)
self.show()
logger.debug('starting background handler from main.start')
BackgroundHandler().set_intervention_cb(self.intervention_handler.enqueue_signal.emit)
BackgroundHandler().set_notification_cb(self.notification_handler.enqueue_signal.emit)
BackgroundHandler().start()
if user.first_boot:
self._context_menu.preferences.on_first_boot()
self._context_menu.open_settings()
return True
def on_intervention(self, intervention):
message = QResizableMessageBox()
message.setWindowTitle('OSF Sync')
message.setIcon(QMessageBox.Question)
message.setText(intervention.title)
message.setInformativeText(intervention.description)
for option in intervention.options:
option_language = str(option).split('.')[1]
message.addButton(" ".join(option_language.split('_')), QMessageBox.YesRole)
idx = message.exec()
intervention.set_result(intervention.options[idx])
self.intervention_handler.done()
def on_notification(self, notification):
"""
Display user-facing event notifications.
:param notification: An individual notification event
:return:
"""
if not self.supportsMessages():
return
# Wait for more notifications, then grab all events and display
t = threading.Timer(settings.ALERT_DURATION, self._consolidate_notifications, args=[notification])
t.start()
# def resume(self):
# logger.debug('resuming')
# if self.background_handler.is_alive():
# raise RuntimeError('Resume called without first calling pause')
# self.background_handler = BackgroundHandler()
# self.background_handler.start()
# def pause(self):
# logger.debug('pausing')
# if self.background_handler and self.background_handler.is_alive():
# self.background_handler.stop()
def _consolidate_notifications(self, first_notification):
"""
Consolidates notifications and groups them together. Releases a burst of all notifications that occur in
a given window of time after the first message is received.
Error messages are always displayed individually.
:param first_notification: The first notification that triggered the consolidation cycle
:return:
"""
# Grab all available events, including the one that kicked off this consolidation cycle
available_notifications = [first_notification]
while True:
try:
event = self.notification_handler.queue.get_nowait()
except Empty:
break
else:
available_notifications.append(event)
# Display notifications
if len(available_notifications) == 1:
# If there's only one message, show it regardless of level
self._show_notifications(available_notifications)
else:
consolidated = group_events(available_notifications)
for level, notification_list in consolidated.items():
# Group info notifications, but display errors and warnings individually
if level > Level.INFO:
self._show_notifications(notification_list)
else:
self.showMessage(
'Updated multiple',
'Updated {} files and folders'.format(len(notification_list)),
QSystemTrayIcon.NoIcon,
msecs=settings.ALERT_DURATION / 1000.
)
self.notification_handler.done()
def _show_notifications(self, notifications_list):
"""Show a message bubble for each notification in the list provided"""
for n in notifications_list:
self.showMessage(
'Synchronizing...',
n.msg,
QSystemTrayIcon.NoIcon,
msecs=settings.ALERT_DURATION / 1000.
)
def quit(self):
BackgroundHandler().stop()
with Session() as session:
try:
user = session.query(User).one()
except NoResultFound:
pass
else:
logger.debug('Saving user data')
session.add(user)
session.commit()
session.close()
logger.info('Quitting application')
QApplication.instance().quit()
def sync_now(self):
BackgroundHandler().sync_now()
def logout(self):
BackgroundHandler().stop()
OSFClient().stop()
# Will probably wipe out everything :shrug:
drop_db()
# Clear any user-specific context data that would be sent to Sentry
remove_user_from_sentry_logs()
# if the preferences window is active, close it.
if self._context_menu.preferences.isVisible():
self._context_menu.preferences.close()
with Session() as session:
session.close()
logger.info('Restart the application.')
self.start()
class SyncEventHandler(QThread):
notify_signal = pyqtSignal(object)
enqueue_signal = pyqtSignal(object)
done_signal = pyqtSignal()
def __init__(self):
super().__init__()
self.queue = Queue()
self.mutex = QMutex()
self.enqueue_signal.connect(self.queue.put)
self.done_signal.connect(self.mutex.unlock)
self.start()
def done(self):
self.done_signal.emit()
def run(self):
while True:
self.mutex.lock()
event = self.queue.get()
self.notify_signal.emit(event)
| CenterForOpenScience/osf-sync | osfsync/gui/qt/tray.py | Python | lgpl-3.0 | 9,674 | 0.001654 |
# coding=UTF-8
import pandas as pd
import numpy as np
import pickle
from axiomatic.base import AxiomSystem, MinMaxAxiom, MaxAxiom, MinAxiom, ChangeAxiom, IntegralAxiom
from axiomatic.base import RelativeChangeAxiom, FirstDiffAxiom, SecondDiffAxiom, TrainingPipeline
from axiomatic.axiom_training_stage import FrequencyECTrainingStage, FrequencyAxiomTrainingStage
from axiomatic.recognizer_training_stage import DummyRecognizerTrainingStage
from axiomatic.objective_function import ObjectiveFunction
from axiomatic.abnormal_behavior_recognizer import AbnormalBehaviorRecognizer
with open('datasets/debug_dataset.pickle', 'rb') as f:
dataset = pickle.load(f)
axiom_list = [MinMaxAxiom, MaxAxiom, MinAxiom, ChangeAxiom, IntegralAxiom, RelativeChangeAxiom, FirstDiffAxiom, SecondDiffAxiom]
frequency_ec_stage = FrequencyECTrainingStage({'num_part': 5, 'left_window': 2, 'right_window': 2, 'num_axioms': 10, 'axiom_list': axiom_list, 'enable_cache': True})
frequency_axiom_stage = FrequencyAxiomTrainingStage({'num_axioms': 10, 'max_depth': 5, 'num_step_axioms': 10})
dummy_recognizer_stage = DummyRecognizerTrainingStage()
training_pipeline = TrainingPipeline([frequency_ec_stage, frequency_axiom_stage, dummy_recognizer_stage])
artifacts = training_pipeline.train(dataset, dict())
print("Artifacts after training: ", artifacts)
recognizer = AbnormalBehaviorRecognizer(artifacts['axiom_system'], artifacts['abn_models'],
dict(bound=0.1,maxdelta=0.5))
obj_fn = ObjectiveFunction(1, 20)
obj_fn_value = obj_fn.calculate(recognizer, dataset['test'])
print("Recognizer objective function: ", obj_fn_value)
| victorshch/axiomatic | integration_test_frequecy_axioms.py | Python | gpl-3.0 | 1,653 | 0.005445 |
"""Support for binary sensor using RPi GPIO."""
import logging
import voluptuous as vol
import requests
from homeassistant.const import CONF_HOST
from homeassistant.components.binary_sensor import (
BinarySensorDevice, PLATFORM_SCHEMA)
import homeassistant.helpers.config_validation as cv
from . import (CONF_BOUNCETIME, CONF_PULL_MODE, CONF_INVERT_LOGIC,
DEFAULT_BOUNCETIME, DEFAULT_INVERT_LOGIC, DEFAULT_PULL_MODE)
from .. import remote_rpi_gpio
_LOGGER = logging.getLogger(__name__)
CONF_PORTS = 'ports'
_SENSORS_SCHEMA = vol.Schema({
cv.positive_int: cv.string,
})
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_HOST): cv.string,
vol.Required(CONF_PORTS): _SENSORS_SCHEMA,
vol.Optional(CONF_INVERT_LOGIC,
default=DEFAULT_INVERT_LOGIC): cv.boolean,
vol.Optional(CONF_BOUNCETIME,
default=DEFAULT_BOUNCETIME): cv.positive_int,
vol.Optional(CONF_PULL_MODE,
default=DEFAULT_PULL_MODE): cv.string,
})
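# Illustrative only: the host, pin numbers and names below are made up, but a
# mapping of this shape is what the schema above is meant to accept (the base
# PLATFORM_SCHEMA it extends also handles the usual 'platform' key):
#     PLATFORM_SCHEMA({
#         'platform': 'remote_rpi_gpio',
#         CONF_HOST: '192.168.1.42',
#         CONF_PORTS: {18: 'Front door', 23: 'Motion sensor'},
#     })
# Options omitted here (invert_logic, bouncetime, pull_mode) fall back to the
# defaults declared above.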
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Raspberry PI GPIO devices."""
    address = config[CONF_HOST]
invert_logic = config[CONF_INVERT_LOGIC]
pull_mode = config[CONF_PULL_MODE]
    ports = config[CONF_PORTS]
bouncetime = config[CONF_BOUNCETIME]/1000
devices = []
for port_num, port_name in ports.items():
try:
button = remote_rpi_gpio.setup_input(address,
port_num,
pull_mode,
bouncetime)
except (ValueError, IndexError, KeyError, IOError):
return
new_sensor = RemoteRPiGPIOBinarySensor(port_name, button, invert_logic)
devices.append(new_sensor)
add_entities(devices, True)
class RemoteRPiGPIOBinarySensor(BinarySensorDevice):
"""Represent a binary sensor that uses a Remote Raspberry Pi GPIO."""
def __init__(self, name, button, invert_logic):
"""Initialize the RPi binary sensor."""
self._name = name
self._invert_logic = invert_logic
self._state = False
self._button = button
async def async_added_to_hass(self):
"""Run when entity about to be added to hass."""
def read_gpio():
"""Read state from GPIO."""
self._state = remote_rpi_gpio.read_input(self._button)
self.schedule_update_ha_state()
self._button.when_released = read_gpio
self._button.when_pressed = read_gpio
@property
def should_poll(self):
"""No polling needed."""
return False
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def is_on(self):
"""Return the state of the entity."""
return self._state != self._invert_logic
@property
def device_class(self):
"""Return the class of this sensor, from DEVICE_CLASSES."""
return
def update(self):
"""Update the GPIO state."""
try:
self._state = remote_rpi_gpio.read_input(self._button)
except requests.exceptions.ConnectionError:
return
| aequitas/home-assistant | homeassistant/components/remote_rpi_gpio/binary_sensor.py | Python | apache-2.0 | 3,259 | 0 |
# Python - 2.7.6
to_freud = lambda sentence: ' '.join(['sex'] * len(sentence.split(' ')))
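# For example, to_freud('hello world') returns 'sex sex': one 'sex' for every
# space-separated token in the input sentence.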
| RevansChen/online-judge | Codewars/8kyu/freudian-translator/Python/solution1.py | Python | mit | 91 | 0.010989 |
'''tzinfo timezone information for Asia/Ashkhabad.'''
from pytz.tzinfo import DstTzInfo
from pytz.tzinfo import memorized_datetime as d
from pytz.tzinfo import memorized_ttinfo as i
class Ashkhabad(DstTzInfo):
'''Asia/Ashkhabad timezone definition. See datetime.tzinfo for details'''
zone = 'Asia/Ashkhabad'
_utc_transition_times = [
d(1,1,1,0,0,0),
d(1924,5,1,20,6,28),
d(1930,6,20,20,0,0),
d(1981,3,31,19,0,0),
d(1981,9,30,18,0,0),
d(1982,3,31,19,0,0),
d(1982,9,30,18,0,0),
d(1983,3,31,19,0,0),
d(1983,9,30,18,0,0),
d(1984,3,31,19,0,0),
d(1984,9,29,21,0,0),
d(1985,3,30,21,0,0),
d(1985,9,28,21,0,0),
d(1986,3,29,21,0,0),
d(1986,9,27,21,0,0),
d(1987,3,28,21,0,0),
d(1987,9,26,21,0,0),
d(1988,3,26,21,0,0),
d(1988,9,24,21,0,0),
d(1989,3,25,21,0,0),
d(1989,9,23,21,0,0),
d(1990,3,24,21,0,0),
d(1990,9,29,21,0,0),
d(1991,3,30,21,0,0),
d(1991,9,28,22,0,0),
d(1991,10,26,20,0,0),
d(1992,1,18,22,0,0),
]
_transition_info = [
i(14040,0,'LMT'),
i(14400,0,'ASHT'),
i(18000,0,'ASHT'),
i(21600,3600,'ASHST'),
i(18000,0,'ASHT'),
i(21600,3600,'ASHST'),
i(18000,0,'ASHT'),
i(21600,3600,'ASHST'),
i(18000,0,'ASHT'),
i(21600,3600,'ASHST'),
i(18000,0,'ASHT'),
i(21600,3600,'ASHST'),
i(18000,0,'ASHT'),
i(21600,3600,'ASHST'),
i(18000,0,'ASHT'),
i(21600,3600,'ASHST'),
i(18000,0,'ASHT'),
i(21600,3600,'ASHST'),
i(18000,0,'ASHT'),
i(21600,3600,'ASHST'),
i(18000,0,'ASHT'),
i(21600,3600,'ASHST'),
i(18000,0,'ASHT'),
i(18000,0,'ASHST'),
i(14400,0,'ASHT'),
i(14400,0,'TMT'),
i(18000,0,'TMT'),
]
Ashkhabad = Ashkhabad()
| newvem/pytz | pytz/zoneinfo/Asia/Ashkhabad.py | Python | mit | 1,535 | 0.160261 |
from django.conf.urls.defaults import *
urlpatterns = patterns('contrib.karma.views',
(r'^$', 'index'),
(r'index', 'index'),
)
| danigm/sweetter | sweetter/contrib/karma/urls.py | Python | agpl-3.0 | 136 | 0.007353 |
import sys
import os
import traceback
from django import db
sys.path.append('/root/wisely/wisely_project/')
os.environ['DJANGO_SETTINGS_MODULE'] = 'wisely_project.settings.production'
from django.db.models import F, Q
from django.utils import timezone
from users.tasks import get_coursera_courses, get_edx_courses, get_udemy_courses
__author__ = 'tmehta'
from users.models import CourseraProfile, EdxProfile, UdemyProfile
while True:
try:
for connection in db.connections.all():
if len(connection.queries) > 100:
db.reset_queries()
for user in CourseraProfile.objects.filter(last_updated__lt=F('user__last_login')).filter(~Q(username='')).filter(
incorrect_login=False):
print user.username
print "Start coursera"
get_coursera_courses(user)
user.last_updated = timezone.now()
print "Done Coursera"
user.save()
for user in EdxProfile.objects.filter(last_updated__lt=F('user__last_login')).filter(~Q(email='')).filter(
incorrect_login=False):
print user.email
print "Start edx"
get_edx_courses(user)
print "Done EDx"
user.last_updated = timezone.now()
user.save()
for user in UdemyProfile.objects.filter(last_updated__lt=F('user__last_login')).filter(~Q(email='')).filter(
incorrect_login=False):
print user.email
print "Start udemy"
get_udemy_courses(user)
print "Done Udemy"
user.last_updated = timezone.now()
user.save()
except Exception as e:
print traceback.format_exc()
| TejasM/wisely | wisely_project/get_courses_file.py | Python | mit | 1,721 | 0.004648 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('playbook', '0003_auto_20151028_1735'),
]
operations = [
migrations.AddField(
model_name='playbookrunhistory',
name='log_url',
field=models.CharField(default=b'', max_length=1024, blank=True),
),
]
| cycloidio/cyclosible | cyclosible/playbook/migrations/0004_playbookrunhistory_log_url.py | Python | gpl-3.0 | 441 | 0 |
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from .util import Specification
from . import compat
class Field(Specification):
"""
Field object for adding fields to a resource schema.
Currently this is built around the Tabular Data Package.
"""
SPECIFICATION = {'name': compat.str,
'title': compat.str,
'type': compat.str,
'format': compat.str,
'constraints': dict}
REQUIRED = ('name',)
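# Illustrative usage sketch, not executed here; the field name, type and
# constraint values are assumptions for the example:
#     >>> price = Field({'name': 'price', 'type': 'number',
#     ...                'constraints': {'required': True, 'minimum': 0}})
#     >>> price.name
#     'price'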
class Constraints(Specification):
"""
Constraints object which can be added to a field in a resource schema
in order to represent the constraints put on that particular field.
"""
SPECIFICATION = {'required': bool,
'minLength': int,
'maxLength': int,
'unique': bool,
'pattern': compat.str,
'minimum': None,
'maximum': None}
class Reference(Specification):
"""
Reference object which can be added to a ForeignKey object to represent
the reference to the other datapackage.
"""
SPECIFICATION = {'datapackage': compat.str,
'resource': compat.str,
'fields': (compat.str, list)}
REQUIRED = ('fields',)
def __setattr__(self, attribute, value):
if attribute == 'fields':
            # We need to make sure all fields are represented by their
            # names if it is a list
if type(value) == list:
modified_value = []
for single_value in value:
if type(single_value) == compat.str:
modified_value.append(single_value)
elif isinstance(single_value, Field):
modified_value.append(single_value.name)
else:
raise TypeError(
'Field type ({0}) is not supported'.format(
type(single_value)))
value = modified_value
elif type(value) == compat.str:
# We don't need to do anything with a str
pass
elif isinstance(value, Field):
# Set the name from the field as the value
value = value.name
else:
raise TypeError("Type of field ({0}) is not supported".format(
type(value)))
super(Reference, self).__setattr__(attribute, value)
class ForeignKey(Specification):
"""
ForeignKey object which can be added to a resource schema object to
represent a foreign key in another data package.
"""
SPECIFICATION = {'fields': (compat.str, list),
'reference': Reference}
REQUIRED = ('fields', 'reference')
def __setattr__(self, attribute, value):
        # If the attribute is 'reference' and a fields attribute already
        # exists, we need to check that the two are consistent, because
        # they should not be allowed to diverge
if attribute == 'reference' and 'fields' in self:
fields = self['fields']
if type(fields) != type(value.fields):
raise TypeError(
'Reference fields must have the same type as fields')
if type(value.fields) == list:
if len(value.fields) != len(fields):
raise ValueError(
'Reference fields and fields are inconsistent')
if attribute == 'fields':
value_type = type(value)
            # We only want to store the names of the fields, so we need
            # to go through the list, pull out the names and use them as the
            # value
if value_type == list:
modified_value = []
for single_value in value:
if type(single_value) == compat.str:
modified_value.append(single_value)
elif isinstance(single_value, Field):
modified_value.append(single_value.name)
else:
raise TypeError(
'Foreign key type ({0}) is not supported'.format(
type(single_value)))
value = modified_value
elif value_type == compat.str:
# We don't need to do anything if the value is a str
pass
elif isinstance(value, Field):
value = value.name
else:
raise TypeError("Type of field ({0}) is not supported".format(
value_type))
# Same check as before about inconsistencies but just the other
# way around
if 'reference' in self:
reference_fields = self['reference'].fields
if type(reference_fields) != value_type:
raise TypeError(
'Fields must have the same type as Reference fields')
if type(reference_fields) == list:
if len(reference_fields) != len(value):
raise ValueError(
'Reference fields and fields are inconsistent')
super(ForeignKey, self).__setattr__(attribute, value)
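# Illustrative sketch, not executed; the resource and field names are made up
# and keyword construction is assumed to behave as it does for Schema below:
#     >>> ref = Reference(resource='countries', fields=['country_code'])
#     >>> fk = ForeignKey(fields=['country_code'], reference=ref)
# The consistency checks above require fields and reference.fields to have the
# same type and, for lists, the same length, as they do here.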
class Schema(Specification):
"""
Schema object which holds the representation of the schema for a
Tabular Data Package (using the JSON Table Schema protocol). The
schema can be used just like a dictionary which means it is ready
for json serialization and export as part of a data package
descriptor (when added to a resource).
"""
SPECIFICATION = {'fields': list,
'primaryKey': (compat.str, list),
'foreignKeys': list}
def __init__(self, *args, **kwargs):
        # We need to initialize an empty fields array (the specification
        # requires it, so we create it instead of requiring it from the caller)
self['fields'] = []
# We add the fields using the internal method so we can do
# validation of each field
self.add_fields(kwargs.pop('fields', []))
super(Schema, self).__init__(self, *args, **kwargs)
def __setattr__(self, attribute, value):
if attribute == 'primaryKey' and value is not None:
# Primary Keys must be a reference to existing fields so we
# need to check if the primary key is in the fields array
field_names = [f.name for f in self.get('fields', [])]
if type(value) == list:
modified_value = []
for single_value in value:
if type(single_value) == compat.str:
if single_value in field_names:
modified_value.append(single_value)
else:
raise AttributeError(
"Unknown '{0}' cannot be primaryKey".format(
single_value))
elif isinstance(single_value, Field):
if single_value.name in field_names:
modified_value.append(single_value.name)
else:
raise AttributeError(
"Unknown '{0}' cannot be primaryKey".format(
single_value.name))
else:
raise TypeError(
'primaryKey type ({0}) is not supported'.format(
type(single_value)))
value = modified_value
elif type(value) == compat.str:
if value not in field_names:
raise AttributeError(
"Unknown '{0}' cannot be primaryKey".format(
value))
elif isinstance(value, Field):
if value.name in field_names:
value = value.name
else:
raise AttributeError(
"Unknown '{0}' cannot be primaryKey".format(
value.name))
else:
raise TypeError('Primary Key type ({0}) not supported'.format(
type(value)))
super(Schema, self).__setattr__(attribute, value)
def add_field(self, field):
"""
Adds a field to the resource schema
:param ~Field field: A Field instance containing the field to be
appended to the schema.
"""
if isinstance(field, Field):
self['fields'].append(field)
elif type(field) == dict:
self['fields'].append(Field(field))
else:
raise TypeError("Type of parameter field is not supported.")
def add_fields(self, fields):
"""
Adds fields to the resource schema
:param list fields: A list of Field instances which should be
appended (extend) to the resource schema fields.
"""
# We loop through the fields list to make sure all elements
# in the list are of the proper type
for field in fields:
self.add_field(field)
def add_foreign_key(self, foreign_key):
"""
Adds a foreign key to the resource schema.
:param ~ForeignKey foreign_key: A ForeignKey object which keeps
track of a foreign key relationship to another data package.
"""
# We can only accept ForeignKey objects
if not isinstance(foreign_key, ForeignKey):
raise TypeError("Foreign Key type is not supported")
# ForeignKey fields must be a schema field
field_names = [f.name for f in self.get('fields', [])]
for field in foreign_key.fields:
if field not in field_names:
raise ValueError(
"Foreign key field '{0}' is not in schema fields".format(
field))
# Append the ForeignKey to the foreignKeys object or create it if it
# doesn't exist
foreign_keys = dict.get(self, 'foreignKeys', [])
foreign_keys.append(foreign_key)
self['foreignKeys'] = foreign_keys
def add_foreign_keys(self, foreign_keys):
"""
Adds foreign keys to the resource schema
:param list foreign_keys: A list of ForeignKey instances which should
be appended (extend) to the resource schema fields or create a
foreignKeys attribute if it doesn't exist.
"""
# We loop through the foreign keys list to make sure all elements
# in the list are of the proper type and validate
for foreign_key in foreign_keys:
self.add_foreign_key(foreign_key)
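# Illustrative end-to-end sketch, not executed; field, key and resource names
# are made up and keyword construction of ForeignKey/Reference is assumed as
# noted above:
#     >>> schema = Schema()
#     >>> schema.add_fields([{'name': 'id', 'type': 'integer'},
#     ...                    {'name': 'country_code', 'type': 'string'}])
#     >>> schema.primaryKey = 'id'
#     >>> schema.add_foreign_key(ForeignKey(
#     ...     fields=['country_code'],
#     ...     reference=Reference(resource='countries', fields=['code'])))
# Since Schema behaves like a dictionary, the result should be serializable
# with json.dumps(schema) for use in a data package descriptor.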
| tryggvib/datapackage | datapackage/schema.py | Python | gpl-3.0 | 11,045 | 0.000181 |
# -*- coding: utf-8 -*-
#
# exercise 5: more variables and printing
#
# string formatting
name = 'Zed A. Shaw'
ages = 35 # not a lie
height = 74 # inches
weight = 180 # lbs
eyes = 'Blue'
teeth = 'White'
hair = 'Brown'
print "Let's talk about %s." % name
print "He's %d inched tall." % height
print "He's %d pounds heavy." % weight
print "Actually that's not too heavy."
print "He's got %s eyes and %s hair." % (eyes, hair)
print "His teeth are usually %s depending on the coffee." % teeth
# this line is tricky, try to get it exactly right
print "If I add %d, %d, and %d I get %d." %(
    ages, height, weight, ages + height + weight)
| zstang/learning-python-the-hard-way | ex5.py | Python | mit | 635 | 0.009449
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import uuid
from gnocchiclient.tests.functional import base
class BenchmarkMetricTest(base.ClientTestBase):
def test_benchmark_metric_create_wrong_workers(self):
result = self.gnocchi(
u'benchmark', params=u"metric create -n 0",
fail_ok=True, merge_stderr=True)
self.assertIn("0 must be greater than 0", result)
def test_benchmark_metric_create(self):
apname = str(uuid.uuid4())
# PREPARE AN ARCHIVE POLICY
self.gnocchi("archive-policy", params="create %s "
"--back-window 0 -d granularity:1s,points:86400" % apname)
result = self.gnocchi(
u'benchmark', params=u"metric create -n 10 -a %s" % apname)
result = json.loads(result)
self.assertEqual(10, int(result['create executed']))
self.assertLessEqual(int(result['create failures']), 10)
self.assertLessEqual(int(result['delete executed']),
int(result['create executed']))
result = self.gnocchi(
u'benchmark', params=u"metric create -k -n 10 -a %s" % apname)
result = json.loads(result)
self.assertEqual(10, int(result['create executed']))
self.assertLessEqual(int(result['create failures']), 10)
self.assertNotIn('delete executed', result)
def test_benchmark_metric_get(self):
apname = str(uuid.uuid4())
# PREPARE AN ARCHIVE POLICY
self.gnocchi("archive-policy", params="create %s "
"--back-window 0 -d granularity:1s,points:86400" % apname)
result = self.gnocchi(
u'metric', params=u"create -a %s" % apname)
metric = json.loads(result)
result = self.gnocchi(
u'benchmark', params=u"metric show -n 10 %s" % metric['id'])
result = json.loads(result)
self.assertEqual(10, int(result['show executed']))
self.assertLessEqual(int(result['show failures']), 10)
def test_benchmark_measures_add(self):
apname = str(uuid.uuid4())
# PREPARE AN ARCHIVE POLICY
self.gnocchi("archive-policy", params="create %s "
"--back-window 0 -d granularity:1s,points:86400" % apname)
result = self.gnocchi(
u'metric', params=u"create -a %s" % apname)
metric = json.loads(result)
result = self.gnocchi(
u'benchmark', params=u"measures add -n 10 -b 4 %s" % metric['id'])
result = json.loads(result)
self.assertEqual(2, int(result['push executed']))
self.assertLessEqual(int(result['push failures']), 2)
result = self.gnocchi(
u'benchmark',
params=u"measures add -s 2010-01-01 -n 10 -b 4 %s"
% metric['id'])
result = json.loads(result)
self.assertEqual(2, int(result['push executed']))
self.assertLessEqual(int(result['push failures']), 2)
result = self.gnocchi(
u'benchmark',
params=u"measures add --wait -s 2010-01-01 -n 10 -b 4 %s"
% metric['id'])
result = json.loads(result)
self.assertEqual(2, int(result['push executed']))
self.assertLessEqual(int(result['push failures']), 2)
self.assertIn("extra wait to process measures", result)
def test_benchmark_measures_show(self):
apname = str(uuid.uuid4())
# PREPARE AN ARCHIVE POLICY
self.gnocchi("archive-policy", params="create %s "
"--back-window 0 -d granularity:1s,points:86400" % apname)
result = self.gnocchi(
u'metric', params=u"create -a %s" % apname)
metric = json.loads(result)
result = self.gnocchi(
u'benchmark',
params=u"measures show -n 2 %s"
% metric['id'])
result = json.loads(result)
self.assertEqual(2, int(result['show executed']))
self.assertLessEqual(int(result['show failures']), 2)
| sileht/python-gnocchiclient | gnocchiclient/tests/functional/test_benchmark.py | Python | apache-2.0 | 4,537 | 0 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# GNU GENERAL PUBLIC LICENSE
# Version 3, 29 June 2007
#
# Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
# Everyone is permitted to copy and distribute verbatim copies
# of this license document, but changing it is not allowed.
#
# Preamble
#
# The GNU General Public License is a free, copyleft license for
# software and other kinds of works.
#
# The licenses for most software and other practical works are designed
# to take away your freedom to share and change the works. By contrast,
# the GNU General Public License is intended to guarantee your freedom to
# share and change all versions of a program--to make sure it remains free
# software for all its users. We, the Free Software Foundation, use the
# GNU General Public License for most of our software; it applies also to
# any other work released this way by its authors. You can apply it to
# your programs, too.
#
# When we speak of free software, we are referring to freedom, not
# price. Our General Public Licenses are designed to make sure that you
# have the freedom to distribute copies of free software (and charge for
# them if you wish), that you receive source code or can get it if you
# want it, that you can change the software or use pieces of it in new
# free programs, and that you know you can do these things.
#
# To protect your rights, we need to prevent others from denying you
# these rights or asking you to surrender the rights. Therefore, you have
# certain responsibilities if you distribute copies of the software, or if
# you modify it: responsibilities to respect the freedom of others.
#
# For example, if you distribute copies of such a program, whether
# gratis or for a fee, you must pass on to the recipients the same
# freedoms that you received. You must make sure that they, too, receive
# or can get the source code. And you must show them these terms so they
# know their rights.
#
# Developers that use the GNU GPL protect your rights with two steps:
# (1) assert copyright on the software, and (2) offer you this License
# giving you legal permission to copy, distribute and/or modify it.
#
# For the developers' and authors' protection, the GPL clearly explains
# that there is no warranty for this free software. For both users' and
# authors' sake, the GPL requires that modified versions be marked as
# changed, so that their problems will not be attributed erroneously to
# authors of previous versions.
#
# Some devices are designed to deny users access to install or run
# modified versions of the software inside them, although the manufacturer
# can do so. This is fundamentally incompatible with the aim of
# protecting users' freedom to change the software. The systematic
# pattern of such abuse occurs in the area of products for individuals to
# use, which is precisely where it is most unacceptable. Therefore, we
# have designed this version of the GPL to prohibit the practice for those
# products. If such problems arise substantially in other domains, we
# stand ready to extend this provision to those domains in future versions
# of the GPL, as needed to protect the freedom of users.
#
# Finally, every program is threatened constantly by software patents.
# States should not allow patents to restrict development and use of
# software on general-purpose computers, but in those that do, we wish to
# avoid the special danger that patents applied to a free program could
# make it effectively proprietary. To prevent this, the GPL assures that
# patents cannot be used to render the program non-free.
#
# The precise terms and conditions for copying, distribution and
# modification follow.
#
# TERMS AND CONDITIONS
#
# 0. Definitions.
#
# "This License" refers to version 3 of the GNU General Public License.
#
# "Copyright" also means copyright-like laws that apply to other kinds of
# works, such as semiconductor masks.
#
# "The Program" refers to any copyrightable work licensed under this
# License. Each licensee is addressed as "you". "Licensees" and
# "recipients" may be individuals or organizations.
#
# To "modify" a work means to copy from or adapt all or part of the work
# in a fashion requiring copyright permission, other than the making of an
# exact copy. The resulting work is called a "modified version" of the
# earlier work or a work "based on" the earlier work.
#
# A "covered work" means either the unmodified Program or a work based
# on the Program.
#
# To "propagate" a work means to do anything with it that, without
# permission, would make you directly or secondarily liable for
# infringement under applicable copyright law, except executing it on a
# computer or modifying a private copy. Propagation includes copying,
# distribution (with or without modification), making available to the
# public, and in some countries other activities as well.
#
# To "convey" a work means any kind of propagation that enables other
# parties to make or receive copies. Mere interaction with a user through
# a computer network, with no transfer of a copy, is not conveying.
#
# An interactive user interface displays "Appropriate Legal Notices"
# to the extent that it includes a convenient and prominently visible
# feature that (1) displays an appropriate copyright notice, and (2)
# tells the user that there is no warranty for the work (except to the
# extent that warranties are provided), that licensees may convey the
# work under this License, and how to view a copy of this License. If
# the interface presents a list of user commands or options, such as a
# menu, a prominent item in the list meets this criterion.
#
# 1. Source Code.
#
# The "source code" for a work means the preferred form of the work
# for making modifications to it. "Object code" means any non-source
# form of a work.
#
# A "Standard Interface" means an interface that either is an official
# standard defined by a recognized standards body, or, in the case of
# interfaces specified for a particular programming language, one that
# is widely used among developers working in that language.
#
# The "System Libraries" of an executable work include anything, other
# than the work as a whole, that (a) is included in the normal form of
# packaging a Major Component, but which is not part of that Major
# Component, and (b) serves only to enable use of the work with that
# Major Component, or to implement a Standard Interface for which an
# implementation is available to the public in source code form. A
# "Major Component", in this context, means a major essential component
# (kernel, window system, and so on) of the specific operating system
# (if any) on which the executable work runs, or a compiler used to
# produce the work, or an object code interpreter used to run it.
#
# The "Corresponding Source" for a work in object code form means all
# the source code needed to generate, install, and (for an executable
# work) run the object code and to modify the work, including scripts to
# control those activities. However, it does not include the work's
# System Libraries, or general-purpose tools or generally available free
# programs which are used unmodified in performing those activities but
# which are not part of the work. For example, Corresponding Source
# includes interface definition files associated with source files for
# the work, and the source code for shared libraries and dynamically
# linked subprograms that the work is specifically designed to require,
# such as by intimate data communication or control flow between those
# subprograms and other parts of the work.
#
# The Corresponding Source need not include anything that users
# can regenerate automatically from other parts of the Corresponding
# Source.
#
# The Corresponding Source for a work in source code form is that
# same work.
#
# 2. Basic Permissions.
#
# All rights granted under this License are granted for the term of
# copyright on the Program, and are irrevocable provided the stated
# conditions are met. This License explicitly affirms your unlimited
# permission to run the unmodified Program. The output from running a
# covered work is covered by this License only if the output, given its
# content, constitutes a covered work. This License acknowledges your
# rights of fair use or other equivalent, as provided by copyright law.
#
# You may make, run and propagate covered works that you do not
# convey, without conditions so long as your license otherwise remains
# in force. You may convey covered works to others for the sole purpose
# of having them make modifications exclusively for you, or provide you
# with facilities for running those works, provided that you comply with
# the terms of this License in conveying all material for which you do
# not control copyright. Those thus making or running the covered works
# for you must do so exclusively on your behalf, under your direction
# and control, on terms that prohibit them from making any copies of
# your copyrighted material outside their relationship with you.
#
# Conveying under any other circumstances is permitted solely under
# the conditions stated below. Sublicensing is not allowed; section 10
# makes it unnecessary.
#
# 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
#
# No covered work shall be deemed part of an effective technological
# measure under any applicable law fulfilling obligations under article
# 11 of the WIPO copyright treaty adopted on 20 December 1996, or
# similar laws prohibiting or restricting circumvention of such
# measures.
#
# When you convey a covered work, you waive any legal power to forbid
# circumvention of technological measures to the extent such circumvention
# is effected by exercising rights under this License with respect to
# the covered work, and you disclaim any intention to limit operation or
# modification of the work as a means of enforcing, against the work's
# users, your or third parties' legal rights to forbid circumvention of
# technological measures.
#
# 4. Conveying Verbatim Copies.
#
# You may convey verbatim copies of the Program's source code as you
# receive it, in any medium, provided that you conspicuously and
# appropriately publish on each copy an appropriate copyright notice;
# keep intact all notices stating that this License and any
# non-permissive terms added in accord with section 7 apply to the code;
# keep intact all notices of the absence of any warranty; and give all
# recipients a copy of this License along with the Program.
#
# You may charge any price or no price for each copy that you convey,
# and you may offer support or warranty protection for a fee.
#
# 5. Conveying Modified Source Versions.
#
# You may convey a work based on the Program, or the modifications to
# produce it from the Program, in the form of source code under the
# terms of section 4, provided that you also meet all of these conditions:
#
# a) The work must carry prominent notices stating that you modified
# it, and giving a relevant date.
#
# b) The work must carry prominent notices stating that it is
# released under this License and any conditions added under section
# 7. This requirement modifies the requirement in section 4 to
# "keep intact all notices".
#
# c) You must license the entire work, as a whole, under this
# License to anyone who comes into possession of a copy. This
# License will therefore apply, along with any applicable section 7
# additional terms, to the whole of the work, and all its parts,
# regardless of how they are packaged. This License gives no
# permission to license the work in any other way, but it does not
# invalidate such permission if you have separately received it.
#
# d) If the work has interactive user interfaces, each must display
# Appropriate Legal Notices; however, if the Program has interactive
# interfaces that do not display Appropriate Legal Notices, your
# work need not make them do so.
#
# A compilation of a covered work with other separate and independent
# works, which are not by their nature extensions of the covered work,
# and which are not combined with it such as to form a larger program,
# in or on a volume of a storage or distribution medium, is called an
# "aggregate" if the compilation and its resulting copyright are not
# used to limit the access or legal rights of the compilation's users
# beyond what the individual works permit. Inclusion of a covered work
# in an aggregate does not cause this License to apply to the other
# parts of the aggregate.
#
# 6. Conveying Non-Source Forms.
#
# You may convey a covered work in object code form under the terms
# of sections 4 and 5, provided that you also convey the
# machine-readable Corresponding Source under the terms of this License,
# in one of these ways:
#
# a) Convey the object code in, or embodied in, a physical product
# (including a physical distribution medium), accompanied by the
# Corresponding Source fixed on a durable physical medium
# customarily used for software interchange.
#
# b) Convey the object code in, or embodied in, a physical product
# (including a physical distribution medium), accompanied by a
# written offer, valid for at least three years and valid for as
# long as you offer spare parts or customer support for that product
# model, to give anyone who possesses the object code either (1) a
# copy of the Corresponding Source for all the software in the
# product that is covered by this License, on a durable physical
# medium customarily used for software interchange, for a price no
# more than your reasonable cost of physically performing this
# conveying of source, or (2) access to copy the
# Corresponding Source from a network server at no charge.
#
# c) Convey individual copies of the object code with a copy of the
# written offer to provide the Corresponding Source. This
# alternative is allowed only occasionally and noncommercially, and
# only if you received the object code with such an offer, in accord
# with subsection 6b.
#
# d) Convey the object code by offering access from a designated
# place (gratis or for a charge), and offer equivalent access to the
# Corresponding Source in the same way through the same place at no
# further charge. You need not require recipients to copy the
# Corresponding Source along with the object code. If the place to
# copy the object code is a network server, the Corresponding Source
# may be on a different server (operated by you or a third party)
# that supports equivalent copying facilities, provided you maintain
# clear directions next to the object code saying where to find the
# Corresponding Source. Regardless of what server hosts the
# Corresponding Source, you remain obligated to ensure that it is
# available for as long as needed to satisfy these requirements.
#
# e) Convey the object code using peer-to-peer transmission, provided
# you inform other peers where the object code and Corresponding
# Source of the work are being offered to the general public at no
# charge under subsection 6d.
#
# A separable portion of the object code, whose source code is excluded
# from the Corresponding Source as a System Library, need not be
# included in conveying the object code work.
#
# A "User Product" is either (1) a "consumer product", which means any
# tangible personal property which is normally used for personal, family,
# or household purposes, or (2) anything designed or sold for incorporation
# into a dwelling. In determining whether a product is a consumer product,
# doubtful cases shall be resolved in favor of coverage. For a particular
# product received by a particular user, "normally used" refers to a
# typical or common use of that class of product, regardless of the status
# of the particular user or of the way in which the particular user
# actually uses, or expects or is expected to use, the product. A product
# is a consumer product regardless of whether the product has substantial
# commercial, industrial or non-consumer uses, unless such uses represent
# the only significant mode of use of the product.
#
# "Installation Information" for a User Product means any methods,
# procedures, authorization keys, or other information required to install
# and execute modified versions of a covered work in that User Product from
# a modified version of its Corresponding Source. The information must
# suffice to ensure that the continued functioning of the modified object
# code is in no case prevented or interfered with solely because
# modification has been made.
#
# If you convey an object code work under this section in, or with, or
# specifically for use in, a User Product, and the conveying occurs as
# part of a transaction in which the right of possession and use of the
# User Product is transferred to the recipient in perpetuity or for a
# fixed term (regardless of how the transaction is characterized), the
# Corresponding Source conveyed under this section must be accompanied
# by the Installation Information. But this requirement does not apply
# if neither you nor any third party retains the ability to install
# modified object code on the User Product (for example, the work has
# been installed in ROM).
#
# The requirement to provide Installation Information does not include a
# requirement to continue to provide support service, warranty, or updates
# for a work that has been modified or installed by the recipient, or for
# the User Product in which it has been modified or installed. Access to a
# network may be denied when the modification itself materially and
# adversely affects the operation of the network or violates the rules and
# protocols for communication across the network.
#
# Corresponding Source conveyed, and Installation Information provided,
# in accord with this section must be in a format that is publicly
# documented (and with an implementation available to the public in
# source code form), and must require no special password or key for
# unpacking, reading or copying.
#
# 7. Additional Terms.
#
# "Additional permissions" are terms that supplement the terms of this
# License by making exceptions from one or more of its conditions.
# Additional permissions that are applicable to the entire Program shall
# be treated as though they were included in this License, to the extent
# that they are valid under applicable law. If additional permissions
# apply only to part of the Program, that part may be used separately
# under those permissions, but the entire Program remains governed by
# this License without regard to the additional permissions.
#
# When you convey a copy of a covered work, you may at your option
# remove any additional permissions from that copy, or from any part of
# it. (Additional permissions may be written to require their own
# removal in certain cases when you modify the work.) You may place
# additional permissions on material, added by you to a covered work,
# for which you have or can give appropriate copyright permission.
#
# Notwithstanding any other provision of this License, for material you
# add to a covered work, you may (if authorized by the copyright holders of
# that material) supplement the terms of this License with terms:
#
# a) Disclaiming warranty or limiting liability differently from the
# terms of sections 15 and 16 of this License; or
#
# b) Requiring preservation of specified reasonable legal notices or
# author attributions in that material or in the Appropriate Legal
# Notices displayed by works containing it; or
#
# c) Prohibiting misrepresentation of the origin of that material, or
# requiring that modified versions of such material be marked in
# reasonable ways as different from the original version; or
#
# d) Limiting the use for publicity purposes of names of licensors or
# authors of the material; or
#
# e) Declining to grant rights under trademark law for use of some
# trade names, trademarks, or service marks; or
#
# f) Requiring indemnification of licensors and authors of that
# material by anyone who conveys the material (or modified versions of
# it) with contractual assumptions of liability to the recipient, for
# any liability that these contractual assumptions directly impose on
# those licensors and authors.
#
# All other non-permissive additional terms are considered "further
# restrictions" within the meaning of section 10. If the Program as you
# received it, or any part of it, contains a notice stating that it is
# governed by this License along with a term that is a further
# restriction, you may remove that term. If a license document contains
# a further restriction but permits relicensing or conveying under this
# License, you may add to a covered work material governed by the terms
# of that license document, provided that the further restriction does
# not survive such relicensing or conveying.
#
# If you add terms to a covered work in accord with this section, you
# must place, in the relevant source files, a statement of the
# additional terms that apply to those files, or a notice indicating
# where to find the applicable terms.
#
# Additional terms, permissive or non-permissive, may be stated in the
# form of a separately written license, or stated as exceptions;
# the above requirements apply either way.
#
# 8. Termination.
#
# You may not propagate or modify a covered work except as expressly
# provided under this License. Any attempt otherwise to propagate or
# modify it is void, and will automatically terminate your rights under
# this License (including any patent licenses granted under the third
# paragraph of section 11).
#
# However, if you cease all violation of this License, then your
# license from a particular copyright holder is reinstated (a)
# provisionally, unless and until the copyright holder explicitly and
# finally terminates your license, and (b) permanently, if the copyright
# holder fails to notify you of the violation by some reasonable means
# prior to 60 days after the cessation.
#
# Moreover, your license from a particular copyright holder is
# reinstated permanently if the copyright holder notifies you of the
# violation by some reasonable means, this is the first time you have
# received notice of violation of this License (for any work) from that
# copyright holder, and you cure the violation prior to 30 days after
# your receipt of the notice.
#
# Termination of your rights under this section does not terminate the
# licenses of parties who have received copies or rights from you under
# this License. If your rights have been terminated and not permanently
# reinstated, you do not qualify to receive new licenses for the same
# material under section 10.
#
# 9. Acceptance Not Required for Having Copies.
#
# You are not required to accept this License in order to receive or
# run a copy of the Program. Ancillary propagation of a covered work
# occurring solely as a consequence of using peer-to-peer transmission
# to receive a copy likewise does not require acceptance. However,
# nothing other than this License grants you permission to propagate or
# modify any covered work. These actions infringe copyright if you do
# not accept this License. Therefore, by modifying or propagating a
# covered work, you indicate your acceptance of this License to do so.
#
# 10. Automatic Licensing of Downstream Recipients.
#
# Each time you convey a covered work, the recipient automatically
# receives a license from the original licensors, to run, modify and
# propagate that work, subject to this License. You are not responsible
# for enforcing compliance by third parties with this License.
#
# An "entity transaction" is a transaction transferring control of an
# organization, or substantially all assets of one, or subdividing an
# organization, or merging organizations. If propagation of a covered
# work results from an entity transaction, each party to that
# transaction who receives a copy of the work also receives whatever
# licenses to the work the party's predecessor in interest had or could
# give under the previous paragraph, plus a right to possession of the
# Corresponding Source of the work from the predecessor in interest, if
# the predecessor has it or can get it with reasonable efforts.
#
# You may not impose any further restrictions on the exercise of the
# rights granted or affirmed under this License. For example, you may
# not impose a license fee, royalty, or other charge for exercise of
# rights granted under this License, and you may not initiate litigation
# (including a cross-claim or counterclaim in a lawsuit) alleging that
# any patent claim is infringed by making, using, selling, offering for
# sale, or importing the Program or any portion of it.
#
# 11. Patents.
#
# A "contributor" is a copyright holder who authorizes use under this
# License of the Program or a work on which the Program is based. The
# work thus licensed is called the contributor's "contributor version".
#
# A contributor's "essential patent claims" are all patent claims
# owned or controlled by the contributor, whether already acquired or
# hereafter acquired, that would be infringed by some manner, permitted
# by this License, of making, using, or selling its contributor version,
# but do not include claims that would be infringed only as a
# consequence of further modification of the contributor version. For
# purposes of this definition, "control" includes the right to grant
# patent sublicenses in a manner consistent with the requirements of
# this License.
#
# Each contributor grants you a non-exclusive, worldwide, royalty-free
# patent license under the contributor's essential patent claims, to
# make, use, sell, offer for sale, import and otherwise run, modify and
# propagate the contents of its contributor version.
#
# In the following three paragraphs, a "patent license" is any express
# agreement or commitment, however denominated, not to enforce a patent
# (such as an express permission to practice a patent or covenant not to
# sue for patent infringement). To "grant" such a patent license to a
# party means to make such an agreement or commitment not to enforce a
# patent against the party.
#
# If you convey a covered work, knowingly relying on a patent license,
# and the Corresponding Source of the work is not available for anyone
# to copy, free of charge and under the terms of this License, through a
# publicly available network server or other readily accessible means,
# then you must either (1) cause the Corresponding Source to be so
# available, or (2) arrange to deprive yourself of the benefit of the
# patent license for this particular work, or (3) arrange, in a manner
# consistent with the requirements of this License, to extend the patent
# license to downstream recipients. "Knowingly relying" means you have
# actual knowledge that, but for the patent license, your conveying the
# covered work in a country, or your recipient's use of the covered work
# in a country, would infringe one or more identifiable patents in that
# country that you have reason to believe are valid.
#
# If, pursuant to or in connection with a single transaction or
# arrangement, you convey, or propagate by procuring conveyance of, a
# covered work, and grant a patent license to some of the parties
# receiving the covered work authorizing them to use, propagate, modify
# or convey a specific copy of the covered work, then the patent license
# you grant is automatically extended to all recipients of the covered
# work and works based on it.
#
# A patent license is "discriminatory" if it does not include within
# the scope of its coverage, prohibits the exercise of, or is
# conditioned on the non-exercise of one or more of the rights that are
# specifically granted under this License. You may not convey a covered
# work if you are a party to an arrangement with a third party that is
# in the business of distributing software, under which you make payment
# to the third party based on the extent of your activity of conveying
# the work, and under which the third party grants, to any of the
# parties who would receive the covered work from you, a discriminatory
# patent license (a) in connection with copies of the covered work
# conveyed by you (or copies made from those copies), or (b) primarily
# for and in connection with specific products or compilations that
# contain the covered work, unless you entered into that arrangement,
# or that patent license was granted, prior to 28 March 2007.
#
# Nothing in this License shall be construed as excluding or limiting
# any implied license or other defenses to infringement that may
# otherwise be available to you under applicable patent law.
#
# 12. No Surrender of Others' Freedom.
#
# If conditions are imposed on you (whether by court order, agreement or
# otherwise) that contradict the conditions of this License, they do not
# excuse you from the conditions of this License. If you cannot convey a
# covered work so as to satisfy simultaneously your obligations under this
# License and any other pertinent obligations, then as a consequence you may
# not convey it at all. For example, if you agree to terms that obligate you
# to collect a royalty for further conveying from those to whom you convey
# the Program, the only way you could satisfy both those terms and this
# License would be to refrain entirely from conveying the Program.
#
# 13. Use with the GNU Affero General Public License.
#
# Notwithstanding any other provision of this License, you have
# permission to link or combine any covered work with a work licensed
# under version 3 of the GNU Affero General Public License into a single
# combined work, and to convey the resulting work. The terms of this
# License will continue to apply to the part which is the covered work,
# but the special requirements of the GNU Affero General Public License,
# section 13, concerning interaction through a network will apply to the
# combination as such.
#
# 14. Revised Versions of this License.
#
# The Free Software Foundation may publish revised and/or new versions of
# the GNU General Public License from time to time. Such new versions will
# be similar in spirit to the present version, but may differ in detail to
# address new problems or concerns.
#
# Each version is given a distinguishing version number. If the
# Program specifies that a certain numbered version of the GNU General
# Public License "or any later version" applies to it, you have the
# option of following the terms and conditions either of that numbered
# version or of any later version published by the Free Software
# Foundation. If the Program does not specify a version number of the
# GNU General Public License, you may choose any version ever published
# by the Free Software Foundation.
#
# If the Program specifies that a proxy can decide which future
# versions of the GNU General Public License can be used, that proxy's
# public statement of acceptance of a version permanently authorizes you
# to choose that version for the Program.
#
# Later license versions may give you additional or different
# permissions. However, no additional obligations are imposed on any
# author or copyright holder as a result of your choosing to follow a
# later version.
#
# 15. Disclaimer of Warranty.
#
# THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
# APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
# HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
# OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
# IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
# ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
#
# 16. Limitation of Liability.
#
# IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
# WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
# THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
# GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
# USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
# DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
# PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
# EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGES.
#
# 17. Interpretation of Sections 15 and 16.
#
# If the disclaimer of warranty and limitation of liability provided
# above cannot be given local legal effect according to their terms,
# reviewing courts shall apply local law that most closely approximates
# an absolute waiver of all civil liability in connection with the
# Program, unless a warranty or assumption of liability accompanies a
# copy of the Program in return for a fee.
#
# END OF TERMS AND CONDITIONS
#
# How to Apply These Terms to Your New Programs
#
# If you develop a new program, and you want it to be of the greatest
# possible use to the public, the best way to achieve this is to make it
# free software which everyone can redistribute and change under these terms.
#
# To do so, attach the following notices to the program. It is safest
# to attach them to the start of each source file to most effectively
# state the exclusion of warranty; and each file should have at least
# the "copyright" line and a pointer to where the full notice is found.
#
# {one line to give the program's name and a brief idea of what it does.}
# Copyright (C) {year} {name of author}
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Also add information on how to contact you by electronic and paper mail.
#
# If the program does terminal interaction, make it output a short
# notice like this when it starts in an interactive mode:
#
# {project} Copyright (C) {year} {fullname}
# This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
# This is free software, and you are welcome to redistribute it
# under certain conditions; type `show c' for details.
#
# The hypothetical commands `show w' and `show c' should show the appropriate
# parts of the General Public License. Of course, your program's commands
# might be different; for a GUI interface, you would use an "about box".
#
# You should also get your employer (if you work as a programmer) or school,
# if any, to sign a "copyright disclaimer" for the program, if necessary.
# For more information on this, and how to apply and follow the GNU GPL, see
# <http://www.gnu.org/licenses/>.
#
# The GNU General Public License does not permit incorporating your program
# into proprietary programs. If your program is a subroutine library, you
# may consider it more useful to permit linking proprietary applications with
# the library. If this is what you want to do, use the GNU Lesser General
# Public License instead of this License. But first, please read
# <http://www.gnu.org/philosophy/why-not-lgpl.html>.
#
from gnuradio import gr, gr_unittest
from gnuradio import blocks
from test_interp import test_interp
class qa_test_interp (gr_unittest.TestCase):
def setUp (self):
self.tb = gr.top_block ()
def tearDown (self):
self.tb = None
def test_001_t (self):
# set up fg
self.tb.run ()
# check data
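        # Illustrative only: a typical way to flesh out this gr_modtool stub, assuming
        # test_interp() takes no constructor arguments and operates on float samples
        # (expected_data stands for whatever output the block is specified to produce):
        #
        #   src = blocks.vector_source_f((1.0, 2.0, 3.0), False)
        #   dut = test_interp()
        #   snk = blocks.vector_sink_f()
        #   self.tb.connect(src, dut, snk)
        #   self.tb.run()
        #   self.assertEqual(expected_data, snk.data())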
if __name__ == '__main__':
gr_unittest.run(qa_test_interp, "qa_test_interp.xml")
| quentinbodinier/custom_gnuradio_blocks | python/qa_test_interp.py | Python | gpl-3.0 | 36,971 | 0.003489 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2011 Radim Rehurek <radimrehurek@seznam.cz>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""Scikit learn interface for :class:`~gensim.models.hdpmodel.HdpModel`.
Follows scikit-learn API conventions to facilitate using gensim along with scikit-learn.
Examples
--------
.. sourcecode:: pycon
>>> from gensim.test.utils import common_dictionary, common_corpus
>>> from gensim.sklearn_api import HdpTransformer
>>>
    >>> # Let's extract the topic distribution of each document
>>> model = HdpTransformer(id2word=common_dictionary)
>>> distr = model.fit_transform(common_corpus)
"""
import numpy as np
from scipy import sparse
from sklearn.base import TransformerMixin, BaseEstimator
from sklearn.exceptions import NotFittedError
from gensim import models
from gensim import matutils
class HdpTransformer(TransformerMixin, BaseEstimator):
"""Base HDP module, wraps :class:`~gensim.models.hdpmodel.HdpModel`.
    The inner workings of this class heavily depend on `Wang, Paisley, Blei: "Online Variational
Inference for the Hierarchical Dirichlet Process, JMLR (2011)"
<http://jmlr.csail.mit.edu/proceedings/papers/v15/wang11a/wang11a.pdf>`_.
"""
def __init__(self, id2word, max_chunks=None, max_time=None, chunksize=256, kappa=1.0, tau=64.0, K=15, T=150,
alpha=1, gamma=1, eta=0.01, scale=1.0, var_converge=0.0001, outputdir=None, random_state=None):
"""
Parameters
----------
id2word : :class:`~gensim.corpora.dictionary.Dictionary`, optional
            Mapping between a word's ID and the word itself in the vocabulary.
max_chunks : int, optional
            Upper bound on how many chunks to process. If the corpus runs out of chunks,
            processing wraps around to the beginning of the corpus for another pass.
max_time : int, optional
Upper bound on time in seconds for which model will be trained.
chunksize : int, optional
Number of documents to be processed by the model in each mini-batch.
kappa : float, optional
Learning rate, see `Wang, Paisley, Blei: "Online Variational Inference for the Hierarchical Dirichlet
Process, JMLR (2011)" <http://jmlr.csail.mit.edu/proceedings/papers/v15/wang11a/wang11a.pdf>`_.
tau : float, optional
Slow down parameter, see `Wang, Paisley, Blei: "Online Variational Inference for the Hierarchical
Dirichlet Process, JMLR (2011)" <http://jmlr.csail.mit.edu/proceedings/papers/v15/wang11a/wang11a.pdf>`_.
K : int, optional
Second level truncation level, see `Wang, Paisley, Blei: "Online Variational Inference for the Hierarchical
Dirichlet Process, JMLR (2011)" <http://jmlr.csail.mit.edu/proceedings/papers/v15/wang11a/wang11a.pdf>`_.
T : int, optional
Top level truncation level, see `Wang, Paisley, Blei: "Online Variational Inference for the Hierarchical
Dirichlet Process, JMLR (2011)" <http://jmlr.csail.mit.edu/proceedings/papers/v15/wang11a/wang11a.pdf>`_.
alpha : int, optional
Second level concentration, see `Wang, Paisley, Blei: "Online Variational Inference for the Hierarchical
Dirichlet Process, JMLR (2011)" <http://jmlr.csail.mit.edu/proceedings/papers/v15/wang11a/wang11a.pdf>`_.
gamma : int, optional
First level concentration, see `Wang, Paisley, Blei: "Online Variational Inference for the Hierarchical
Dirichlet Process, JMLR (2011)" <http://jmlr.csail.mit.edu/proceedings/papers/v15/wang11a/wang11a.pdf>`_.
eta : float, optional
The topic Dirichlet, see `Wang, Paisley, Blei: "Online Variational Inference for the Hierarchical
Dirichlet Process, JMLR (2011)" <http://jmlr.csail.mit.edu/proceedings/papers/v15/wang11a/wang11a.pdf>`_.
scale : float, optional
            Weights information from each mini-chunk of the corpus when calculating rhot, the online learning rate.
var_converge : float, optional
Lower bound on the right side of convergence. Used when updating variational parameters
for a single document.
outputdir : str, optional
Path to a directory where topic and options information will be stored.
random_state : int, optional
            Seed used to create a :class:`numpy.random.RandomState`. Useful for obtaining reproducible results.
"""
self.gensim_model = None
self.id2word = id2word
self.max_chunks = max_chunks
self.max_time = max_time
self.chunksize = chunksize
self.kappa = kappa
self.tau = tau
self.K = K
self.T = T
self.alpha = alpha
self.gamma = gamma
self.eta = eta
self.scale = scale
self.var_converge = var_converge
self.outputdir = outputdir
self.random_state = random_state
def fit(self, X, y=None):
"""Fit the model according to the given training data.
Parameters
----------
X : {iterable of list of (int, number), scipy.sparse matrix}
A collection of documents in BOW format used for training the model.
Returns
-------
:class:`~gensim.sklearn_api.hdp.HdpTransformer`
The trained model.
"""
if sparse.issparse(X):
corpus = matutils.Sparse2Corpus(sparse=X, documents_columns=False)
else:
corpus = X
self.gensim_model = models.HdpModel(
corpus=corpus, id2word=self.id2word, max_chunks=self.max_chunks,
max_time=self.max_time, chunksize=self.chunksize, kappa=self.kappa, tau=self.tau,
K=self.K, T=self.T, alpha=self.alpha, gamma=self.gamma, eta=self.eta, scale=self.scale,
var_converge=self.var_converge, outputdir=self.outputdir, random_state=self.random_state
)
return self
def transform(self, docs):
"""Infer a matrix of topic distribution for the given document bow, where a_ij
indicates (topic_i, topic_probability_j).
Parameters
----------
docs : {iterable of list of (int, number), list of (int, number)}
Document or sequence of documents in BOW format.
Returns
-------
        numpy.ndarray of shape (`len(docs)`, `num_topics`)
Topic distribution for `docs`.
"""
if self.gensim_model is None:
raise NotFittedError(
"This model has not been fitted yet. Call 'fit' with appropriate arguments before using this method."
)
        # A single document was passed as a bare list of (id, count) tuples;
        # wrap it so the loop below always iterates over a list of documents.
if isinstance(docs[0], tuple):
docs = [docs]
distribution, max_num_topics = [], 0
for doc in docs:
topicd = self.gensim_model[doc]
distribution.append(topicd)
max_num_topics = max(max_num_topics, max(topic[0] for topic in topicd) + 1)
# returning dense representation for compatibility with sklearn
# but we should go back to sparse representation in the future
distribution = [matutils.sparse2full(t, max_num_topics) for t in distribution]
return np.reshape(np.array(distribution), (len(docs), max_num_topics))
def partial_fit(self, X):
"""Train model over a potentially incomplete set of documents.
Uses the parameters set in the constructor.
This method can be used in two ways:
* On an unfitted model in which case the model is initialized and trained on `X`.
* On an already fitted model in which case the model is **updated** by `X`.
Parameters
----------
X : {iterable of list of (int, number), scipy.sparse matrix}
A collection of documents in BOW format used for training the model.
Returns
-------
:class:`~gensim.sklearn_api.hdp.HdpTransformer`
The trained model.
"""
if sparse.issparse(X):
X = matutils.Sparse2Corpus(sparse=X, documents_columns=False)
if self.gensim_model is None:
self.gensim_model = models.HdpModel(
id2word=self.id2word, max_chunks=self.max_chunks,
max_time=self.max_time, chunksize=self.chunksize, kappa=self.kappa, tau=self.tau,
K=self.K, T=self.T, alpha=self.alpha, gamma=self.gamma, eta=self.eta, scale=self.scale,
var_converge=self.var_converge, outputdir=self.outputdir, random_state=self.random_state
)
self.gensim_model.update(corpus=X)
return self
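# A minimal end-to-end sketch (not part of the original module), using the small corpus
# and dictionary bundled with gensim's test utilities:
if __name__ == '__main__':
    from gensim.test.utils import common_dictionary, common_corpus

    demo_model = HdpTransformer(id2word=common_dictionary)
    # fit() trains an HdpModel on the corpus; transform() then returns one topic
    # distribution per document as a dense numpy array.
    topic_distr = demo_model.fit_transform(common_corpus)
    print(topic_distr.shape)  # (number of documents, number of inferred topics)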
| midnightradio/gensim | gensim/sklearn_api/hdp.py | Python | gpl-3.0 | 8,719 | 0.004358 |
"""
Support for Yamaha Receivers.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/media_player.yamaha/
"""
import logging
import voluptuous as vol
from homeassistant.components.media_player import (
SUPPORT_TURN_OFF, SUPPORT_TURN_ON, SUPPORT_VOLUME_MUTE, SUPPORT_VOLUME_SET,
SUPPORT_SELECT_SOURCE, SUPPORT_PLAY_MEDIA,
MEDIA_TYPE_MUSIC,
MediaPlayerDevice, PLATFORM_SCHEMA)
from homeassistant.const import (CONF_NAME, CONF_HOST, STATE_OFF, STATE_ON)
import homeassistant.helpers.config_validation as cv
REQUIREMENTS = ['rxv==0.2.0']
_LOGGER = logging.getLogger(__name__)
SUPPORT_YAMAHA = SUPPORT_VOLUME_SET | SUPPORT_VOLUME_MUTE | \
SUPPORT_TURN_ON | SUPPORT_TURN_OFF | SUPPORT_SELECT_SOURCE | \
SUPPORT_PLAY_MEDIA
CONF_SOURCE_NAMES = 'source_names'
CONF_SOURCE_IGNORE = 'source_ignore'
CONF_ZONE_IGNORE = 'zone_ignore'
DEFAULT_NAME = 'Yamaha Receiver'
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_HOST): cv.string,
vol.Optional(CONF_SOURCE_IGNORE, default=[]):
vol.All(cv.ensure_list, [cv.string]),
vol.Optional(CONF_ZONE_IGNORE, default=[]):
vol.All(cv.ensure_list, [cv.string]),
vol.Optional(CONF_SOURCE_NAMES, default={}): {cv.string: cv.string},
})
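# Illustrative configuration.yaml entry (the host address and source names below are
# examples, not defaults):
#
#   media_player:
#     - platform: yamaha
#       host: 192.168.1.20
#       source_ignore:
#         - "AUX"
#       source_names:
#         HDMI1: "Apple TV"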
# pylint: disable=too-many-locals
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Setup the Yamaha platform."""
import rxv
name = config.get(CONF_NAME)
host = config.get(CONF_HOST)
source_ignore = config.get(CONF_SOURCE_IGNORE)
source_names = config.get(CONF_SOURCE_NAMES)
zone_ignore = config.get(CONF_ZONE_IGNORE)
if discovery_info is not None:
name = discovery_info[0]
model = discovery_info[1]
ctrl_url = discovery_info[2]
desc_url = discovery_info[3]
receivers = rxv.RXV(
ctrl_url,
model_name=model,
friendly_name=name,
unit_desc_url=desc_url).zone_controllers()
_LOGGER.info("Receivers: %s", receivers)
elif host is None:
receivers = []
for recv in rxv.find():
receivers.extend(recv.zone_controllers())
else:
ctrl_url = "http://{}:80/YamahaRemoteControl/ctrl".format(host)
receivers = rxv.RXV(ctrl_url, name).zone_controllers()
for receiver in receivers:
if receiver.zone not in zone_ignore:
add_devices([
YamahaDevice(name, receiver, source_ignore, source_names)])
class YamahaDevice(MediaPlayerDevice):
"""Representation of a Yamaha device."""
# pylint: disable=too-many-public-methods, abstract-method
# pylint: disable=too-many-instance-attributes
def __init__(self, name, receiver, source_ignore, source_names):
"""Initialize the Yamaha Receiver."""
self._receiver = receiver
self._muted = False
self._volume = 0
self._pwstate = STATE_OFF
self._current_source = None
self._source_list = None
self._source_ignore = source_ignore or []
self._source_names = source_names or {}
self._reverse_mapping = None
self.update()
self._name = name
self._zone = receiver.zone
def update(self):
"""Get the latest details from the device."""
if self._receiver.on:
self._pwstate = STATE_ON
else:
self._pwstate = STATE_OFF
self._muted = self._receiver.mute
self._volume = (self._receiver.volume / 100) + 1
if self.source_list is None:
self.build_source_list()
current_source = self._receiver.input
self._current_source = self._source_names.get(
current_source, current_source)
def build_source_list(self):
"""Build the source list."""
self._reverse_mapping = {alias: source for source, alias in
self._source_names.items()}
self._source_list = sorted(
self._source_names.get(source, source) for source in
self._receiver.inputs()
if source not in self._source_ignore)
@property
def name(self):
"""Return the name of the device."""
name = self._name
if self._zone != "Main_Zone":
# Zone will be one of Main_Zone, Zone_2, Zone_3
name += " " + self._zone.replace('_', ' ')
return name
@property
def state(self):
"""Return the state of the device."""
return self._pwstate
@property
def volume_level(self):
"""Volume level of the media player (0..1)."""
return self._volume
@property
def is_volume_muted(self):
"""Boolean if volume is currently muted."""
return self._muted
@property
def source(self):
"""Return the current input source."""
return self._current_source
@property
def source_list(self):
"""List of available input sources."""
return self._source_list
@property
def supported_media_commands(self):
"""Flag of media commands that are supported."""
return SUPPORT_YAMAHA
def turn_off(self):
"""Turn off media player."""
self._receiver.on = False
def set_volume_level(self, volume):
"""Set volume level, range 0..1."""
receiver_vol = 100 - (volume * 100)
negative_receiver_vol = -receiver_vol
self._receiver.volume = negative_receiver_vol
def mute_volume(self, mute):
"""Mute (true) or unmute (false) media player."""
self._receiver.mute = mute
def turn_on(self):
"""Turn the media player on."""
self._receiver.on = True
self._volume = (self._receiver.volume / 100) + 1
def select_source(self, source):
"""Select input source."""
self._receiver.input = self._reverse_mapping.get(source, source)
def play_media(self, media_type, media_id, **kwargs):
"""Play media from an ID.
        This exposes a pass-through for various input sources on the
        Yamaha receiver to directly play certain kinds of media. media_type is
        treated as the input type being selected, and media_id is
        specific to that input.
"""
if media_type == "NET RADIO":
self._receiver.net_radio(media_id)
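    # Illustrative service call for the NET RADIO pass-through (entity id and station
    # path below are assumptions):
    #
    #   service: media_player.play_media
    #   data:
    #     entity_id: media_player.yamaha_receiver
    #     media_content_type: "NET RADIO"
    #     media_content_id: "Bookmarks>Internet Radio>Radio Paradise"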
@property
def media_content_type(self):
"""Return the media content type."""
if self.source == "NET RADIO":
return MEDIA_TYPE_MUSIC
@property
def media_title(self):
"""Return the media title.
This will vary by input source, as they provide different
information in metadata.
"""
if self.source == "NET RADIO":
info = self._receiver.play_status()
if info.song:
return "%s: %s" % (info.station, info.song)
else:
return info.station
| betrisey/home-assistant | homeassistant/components/media_player/yamaha.py | Python | mit | 7,019 | 0 |
import ctypes.wintypes as ctypes
import braille
import brailleInput
import globalPluginHandler
import scriptHandler
import inputCore
import api
INPUT_MOUSE = 0
INPUT_KEYBOARD = 1
INPUT_HARDWARE = 2
MAPVK_VK_TO_VSC = 0
KEYEVENTF_EXTENDEDKEY = 0x0001
KEYEVENTF_KEYUP = 0x0002
KEYEVENT_SCANCODE = 0x0008  # corresponds to the Win32 KEYEVENTF_SCANCODE flag
KEYEVENTF_UNICODE = 0x0004
class MOUSEINPUT(ctypes.Structure):
_fields_ = (
('dx', ctypes.c_long),
('dy', ctypes.c_long),
('mouseData', ctypes.DWORD),
('dwFlags', ctypes.DWORD),
('time', ctypes.DWORD),
('dwExtraInfo', ctypes.POINTER(ctypes.c_ulong)),
)
class KEYBDINPUT(ctypes.Structure):
_fields_ = (
('wVk', ctypes.WORD),
('wScan', ctypes.WORD),
('dwFlags', ctypes.DWORD),
('time', ctypes.DWORD),
('dwExtraInfo', ctypes.POINTER(ctypes.c_ulong)),
)
class HARDWAREINPUT(ctypes.Structure):
_fields_ = (
('uMsg', ctypes.DWORD),
('wParamL', ctypes.WORD),
('wParamH', ctypes.WORD),
)
class INPUTUnion(ctypes.Union):
_fields_ = (
('mi', MOUSEINPUT),
('ki', KEYBDINPUT),
('hi', HARDWAREINPUT),
)
class INPUT(ctypes.Structure):
_fields_ = (
('type', ctypes.DWORD),
('union', INPUTUnion))
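# These ctypes definitions mirror the Win32 INPUT, MOUSEINPUT, KEYBDINPUT and
# HARDWAREINPUT structures consumed by user32.SendInput; only the keyboard member
# (ki) is used by send_key() below.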
class BrailleInputGesture(braille.BrailleDisplayGesture, brailleInput.BrailleInputGesture):
def __init__(self, **kwargs):
super(BrailleInputGesture, self).__init__()
for key, value in kwargs.iteritems():
setattr(self, key, value)
self.source="remote{}{}".format(self.source[0].upper(),self.source[1:])
self.scriptPath=getattr(self,"scriptPath",None)
self.script=self.findScript() if self.scriptPath else None
def findScript(self):
if not (isinstance(self.scriptPath,list) and len(self.scriptPath)==3):
return None
module,cls,scriptName=self.scriptPath
focus = api.getFocusObject()
if not focus:
return None
if scriptName.startswith("kb:"):
# Emulate a key press.
return scriptHandler._makeKbEmulateScript(scriptName)
import globalCommands
# Global plugin level.
if cls=='GlobalPlugin':
for plugin in globalPluginHandler.runningPlugins:
if module==plugin.__module__:
func = getattr(plugin, "script_%s" % scriptName, None)
if func:
return func
# App module level.
app = focus.appModule
if app and cls=='AppModule' and module==app.__module__:
func = getattr(app, "script_%s" % scriptName, None)
if func:
return func
# Tree interceptor level.
treeInterceptor = focus.treeInterceptor
if treeInterceptor and treeInterceptor.isReady:
            func = getattr(treeInterceptor, "script_%s" % scriptName, None)
            # Braille gestures are not keyboard input, so skip the pass-through
            # check and return whatever the tree interceptor provides.
            return func
# NVDAObject level.
func = getattr(focus, "script_%s" % scriptName, None)
if func:
return func
for obj in reversed(api.getFocusAncestors()):
func = getattr(obj, "script_%s" % scriptName, None)
if func and getattr(func, 'canPropagate', False):
return func
# Global commands.
func = getattr(globalCommands.commands, "script_%s" % scriptName, None)
if func:
return func
return None
def send_key(vk=None, scan=None, extended=False, pressed=True):
i = INPUT()
i.union.ki.wVk = vk
if scan:
i.union.ki.wScan = scan
else: #No scancode provided, try to get one
i.union.ki.wScan = ctypes.windll.user32.MapVirtualKeyW(vk, MAPVK_VK_TO_VSC)
if not pressed:
i.union.ki.dwFlags |= KEYEVENTF_KEYUP
if extended:
i.union.ki.dwFlags |= KEYEVENTF_EXTENDEDKEY
i.type = INPUT_KEYBOARD
ctypes.windll.user32.SendInput(1, ctypes.byref(i), ctypes.sizeof(INPUT))
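# Illustrative usage sketch (VK_RETURN is the standard Win32 virtual-key code for the
# Enter key; it is not defined in this module):
#
#   VK_RETURN = 0x0D
#   send_key(vk=VK_RETURN, pressed=True)   # key down
#   send_key(vk=VK_RETURN, pressed=False)  # key up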
| nishimotz/NVDARemote | addon/globalPlugins/remoteClient/input.py | Python | gpl-2.0 | 3,588 | 0.032609 |