| text | repo_name | path | language | license | size | score |
|---|---|---|---|---|---|---|
| stringlengths 6-947k | stringlengths 5-100 | stringlengths 4-231 | stringclasses (1 value) | stringclasses (15 values) | int64 6-947k | float64 0-0.34 |
"""
Test suite for ParasitoidModel, for use with py.test
Created on Fri May 08 12:12:19 2015
Author: Christopher Strickland
Email: wcstrick@live.unc.edu
"""
import pytest
import numpy as np
import math
from scipy import sparse, signal
import ParasitoidModel as PM
import globalvars
###############################################################################
# #
# Test Fixtures #
# #
###############################################################################
@pytest.fixture
def g_wind_prob_params():
# Return logistic shape parameters for the g_wind_prob function
aw = 1.8
bw = 6
return (aw,bw)
@pytest.fixture
def f_time_prob_params():
# Return logistic parameters for the f_time_prob function, shape and bias
a1 = 7.
b1 = 2.
a2 = 19.
b2 = 2.
return (a1,b1,a2,b2)
@pytest.fixture(scope="module")
def D_params():
sig_x = 4.0 # std deviation in meters
sig_y = 4.0
corr = 0.
return (sig_x,sig_y,corr)
@pytest.fixture(scope="module")
def flight_consts():
# lambda constant in h_flight_prob
lam = 1.0
# meters to travel in advection per km/hr wind speed
mu_r = 1 # scaling flight advection to wind advection
# number of time periods (minutes) in one flight
n_periods = 6
return (lam,mu_r,n_periods)
@pytest.fixture(scope="module")
def site_name():
return 'data/carnarvonearl'
@pytest.fixture(scope="module")
def start_time(site_name):
if site_name == 'data/carnarvonearl':
return '00:30'
elif site_name == 'data/kalbar':
return '00:00'
@pytest.fixture(scope="module")
def emerg_data(site_name):
emerg_data = PM.emergence_data(site_name)
return emerg_data
@pytest.fixture(scope="module")
def wind_data(site_name,start_time):
wind_data,days = PM.get_wind_data(site_name,30,start_time)
return wind_data
@pytest.fixture
def wind_data_days(site_name,start_time):
return PM.get_wind_data(site_name,30,start_time)
############ Decorators ############
slow = pytest.mark.skipif(not pytest.config.getoption('--runslow'),
reason = 'need --runslow option to run')
cuda_run = pytest.mark.skipif(not globalvars.cuda,
reason = 'need globalvars.cuda == True')
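# Illustrative usage of the markers defined above (a sketch, not part of the
# original suite): a test decorated with @slow is skipped unless pytest is
# started with --runslow, and @cuda_run is skipped unless globalvars.cuda is True.
#
# @slow
# def test_full_simulation():
#     ...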
###############################################################################
# #
# Tests #
# #
###############################################################################
def test_emerg_data(site_name):
# Basic tests for expected structure in emerg_data dict
emerg_data = PM.emergence_data(site_name)
assert isinstance(emerg_data,dict)
for field in emerg_data: #these should be fields.
#emerg_data[field] is a dict
for date in emerg_data[field]: #these should be dates since release
assert isinstance(date,int)
def test_wind_data(site_name):
# Basic tests for expected structure in wind_data dict and days list
wind_data,days = PM.read_wind_file(site_name)
assert isinstance(wind_data,dict)
assert isinstance(days,list)
for day in days:
assert days.count(day) == 1
assert day in wind_data
assert all(days[ii] < days[ii+1] for ii in range(len(days)-1))
for key in wind_data:
assert isinstance(key,int) #date should be int since release
assert wind_data[key].shape[1] == 3 #windx,windy,windr
def test_get_wind_data(site_name,start_time):
interp_num = 30
wind_data_raw,days_raw = PM.read_wind_file(site_name)
wind_data,days = PM.get_wind_data(site_name,interp_num,start_time)
assert wind_data[days[0]].shape[0] == interp_num*wind_data_raw[days[0]].shape[0]
assert days_raw == days
if start_time == '00:30':
assert all(wind_data[days[0]][0,:] == wind_data_raw[days[0]][0,:])
assert all(wind_data[days[0]][interp_num-1,:] ==
wind_data_raw[days[0]][0,:])
for ii in range(wind_data_raw[days[0]].shape[0]-1):
assert all(wind_data[days[0]][interp_num*(1+ii),:] ==
wind_data_raw[days[0]][ii,:])
assert all(wind_data[days[1]][0,:] == wind_data_raw[days[0]][-1,:])
elif start_time == '00:00':
assert all(wind_data[days[-1]][-1,:] == wind_data_raw[days[-1]][-1,:])
assert all(wind_data[days[-1]][-interp_num+1,:] ==
wind_data_raw[days[-1]][-1,:])
for ii in range(wind_data_raw[days[0]].shape[0]-1):
assert all(wind_data[days[0]][interp_num*(ii),:] ==
wind_data_raw[days[0]][ii,:])
for key in wind_data_raw:
assert key in wind_data
assert all(np.sqrt(wind_data[key][:,0]**2+wind_data[key][:,1]**2) ==
wind_data[key][:,2])
def test_g_prob_of_flying_by_wind_speed(g_wind_prob_params):
ds = 0.1 #interval at which to test function
# pass in a bunch of values to the function being tested,
# get out probability scaling
flight_prob = PM.g_wind_prob(np.arange(0,3.1,ds),*g_wind_prob_params)
# test for desirable properties #
# check that all scaling values are between 0 and 1
assert (np.all(0 <= flight_prob) and np.all(flight_prob <= 1))
# should be a strictly decreasing function of wind speed
for ii in range(flight_prob.size-1):
assert flight_prob[ii] > flight_prob[ii+1]
# low wind speeds should have no effect
low = 0.5 #upper bound on a "low" wind speed
#check that these low wind speeds return a value close to 1
assert np.all(flight_prob[0:int(low/ds)+1] > 0.99)
def test_f_prob_of_flying_by_time_of_day(f_time_prob_params):
# number of discrete times per day to look at
n = 48 # data is every 30 min
time_of_day = np.linspace(0,24,n)
# get probability scaling
flight_prob = PM.f_time_prob(n,*f_time_prob_params)
# test for desirable properties #
# check that f is a probability mass function
assert np.all(flight_prob >= 0)
assert math.isclose(flight_prob.sum(),1)
# no flights between 10 pm and midnight
ii = 1
while time_of_day[-ii] >= 22:
try:
assert flight_prob[-ii] < 0.01/n
except:
# Give some feedback on time of failure
print('Time of failure: {0}\n'.format(time_of_day[-ii]))
raise
ii += 1
# no flights between midnight and 3 am
ii = 0
while time_of_day[ii] <= 3:
try:
assert flight_prob[ii] < 0.01/n
except:
# Give some feedback on time of failure
print('Time of failure: {0}\n'.format(time_of_day[ii]))
raise
ii += 1
# no penalty between 11 am and 3 pm
ii = 0
while time_of_day[ii] < 11:
# find 11:00
ii += 1
while time_of_day[ii] <= 15:
try:
assert flight_prob[ii] > 0.99/n
except:
print('Time of failure: {0}\n'.format(time_of_day[ii]))
raise
ii += 1
def test_h_flight_prob(wind_data,g_wind_prob_params,f_time_prob_params):
# lambda constant controlling probability of flying in a given day
# under ideal conditions.
lam = 1.
# try a few days of wind data
for ii in range(1,4):
day_wind = wind_data[ii]
# get f and g to test for certain properties in comparison with h
n = day_wind.shape[0] #number of wind data entries in the day
#get just the windr values
try:
windr = day_wind[:,2]
except IndexError:
windr = day_wind[2] # for testing prob_mass
n = 1
f_func = PM.f_time_prob(n,*f_time_prob_params)
g_func = PM.g_wind_prob(windr,*g_wind_prob_params)
assert np.all(f_func*g_func <= f_func)
assert (f_func-f_func*g_func).sum() <=1
# get the probability function for the day
flight_params = (*g_wind_prob_params,*f_time_prob_params)
flight_prob = PM.h_flight_prob(day_wind,lam,
*flight_params)
# test that it has proper probability properties
assert np.all(flight_prob >= 0)
# should add up to less than or equal to 1,
# 1-sum is prob. of not flying
assert flight_prob.sum() <= 1
# we should be strictly adding probability to f_func*g_func
assert np.all(flight_prob >= f_func*g_func)
def test_get_mvn_cdf_values():
# This test should make sure (x,y) coordinate pairs are correctly
# translated to row/columns, among other things.
# Use a covariance matrix with some correlation. Test one with high variance
# vs. one with small variance to make sure that the adaptive integration
# is working properly.
cell_length = 2
mu = np.zeros(2)
sig_x1 = 4; sig_y1 = 4 # (in meters)
corr1 = 0.5
S1 = np.array([[sig_x1**2, corr1*sig_x1*sig_y1],
[corr1*sig_x1*sig_y1, sig_y1**2]])
# bigger
sig_x2 = 10; sig_y2 = 10
corr2 = -corr1
S2 = np.array([[sig_x2**2, corr2*sig_x2*sig_y2],
[corr2*sig_x2*sig_y2, sig_y2**2]])
# Get cdf values
cdf_mat1 = PM.get_mvn_cdf_values(cell_length,mu,S1)
cdf_mat2 = PM.get_mvn_cdf_values(cell_length,mu,S2)
# should behave like an approximation to a probability mass function
assert 0.99 < cdf_mat1.sum() < 1
assert 0.99 < cdf_mat2.sum() < 1
# 2 should be bigger than 1
assert cdf_mat2.size > cdf_mat1.size
cdf_len = cdf_mat1.shape[0] #odd number
cdf_cent = int(cdf_len/2) #center of cdf_mat
# With positive correlation, we expect more probability in the first and
# third quadrants.
# compare 2nd quadrant and 1st quadrant
assert cdf_mat1[0:cdf_cent,0:cdf_cent].sum() < \
cdf_mat1[0:cdf_cent,cdf_cent+1:].sum()
# With negative correlation, we expect more probability in the second and
# fourth quadrants.
# compare 3rd quadrant and 4th quadrant
assert cdf_mat2[cdf_cent+1:,0:cdf_cent].sum() < \
cdf_mat2[cdf_cent+1:,cdf_cent+1:].sum()
# The mean is within the origin cell, so this should be the location with
# the most probability
mdpt = int(cdf_mat1.shape[0]/2) #shape is an odd number. flooring should
# get us where we want in a 0-based index
assert cdf_mat1.max() == cdf_mat1[mdpt,mdpt]
def test_prob_mass_func_generation(wind_data,g_wind_prob_params,
f_time_prob_params,D_params,flight_consts,domain_info):
# day to test
day = 1
# lambda constant in h_flight_prob
lam = flight_consts[0]
#### Run over a single 30 min period to see what happens in detail ####
# to do this, we pass in wind_data with only a single time period in the
# first day.
# Data has only day one, with one time period (chosen from middle of day)
sing_wind_data = {1:wind_data[1][24*30,:]}
sing_wind_data_cpy = dict(sing_wind_data)
# Need to alter parameters to f function a bit to get probability of flying
# around midnight, when the time period will start...
hparams1 = (lam,*g_wind_prob_params,-4.,2.,19.,2.)
# This will give us one 24hr time period. mu_r has to scale accordingly
mu_r1 = 0.1/24 # 6 min flight at full wind advection
#pytest.set_trace()
pmf = PM.prob_mass(1,sing_wind_data,hparams1,D_params,D_params,mu_r1,1,
*domain_info)
pmf = pmf.tocsr()
# Find the center. pmf is always square
midpt = pmf.shape[0]//2
# sing_wind_data is mutable. Verify that it is unchanged.
assert sing_wind_data == sing_wind_data_cpy
# Check that the shifted normal distribution is in the correct quadrant
# given the wind vector's direction
wind_sign = np.sign(sing_wind_data[1][0:2]) #signum, (x,y)
if wind_sign[0] < 0: # x < 0, column < midpt
if wind_sign[1] < 0: # y < 0, row > midpt
assert pmf[midpt+5:,0:midpt-5].sum() > 0
else: # y > 0, row < midpt
assert pmf[0:midpt-5,0:midpt-5].sum() > 0
else: # x > 0, column > midpt
if wind_sign[1] < 0: # y < 0, row > midpt
assert pmf[midpt+5:,midpt+5:].sum() > 0
else: # y > 0, row < midpt
assert pmf[0:midpt-5,midpt+5:].sum() > 0
# DO THIS BLOCK LAST FOR SINGLE RUN! ALTERS pmf
# Midday on the first day had wind. Most of the probability is around the
# origin because wind decreases the likelihood of flight, but other than
# this, much of the probability should be away from the origin.
# assert np.unravel_index(pmf.argmax(), pmf.shape) != (midpt,midpt)
pmf[midpt,midpt] = 0
assert 1 > pmf.sum() > 0
#### Run for the entire day, using full wind_data dictionary ####
# parameters for h_flight_prob
hparams = (lam,*g_wind_prob_params,*f_time_prob_params)
# wind_data is mutable. have a copy on hand to check against
wind_data_cpy = dict(wind_data)
# get the day's probability density for location of a parasitoid
params = (*flight_consts[1:],*domain_info)
pmf = PM.prob_mass(day,wind_data,hparams,D_params,D_params,*params)
# wind_data should be unchanged
assert wind_data == wind_data_cpy
# check offsetting algorithm
offset = domain_info[1] - pmf.shape[0]//2
dom_len = domain_info[1]*2 + 1
firstsol = sparse.coo_matrix((pmf.data,
(pmf.row+offset,pmf.col+offset)),shape=(dom_len,dom_len))
# should be a probability mass function, before/after conversion
firstsol = firstsol.tocsr()
assert math.isclose(pmf.sum(),1)
assert math.isclose(firstsol.sum(),1)
# most of the probability should still be near the origin
midpt = firstsol.shape[0]//2
assert firstsol[midpt-4:midpt+5,midpt-4:midpt+5].sum() > (firstsol.sum()
- firstsol[midpt-4:midpt+5,midpt-4:midpt+5].sum())
# but not all
assert not math.isclose(firstsol[midpt-4:midpt+5,midpt-4:midpt+5].sum(),1)
#### Run again, but this time with noon release ####
pmf2 = PM.prob_mass(day,wind_data,hparams,D_params,D_params,*params,0.5)
# should be a probability mass function
assert math.isclose(pmf2.sum(),1)
# most of the probability should still be at the origin
midpt2 = pmf2.shape[0]//2
pmf2 = pmf2.tocsr()
assert pmf2[midpt2-4:midpt2+5,midpt2-4:midpt2+5].sum() > (pmf2.sum()
- pmf2[midpt2-4:midpt2+5,midpt2-4:midpt2+5].sum())
# but not all
assert not math.isclose(pmf2[midpt2-4:midpt2+5,midpt2-4:midpt2+5].sum(),1)
# this solution should have more left at the origin than the first one
assert pmf2[midpt2,midpt2] > firstsol[midpt,midpt]
| mountaindust/Parasitoids | tests/test_ParsitoidModel.py | Python | gpl-3.0 | 14,934 | 0.014062 |
"""
Conversion pack for October 2021 release
"""
CONVERSIONS = {
# Renamed items
"Quafe Zero": "Quafe Zero Classic",
"Exigent Sentry Drone Navigation Mutaplasmid": "Exigent Sentry Drone Precision Mutaplasmid",
}
| pyfa-org/Pyfa | service/conversions/releaseOct2021.py | Python | gpl-3.0 | 225 | 0.004444 |
# -*- coding:utf-8 -*-
# @version: 1.0
# @author:
# @date: '14-4-10'
import os
import logging
import threading
from ConfigParser import ConfigParser
from ConfigParser import NoSectionError, InterpolationMissingOptionError, Error
import simplejson as json
from utils.logger import Logger
_lock = threading.RLock()
class Environment():
instance = None
def __init__(self):
self._working_path = ""
self._app_name = ""
@staticmethod
def get_instance():
if not Environment.instance:
Environment.instance = Environment()
return Environment.instance
def init_by_file_name(self, start_file_path, start_file_name, start_file_depth=1):
start_file_name = os.path.join(start_file_path, start_file_name)
self.init(start_file_name, start_file_depth)
def init(self, start_file_name, start_file_depth):
"""
Initialize the application environment.
:param start_file_name: full path of the code file that calls this method
:param start_file_depth: depth of the calling code file relative to the working directory; a file directly in the working directory has depth 1, a file in a first-level subfolder has depth 2, and so on.
"""
self._working_path, self._app_name = self._parse_start_file_name(
start_file_name, start_file_depth)
self._set_working_path(self._working_path)
self._init_logger()
self._configure_parser = ConfigParser()
self._is_configure_loaded = False
self._load_configure()
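# Illustrative call (an assumption, not part of the original module): a script
# located one directory below the project root would initialise the environment
# with
#   Environment.get_instance().init(__file__, start_file_depth=2)
# after which get_configure_value()/set_configure_value() operate on
# conf/<app_name>.conf under the detected working path.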
def get_db_setting(self, db_setting_section_name):
return self.get_configure_value(db_setting_section_name, "host"), \
self.get_configure_value(db_setting_section_name, "db"), \
self.get_configure_value(db_setting_section_name, "user"), \
self.get_configure_value(db_setting_section_name, "passwd")
def get_app_name(self):
return self._app_name
def get_working_path(self):
return self._working_path
def _get_configure_value(self, section, key):
value = None
try:
value = self._configure_parser.get(section, key)
return value
except NoSectionError, e:
logging.error(e.message)
return None
except InterpolationMissingOptionError, e:
value = e.message.split("rawval : ")
if value and len(value) > 1:
value = value[1][:-1]
else:
raise Error
return value
def get_configure_value(self, section, key, default="", value_type=str):
_lock.acquire()
value = self._get_configure_value(section, key)
_lock.release()
if value_type in [str, unicode]:
pass
elif value_type in [int, long]:
value = int(value)
elif value_type in [float]:
value = float(value)
elif value_type == json:
value = json.loads(value)
else:
pass
value = default if value is None else value
return value
def set_configure_value(self, section, key, value=""):
_lock.acquire()
if not section in self._configure_parser.sections():
self._configure_parser.add_section(section)
if type(value) == dict:
value = json.dumps(value)
self._configure_parser.set(section, key, value)
with file(self._config_path, "w") as fp:
self._configure_parser.write(fp)
_lock.release()
def _parse_start_file_name(self, start_file_name, start_file_depth):
"""
Parse the start file name and its depth, and return the program's working directory and program name.
:param start_file_name: full path of the code file that calls this method
:param start_file_depth: depth of the calling code file relative to the working directory; a file directly in the working directory has depth 1, a file in a first-level subfolder has depth 2, and so on.
:return: (working directory, program name)
"""
start_file_name = start_file_name.replace("\\", "/")
file_name_parts = start_file_name.split('/')
file_name_parts.remove("")
if not file_name_parts:
logging.error(u"启动文件输入参数错误,输入的不是完整的文件名: " + start_file_name)
return
app_name = file_name_parts[-1]
if "." in app_name:
app_name = app_name[:app_name.rindex(".")]
file_name_parts = file_name_parts[:(start_file_depth) * -1]
working_dir = os.sep.join(file_name_parts)
return working_dir, app_name
def _init_logger(self, logging_file_name="logging.conf"):
log_file_whole_name = os.path.join(
self._working_path, "conf", logging_file_name)
print "Load logging file:", log_file_whole_name
Logger.load_configure(log_file_whole_name)
def _load_configure(self):
configure_file_name = os.path.join(
self._working_path, "conf", self._app_name + ".conf")
print "Load configure file:", configure_file_name
if self._is_configure_loaded:
return
if not configure_file_name:
return
self._config_path = configure_file_name  # keep the path so set_configure_value() can write back
self._configure_parser.read(configure_file_name)
def _set_working_path(self, work_path):
work_path = os.path.abspath(work_path)
os.chdir(work_path)
print "Set working dir:", work_path
if __name__ == "__main__":
# Environment.get_instance()._load_configure()
# print Environment.get_instance().get_configure_value("zhiShiTuPu",
# "user")
print Environment.get_instance()._parse_start_file_name(
"F:\\newgit\\nluData\\query-crawler\\crawler\\query_crawler.py", 1)
| ChainBoy/init_python_project | utils/environment.py | Python | apache-2.0 | 5,907 | 0.000727 |
"""Helpers for HomeKit data stored in HA storage."""
from homeassistant.core import callback
from homeassistant.helpers.storage import Store
from .const import DOMAIN
ENTITY_MAP_STORAGE_KEY = f"{DOMAIN}-entity-map"
ENTITY_MAP_STORAGE_VERSION = 1
ENTITY_MAP_SAVE_DELAY = 10
class EntityMapStorage:
"""
Holds a cache of entity structure data from a paired HomeKit device.
HomeKit has a cacheable entity map that describes how an IP or BLE
endpoint is structured. This object holds the latest copy of that data.
An endpoint is made of accessories, services and characteristics. It is
safe to cache this data until the c# (configuration number) in the discovery data changes.
Caching this data means we can add HomeKit devices to HA immediately at
start even if discovery hasn't seen them yet or they are out of range. It
is also important for BLE devices - accessing the entity structure is
very slow for these devices.
"""
def __init__(self, hass):
"""Create a new entity map store."""
self.hass = hass
self.store = Store(hass, ENTITY_MAP_STORAGE_VERSION, ENTITY_MAP_STORAGE_KEY)
self.storage_data = {}
async def async_initialize(self):
"""Get the pairing cache data."""
raw_storage = await self.store.async_load()
if not raw_storage:
# There is no cached data about HomeKit devices yet
return
self.storage_data = raw_storage.get("pairings", {})
def get_map(self, homekit_id):
"""Get a pairing cache item."""
return self.storage_data.get(homekit_id)
@callback
def async_create_or_update_map(self, homekit_id, config_num, accessories):
"""Create a new pairing cache."""
data = {"config_num": config_num, "accessories": accessories}
self.storage_data[homekit_id] = data
self._async_schedule_save()
return data
@callback
def async_delete_map(self, homekit_id):
"""Delete pairing cache."""
if homekit_id not in self.storage_data:
return
self.storage_data.pop(homekit_id)
self._async_schedule_save()
@callback
def _async_schedule_save(self):
"""Schedule saving the entity map cache."""
self.store.async_delay_save(self._data_to_save, ENTITY_MAP_SAVE_DELAY)
@callback
def _data_to_save(self):
"""Return data of entity map to store in a file."""
return {"pairings": self.storage_data}
| nkgilley/home-assistant | homeassistant/components/homekit_controller/storage.py | Python | apache-2.0 | 2,471 | 0.000405 |
__author__ = 'thauser'
from mock import patch, MagicMock
from pnc_cli import productreleases
from pnc_cli.swagger_client.models import ProductReleaseRest
def test_create_product_release_object():
compare = ProductReleaseRest()
compare.version = '1.0.1.DR1'
compare.support_level = 'EOL'
result = productreleases.create_product_release_object(version='1.0.1.DR1', support_level='EOL')
assert result.to_dict() == compare.to_dict()
@patch('pnc_cli.productreleases.releases_api.get_all', return_value=MagicMock(content='list of all releases'))
def test_list_product_releases(mock):
result = productreleases.list_product_releases()
mock.assert_called_once_with(page_size=200, sort='', q='')
assert result == 'list of all releases'
@patch('pnc_cli.productreleases.create_product_release_object')
@patch('pnc_cli.productreleases.releases_api.create_new')
@patch('pnc_cli.productreleases.productversions_api.get_specific')
def test_create_release_badversion(mock_get_specific, mock_create_new, mock_create_object):
result = productreleases.create_release(version='x.x')
assert not mock_create_object.called
assert not mock_create_new.called
assert not mock_get_specific.called
assert not result
@patch('pnc_cli.productreleases.create_product_release_object', return_value='created release')
@patch('pnc_cli.productreleases.releases_api.create_new', return_value=MagicMock(content='created release'))
@patch('pnc_cli.productreleases.productversions_api.get_specific',
return_value=MagicMock(content=MagicMock(version='1.0')))
def test_create_release(mock_get_specific, mock_create_new, mock_create_object):
result = productreleases.create_release(version='0.DR1',
release_date='2016-01-01',
download_url='https://tom.com',
product_version_id=1,
product_milestone_id=1,
support_level='EOL')
mock_get_specific.assert_called_once_with(id=1)
mock_create_new.assert_called_once_with(body='created release')
mock_create_object.assert_called_once_with(version='1.0.0.DR1',
release_date='2016-01-01',
download_url='https://tom.com',
product_version_id=1,
product_milestone_id=1,
support_level='EOL')
assert result == 'created release'
@patch('pnc_cli.productreleases.releases_api.get_all_by_product_version_id',
return_value=MagicMock(content='list of releases for version'))
def test_list_releases_for_version(mock):
result = productreleases.list_releases_for_version(1)
mock.assert_called_once_with(version_id=1)
assert result == 'list of releases for version'
@patch('pnc_cli.productreleases.releases_api.get_specific', return_value=MagicMock(content='single release'))
def test_get_release(mock):
result = productreleases.get_release(1)
mock.assert_called_once_with(id=1)
assert result == 'single release'
@patch('pnc_cli.productreleases.releases_api.get_specific',
return_value=MagicMock(content='release'))
def test_product_release_exists(mock):
result = productreleases._product_release_exists(1)
mock.assert_called_once_with(id=1)
assert result
@patch('pnc_cli.productreleases._product_release_exists', return_value=False)
@patch('pnc_cli.productreleases.releases_api.get_specific')
@patch('pnc_cli.productreleases.releases_api.update')
def test_update_release_notexist(mock_update, mock_get_specific, mock_release_exists):
result = productreleases.update_release(1)
mock_release_exists.assert_called_once_with(1)
assert not mock_get_specific.called
assert not mock_update.called
@patch('pnc_cli.productreleases._product_release_exists', return_value=True)
@patch('pnc_cli.productreleases.releases_api.get_specific')
@patch('pnc_cli.productreleases.releases_api.update', return_value=MagicMock(content='updated release'))
def test_update_release(mock_update, mock_get_specific, mock_release_exists):
mock = MagicMock()
mockcontent = MagicMock(content=mock)
mock_get_specific.return_value = mockcontent
result = productreleases.update_release(id=1, version='2.2.2.GA', support_level='EOL')
mock_release_exists.assert_called_once_with(1)
assert getattr(mock, 'version') == '2.2.2.GA'
assert getattr(mock, 'support_level') == 'EOL'
mock_update.assert_called_once_with(id=1, body=mock)
assert result == 'updated release'
| jianajavier/pnc-cli | test/unit/test_productreleases.py | Python | apache-2.0 | 4,762 | 0.0021 |
list2=['tom','jerry','mickey']
list1=['hardy','bob','minnie']
print(list1+list2)
print(list2+list1)
print(list1*3)
print(list2+['disney','nick','pogo'])
| zac11/AutomateThingsWithPython | Lists/list_concat.py | Python | mit | 155 | 0.058065 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__all__ = ['limited']
| scalingdata/Impala | thirdparty/hive-1.2.1.2.3.0.0-2557/lib/py/thrift/reflection/__init__.py | Python | apache-2.0 | 807 | 0.001239 |
# coding: utf-8
from sqlalchemy import Column, DateTime, ForeignKey, Integer, Text, text
from sqlalchemy.orm import relationship
from Houdini.Data import Base
metadata = Base.metadata
class Ban(Base):
__tablename__ = 'ban'
PenguinID = Column(ForeignKey(u'penguin.ID', ondelete=u'CASCADE', onupdate=u'CASCADE'), primary_key=True, nullable=False)
Issued = Column(DateTime, primary_key=True, nullable=False, server_default=text("current_timestamp()"))
Expires = Column(DateTime, primary_key=True, nullable=False, server_default=text("current_timestamp()"))
ModeratorID = Column(ForeignKey(u'penguin.ID', ondelete=u'CASCADE', onupdate=u'CASCADE'), nullable=False, index=True)
Reason = Column(Integer, nullable=False)
Comment = Column(Text)
penguin = relationship(u'Penguin', primaryjoin='Ban.ModeratorID == Penguin.ID')
penguin1 = relationship(u'Penguin', primaryjoin='Ban.PenguinID == Penguin.ID')
| TunnelBlanket/Houdini | Houdini/Data/Ban.py | Python | mit | 936 | 0.007479 |
#
# subunit: extensions to python unittest to get test results from subprocesses.
# Copyright (C) 2005 Robert Collins <robertc@robertcollins.net>
#
# Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
# license at the users choice. A copy of both licenses are available in the
# project source as Apache-2.0 and BSD. You may not use this file except in
# compliance with one of these two licences.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# license you chose for the specific language governing permissions and
# limitations under that license.
#
"""Tests for subunit.tag_stream."""
import unittest
from testtools.compat import StringIO
import subunit
import subunit.test_results
class TestSubUnitTags(unittest.TestCase):
def setUp(self):
self.original = StringIO()
self.filtered = StringIO()
def test_add_tag(self):
self.original.write("tags: foo\n")
self.original.write("test: test\n")
self.original.write("tags: bar -quux\n")
self.original.write("success: test\n")
self.original.seek(0)
result = subunit.tag_stream(self.original, self.filtered, ["quux"])
self.assertEqual([
"tags: quux",
"tags: foo",
"test: test",
"tags: bar",
"success: test",
],
self.filtered.getvalue().splitlines())
def test_remove_tag(self):
self.original.write("tags: foo\n")
self.original.write("test: test\n")
self.original.write("tags: bar -quux\n")
self.original.write("success: test\n")
self.original.seek(0)
result = subunit.tag_stream(self.original, self.filtered, ["-bar"])
self.assertEqual([
"tags: -bar",
"tags: foo",
"test: test",
"tags: -quux",
"success: test",
],
self.filtered.getvalue().splitlines())
def test_suite():
loader = subunit.tests.TestUtil.TestLoader()
result = loader.loadTestsFromName(__name__)
return result
| kraziegent/mysql-5.6 | xtrabackup/test/python/subunit/tests/test_subunit_tags.py | Python | gpl-2.0 | 2,267 | 0.000882 |
"""
Pre-order, in-order and post-order traversal of binary trees.
Author: Wenru Dong
"""
from typing import TypeVar, Generic, Generator, Optional
T = TypeVar("T")
class TreeNode(Generic[T]):
def __init__(self, value: T):
self.val = value
self.left = None
self.right = None
# Pre-order traversal
def pre_order(root: Optional[TreeNode[T]]) -> Generator[T, None, None]:
if root:
yield root.val
yield from pre_order(root.left)
yield from pre_order(root.right)
# In-order traversal
def in_order(root: Optional[TreeNode[T]]) -> Generator[T, None, None]:
if root:
yield from in_order(root.left)
yield root.val
yield from in_order(root.right)
# Post-order traversal
def post_order(root: Optional[TreeNode[T]]) -> Generator[T, None, None]:
if root:
yield from post_order(root.left)
yield from post_order(root.right)
yield root.val
if __name__ == "__main__":
singer = TreeNode("Taylor Swift")
genre_country = TreeNode("Country")
genre_pop = TreeNode("Pop")
album_fearless = TreeNode("Fearless")
album_red = TreeNode("Red")
album_1989 = TreeNode("1989")
album_reputation = TreeNode("Reputation")
song_ls = TreeNode("Love Story")
song_wh = TreeNode("White Horse")
song_wanegbt = TreeNode("We Are Never Ever Getting Back Together")
song_ikywt = TreeNode("I Knew You Were Trouble")
song_sio = TreeNode("Shake It Off")
song_bb = TreeNode("Bad Blood")
song_lwymmd = TreeNode("Look What You Made Me Do")
song_g = TreeNode("Gorgeous")
singer.left, singer.right = genre_country, genre_pop
genre_country.left, genre_country.right = album_fearless, album_red
genre_pop.left, genre_pop.right = album_1989, album_reputation
album_fearless.left, album_fearless.right = song_ls, song_wh
album_red.left, album_red.right = song_wanegbt, song_ikywt
album_1989.left, album_1989.right = song_sio, song_bb
album_reputation.left, album_reputation.right = song_lwymmd, song_g
print(list(pre_order(singer)))
print(list(in_order(singer)))
print(list(post_order(singer)))
| wangzheng0822/algo | python/23_binarytree/binary_tree.py | Python | apache-2.0 | 2,175 | 0.001839 |
import os
import time
import threading
import warnings
from django.conf import settings
from django.db import connections
from django.dispatch import receiver, Signal
from django.utils import timezone
from django.utils.functional import empty
template_rendered = Signal(providing_args=["template", "context"])
setting_changed = Signal(providing_args=["setting", "value", "enter"])
# Most setting_changed receivers are supposed to be added below,
# except for cases where the receiver is related to a contrib app.
# Settings that may not work well when using 'override_settings' (#19031)
COMPLEX_OVERRIDE_SETTINGS = set(['DATABASES'])
@receiver(setting_changed)
def clear_cache_handlers(**kwargs):
if kwargs['setting'] == 'CACHES':
from django.core.cache import caches
caches._caches = threading.local()
@receiver(setting_changed)
def update_installed_apps(**kwargs):
if kwargs['setting'] == 'INSTALLED_APPS':
# Rebuild any AppDirectoriesFinder instance.
from django.contrib.staticfiles.finders import get_finder
get_finder.cache_clear()
# Rebuild management commands cache
from django.core.management import get_commands
get_commands.cache_clear()
# Rebuild templatetags module cache.
from django.template import base as mod
mod.templatetags_modules = []
# Rebuild app_template_dirs cache.
from django.template.loaders import app_directories as mod
mod.app_template_dirs = mod.calculate_app_template_dirs()
# Rebuild translations cache.
from django.utils.translation import trans_real
trans_real._translations = {}
@receiver(setting_changed)
def update_connections_time_zone(**kwargs):
if kwargs['setting'] == 'TIME_ZONE':
# Reset process time zone
if hasattr(time, 'tzset'):
if kwargs['value']:
os.environ['TZ'] = kwargs['value']
else:
os.environ.pop('TZ', None)
time.tzset()
# Reset local time zone cache
timezone._localtime = None
# Reset the database connections' time zone
if kwargs['setting'] == 'USE_TZ' and settings.TIME_ZONE != 'UTC':
USE_TZ, TIME_ZONE = kwargs['value'], settings.TIME_ZONE
elif kwargs['setting'] == 'TIME_ZONE' and not settings.USE_TZ:
USE_TZ, TIME_ZONE = settings.USE_TZ, kwargs['value']
else:
# no need to change the database connections' time zones
return
tz = 'UTC' if USE_TZ else TIME_ZONE
for conn in connections.all():
conn.settings_dict['TIME_ZONE'] = tz
tz_sql = conn.ops.set_time_zone_sql()
if tz_sql:
conn.cursor().execute(tz_sql, [tz])
@receiver(setting_changed)
def clear_context_processors_cache(**kwargs):
if kwargs['setting'] == 'TEMPLATE_CONTEXT_PROCESSORS':
from django.template import context
context._standard_context_processors = None
@receiver(setting_changed)
def clear_template_loaders_cache(**kwargs):
if kwargs['setting'] == 'TEMPLATE_LOADERS':
from django.template import loader
loader.template_source_loaders = None
@receiver(setting_changed)
def clear_serializers_cache(**kwargs):
if kwargs['setting'] == 'SERIALIZATION_MODULES':
from django.core import serializers
serializers._serializers = {}
@receiver(setting_changed)
def language_changed(**kwargs):
if kwargs['setting'] in {'LANGUAGES', 'LANGUAGE_CODE', 'LOCALE_PATHS'}:
from django.utils.translation import trans_real
trans_real._default = None
trans_real._active = threading.local()
if kwargs['setting'] in {'LANGUAGES', 'LOCALE_PATHS'}:
from django.utils.translation import trans_real
trans_real._translations = {}
trans_real.check_for_language.cache_clear()
@receiver(setting_changed)
def file_storage_changed(**kwargs):
if kwargs['setting'] in ('MEDIA_ROOT', 'DEFAULT_FILE_STORAGE'):
from django.core.files.storage import default_storage
default_storage._wrapped = empty
@receiver(setting_changed)
def complex_setting_changed(**kwargs):
if kwargs['enter'] and kwargs['setting'] in COMPLEX_OVERRIDE_SETTINGS:
# Considering the current implementation of the signals framework,
# stacklevel=5 shows the line containing the override_settings call.
warnings.warn("Overriding setting %s can lead to unexpected behaviour."
% kwargs['setting'], stacklevel=5)
| TimBuckley/effective_django | django/test/signals.py | Python | bsd-3-clause | 4,520 | 0 |
#!/usr/bin/env python
#
# Copyright 2016 Google Inc.
#
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Create a ccache binary for mac hosts."""
import argparse
import os
import subprocess
import sys
FILE_DIR = os.path.dirname(os.path.abspath(__file__))
INFRA_BOTS_DIR = os.path.realpath(os.path.join(FILE_DIR, os.pardir, os.pardir))
sys.path.insert(0, INFRA_BOTS_DIR)
import utils
URL = "https://github.com/ccache/ccache/releases/download/v3.7.7/ccache-3.7.7.tar.gz"
VERSION = "ccache-3.7.7"
def create_asset(target_dir):
# configure --prefix requires an absolute path.
target_dir = os.path.abspath(target_dir)
# Download and extract the source.
with utils.tmp_dir():
subprocess.check_call(["curl", "-L", "-o", VERSION + ".tar.gz",
"https://github.com/ccache/ccache/releases/download/v3.7.7/ccache-3.7.7.tar.gz"])
subprocess.check_call(["tar", "-xzf", VERSION + ".tar.gz"])
os.chdir(VERSION)
subprocess.check_call(["./configure", "--disable-man", "--prefix=" + target_dir])
subprocess.check_call(["make"])
subprocess.check_call(["make" ,"install"])
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--target_dir', '-t', required=True)
args = parser.parse_args()
create_asset(args.target_dir)
if __name__ == '__main__':
main()
| youtube/cobalt | third_party/skia_next/third_party/skia/infra/bots/assets/ccache_mac/create.py | Python | bsd-3-clause | 1,385 | 0.01083 |
# Copyright (c) 2011, 2012, Jeroen Ketema, University of Twente
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of the University of Twente nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from pyth.plugins.rtf15.reader import Rtf15Reader
from pyth.plugins.plaintext.writer import PlaintextWriter
from os.path import join
from StringIO import StringIO
from sys import stderr
from uuid import uuid4
from zipfile import ZipFile
destroy_event_id = str(uuid4())
class OdlExtractException(Exception):
pass
class PackageData:
def __init__(self):
self.name = None
self.ident = None
self.is_child = False
self.child_id = []
self.children = []
class TransitionData:
def __init__(self):
self.ident = None
self.source = None
self.target = None
self.event = None
self.event_id = None
self.action = None
self.guard = None
self.guard_id = None
self.class_id = None
class AssociationData:
def __init__(self):
self.owner = [None, None]
self.upper = [None, None]
self.lower = [None, None]
self.name = [None, None]
self.role = [None, None]
class StateData:
def __init__(self):
self.vtype = "uml:State"
self.name = None
self.class_id = None
self.substates = []
self.is_parallel = False
self.superstate = None
class AttributeData:
def __init__(self):
self.name = None
self.ident = None
self.default = None
self.kind = None
self.type = None
class ParameterData:
def __init__(self):
self.name = None
self.kind = None
self.type = None
class EnumeratedTypeData:
def __init__(self):
self.name = None
self.literals = []
class EnumeratedLiteralData:
def __init__(self):
self.name = None
self.ident = None
def GetVersion(data):
version = None
for data_item in data[1]:
if data_item[0] == "Version":
if version == None:
version = data_item
else:
raise OdlExtractException("Multiple versions found for item")
if version == None:
raise OdlExtractException("No version data found")
else:
return version
def GetId(data):
for item in data:
if item[0] == "Attribute" \
and item[1] == "_Art1_Id":
return item[2][0]
raise OdlExtractException("No model identifier found")
def GetModel(odl_data):
for ident in odl_data:
if odl_data[ident][0] == "_Art1_Model":
model_id = GetId(odl_data[ident][1])
name = ident.replace(' ', '_').replace('-', '_').replace('&', "and")
return (model_id, name)
raise OdlExtractException("No model name found")
def GetName(data):
version = GetVersion(data)
return version[1][6:].replace(' ', '_') \
.replace('-', '_').replace('&', "and")
def GetNamePlain(data):
version = GetVersion(data)
return version[1][6:]
def GetConstruction(data):
version = GetVersion(data)
for item in version[2]:
if item[0] == "Attribute" \
and item[1] == "_Art1_Construction":
return item[2][0]
raise OdlExtractException("Construction type not found")
def GetClasses(odl_data, used_classes):
"""Yields dictionary from class identifer to class name
"""
classes = {}
for ident in odl_data:
if odl_data[ident][0] == "_Art1_Class":
if ident in classes:
raise OdlExtractException("Class defined multiple times")
if used_classes != None and ident not in used_classes:
continue
classes[ident] = GetName(odl_data[ident])
return classes
def GetGeneralizations(version, general, ident):
for data in version[2]:
if data[0] == "Relationship" \
and data[1] == "_Art1_Class_To_Generalization" \
and data[2] == "_Art1_Generalization":
general[data[3]] = ident
return general
def GetSpecializations(version, special, ident):
special[ident] = []
for data in version[2]:
if data[0] == "Relationship" \
and data[1] == "_Art1_Class_To_Specialization" \
and data[2] == "_Art1_Specialization":
special[ident].append(data[3])
return special
def GetSpecialInGeneral(version, special_gen, ident):
for data in version[2]:
if data[0] == "Relationship" \
and data[1] == "_Art1_Generalization_To_Specialization" \
and data[2] == "_Art1_Specialization":
special_gen[data[3]] = ident
return special_gen
def GetSuperClasses(odl_data, classes):
general = {}
special = {}
for ident in classes:
version = GetVersion(odl_data[ident])
general = GetGeneralizations(version, general, ident)
special = GetSpecializations(version, special, ident)
special_gen = {}
for ident in odl_data:
if odl_data[ident][0] == "_Art1_Generalization":
version = GetVersion(odl_data[ident])
special_gen = GetSpecialInGeneral(version, special_gen, ident)
super_classes = {}
for ident in special:
super_classes[ident] = {}
for special_ident in special[ident]:
super_classes[ident][special_ident] = \
general[special_gen[special_ident]]
return super_classes
def GetAttributeIds(version, attrib_ids, ident):
attrib_ids[ident] = []
for data in version[2]:
if data[0] == "Relationship" \
and data[1] == "_Art1_Class_To_Attribute" \
and data[2] == "_Art1_Attribute":
attrib_ids[ident].append(data[3])
return attrib_ids
def IsDefaultValue(version):
for data in version[2]:
if data[0] == "Attribute" \
and data[1] == "_Art1_CustomPropertyName" \
and data[2][0] == "Default Value":
return True
return False
def GetDefaultValue(ident, odl_data, source):
version = GetVersion(odl_data[ident])
for data in version[2]:
if data[0] == "Relationship" \
and data[1] == "_Art1_ModelObject_To_CustomPropertyTextObject" \
and data[2] == "_Art1_CustomPropertyTextObject":
custom_version = GetVersion(odl_data[data[3]])
if IsDefaultValue(custom_version):
# GetExternal takes a fourth argument used only for warning context; pass the
# attribute id here so the call matches the signature defined below.
return GetExternal(custom_version, odl_data, source, ident)
return None
def GetKindAndType(ident, odl_data):
version = GetVersion(odl_data[ident])
for data in version[2]:
if data[0] == "Relationship":
if data[1] == "_Art1_TypedAttribute_To_DataType" \
or data[1] == "_Art1_TypedParameter_To_DataType":
return (data[2], data[3])
return (None, None)
def GetAttributes(odl_data, classes, source):
"""Yields dictionary from class identifer to attribute data
"""
attrib_ids = {}
for ident in classes:
version = GetVersion(odl_data[ident])
attrib_ids = GetAttributeIds(version, attrib_ids, ident)
names = {}
for ident in odl_data:
if odl_data[ident][0] == "_Art1_Attribute":
names[ident] = GetName(odl_data[ident])
attributes = {}
for ident in attrib_ids:
attributes[ident] = []
for attrib_id in attrib_ids[ident]:
data = AttributeData()
data.name = names[attrib_id]
data.ident = attrib_id
data.default = GetDefaultValue(attrib_id, odl_data, source)
(data.kind, data.type) \
= GetKindAndType(attrib_id, odl_data)
if data.type == None:
stderr.write("Warning: attribute \"" \
+ GetNamePlain(odl_data[attrib_id]) + "\" " \
+ "of \"" + GetNamePlain(odl_data[ident]) \
+ "\" does not have type\n")
attributes[ident].append(data)
return attributes
def ParseMultiplicity(index, multiplicity, association):
if multiplicity == "*":
association.upper[index] = "*"
elif multiplicity == "1":
association.upper[index] = "1"
association.lower[index] = "1"
elif multiplicity == "2":
association.upper[index] = "2"
association.lower[index] = "2"
elif multiplicity == "0..1":
association.upper[index] = "1"
association.lower[index] = "0"
elif multiplicity == "1..*":
association.upper[index] = "*"
association.lower[index] = "1"
elif multiplicity == "":
association.upper[index] = "*"
else:
raise OdlExtractException("Unknown multiplicity " + multiplicity \
+ " found in association")
def GetAssociations(odl_data, classes):
associations = {}
for ident in odl_data:
if odl_data[ident][0] != "_Art1_Association":
continue
association = AssociationData()
version = GetVersion(odl_data[ident])
for item in version[2]:
if item[0] == "Attribute":
if item[1] == "_Art1_EndMultiplicityUml":
ParseMultiplicity(0, item[2][0], association)
elif item[1] == "_Art1_StartMultiplicityUml":
ParseMultiplicity(1, item[2][0], association)
if association.upper[0] == None:
association.upper[0] = "1"
association.lower[0] = "0"
if association.upper[1] == None:
association.upper[1] = "*"
associations[ident] = association
roles = {}
for ident in odl_data:
if odl_data[ident][0] != "_Art1_Role":
continue
version = GetVersion(odl_data[ident])
name = None
index = None
assoc = None
for item in version[2]:
if item[0] == "Relationship" \
and item[1] == "_Art1_Role_To_Association" \
and item[2] == "_Art1_Association":
assoc = item[3]
name = GetName(odl_data[ident])
elif item[0] == "Attribute" \
and item[1] == "_Art1_AssociationEnd":
index = int(item[2][0])
if index == None:
index = 0
roles[ident] = (assoc, index)
associations[assoc].name[index] = name
associations[assoc].role[index] = ident
for ident in classes:
version = GetVersion(odl_data[ident])
for item in version[2]:
if item[0] == "Relationship" \
and item[1] == "_Art1_Class_To_Role" \
and item[2] == "_Art1_Role":
associations[roles[item[3]][0]].owner[roles[item[3]][1]] = ident
associations_used = {}
for ident in associations:
if associations[ident].owner[0] in classes \
and associations[ident].owner[1] in classes:
associations_used[ident] = associations[ident]
return associations_used
def GetEvents(odl_data, used_events):
events = { destroy_event_id : "<<Destroy>>" }
for ident in odl_data:
if odl_data[ident][0] != "_Art1_Event":
continue
if used_events != None and ident not in used_events:
continue
events[ident] = GetName(odl_data[ident])
return events
def GetParameters(odl_data):
parameters = { destroy_event_id : [] }
for ident in odl_data:
if odl_data[ident][0] != "_Art1_Event":
continue
version = GetVersion(odl_data[ident])
parameters[ident] = []
for item in version[2]:
if item[0] == "Relationship" \
and item[1] == "_Art1_Event_To_Parameter" \
and item[2] == "_Art1_Parameter":
parameter = ParameterData()
parameter.name = GetName(odl_data[item[3]])
(parameter.kind, parameter.type) \
= GetKindAndType(item[3], odl_data)
if parameter.type == None:
stderr.write("Warning: parameter \"" \
+ GetNamePlain(odl_data[item[3]]) + "\" " \
+ "of \"" + GetNamePlain(odl_data[ident]) \
+ "\" does not have type\n")
parameters[ident].append(parameter)
return parameters
def GetStates(odl_data, classes):
states = {}
for ident in odl_data:
if odl_data[ident][0] != "_Art1_State":
continue
version = GetVersion(odl_data[ident])
data = StateData()
data.name = GetName(odl_data[ident])
for item in version[2]:
if item[0] == "Relationship" \
and item[1] == "_Art1_States_To_Class" \
and item[2] == "_Art1_Class":
data.class_id = item[3]
elif item[0] == "Attribute" \
and item[1] == "_Art1_StateType":
if item[2][0] == "0":
data.vtype = "uml:Pseudostate"
elif item[2][0] == "1":
data.vtype = "uml:FinalState"
elif item[0] == "Relationship" \
and item[1] == "_Art1_SuperState_To_SubStates" \
and item[2] == "_Art1_State":
data.substates.append(item[3])
states[ident] = data
for ident in states:
for substate_id in states[ident].substates:
states[substate_id].superstate = ident
for ident in states:
version = GetVersion(odl_data[ident])
for item in version[2]:
if item[0] == "Relationship" \
and item[1] == "_Art1_ConcurrentStates_To_CompositeState" \
and item[2] == "_Art1_State":
states[item[3]].substates.append(ident)
states[item[3]].is_parallel = True
states[ident].superstate = item[3]
used_states = {}
for ident in states:
if states[ident].class_id in classes:
used_states[ident] = states[ident]
return used_states
def GetReplaceData(version, odl_data):
start = None
name = None
obj = None
for data in version[2]:
if data[0] == "Attribute" \
and data[1] == "_Art1_TokenStart":
start = int(data[2][0])
elif data[0] == "Attribute" \
and data[1] == "_Art1_LastNameText":
name = data[2][0]
name = name[:len(name) - 1]
elif data[0] == "Relationship" \
and data[1] == "_Art1_ModelObjectToken_To_ModelObject":
obj = GetName(odl_data[data[3]])
return (start, name, obj)
def ReplaceTextNames(external, version, odl_data, class_id):
old_external = external
replacements = {}
for data in version[2]:
if data[0] == "Relationship" \
and data[1] == "_Art1_TextObject_To_ModelObjectToken" \
and data[2] == "_Art1_ModelObjectToken":
replace = GetReplaceData(GetVersion(odl_data[data[3]]), odl_data)
replacements[replace[0]] = replace
i = len(external) - 1
while i >= 0:
if i in replacements:
length = len(replacements[i][1])
replace = replacements[i][2]
# In some sporadic cases there is no link back to a model object.
# Use the token name itself with appropriate replacements
if replace == None:
name = replacements[i][1]
stderr.write("Warning: In \"" + GetName(odl_data[class_id]) \
+ "\", token \"" + name + "\" not linked at " \
+ "position " + str(i) + " of:\n\n" \
+ old_external + "\n")
replace = name.replace(' ', '_').replace('-', '_'). \
replace('&', "and")
if external[i:i + length] == replacements[i][1]:
external = external[:i] + replace + external[i + length:]
else:
raise OdlExtractException("Cannot replace token \"" \
+ replacements[i][1] \
+ "\" at position " \
+ str(i) + " in:\n\n" \
+ old_external)
i -= 1
return external
def GetExternal(version, odl_data, source, class_id):
external = ""
for item in version[2]:
if item[0] == "Attribute" \
and item[1] == "_Art1_RTF":
if len(item[2]) == 2:
if isinstance(source, ZipFile):
data = source.open(item[2][0]).read()
else:
file_name = join(source, item[2][0])
f = open(file_name, 'rb')
data = f.read()
f.close()
data = data.replace("\x0c", "")
elif len(item[2]) == 1:
data = item[2][0]
if data == "":
return ""
f = StringIO()
f.write(data)
doc = Rtf15Reader.read(f, clean_paragraphs = False)
external = PlaintextWriter.write(doc).getvalue()
external = external.replace("\n\n", "\n")
return ReplaceTextNames(external, version, odl_data, class_id)
def GetTypeEvent(version):
for item in version[2]:
if item[0] == "Attribute" \
and item[1] == "_Art1_EventType":
return int(item[2][0])
return None
def FillTransitionDetails(odl_data, source, transitions):
for ident in odl_data:
if odl_data[ident][0] != "_Art1_EventActionBlock":
continue
trans_ident = ident
version = GetVersion(odl_data[ident])
etype = GetTypeEvent(version)
event = None
event_id = None
for item in version[2]:
if item[0] == "Relationship" \
and item[1] == "_Art1_EventActionBlock_To_Transition" \
and item[2] == "_Art1_Transition":
trans_ident = item[3]
if trans_ident not in transitions:
continue
# Needed for warning handling in GetExternal
class_id = transitions[trans_ident].class_id
for item in version[2]:
if item[0] == "Relationship" \
and item[1] == "_Art1_EventActionBlock_To_SignalEvent" \
and item[2] == "_Art1_Event" \
and etype == 0:
event = "signal/" + GetName(odl_data[item[3]])
event_id = item[3]
elif item[0] == "Relationship" \
and item[1] == "_Art1_EventActionBlock_To_ChangeEvent" \
and item[2] == "_Art1_ChangeEvent":
change_version = GetVersion(odl_data[item[3]])
if etype == 2:
event = "Time/" + GetExternal(change_version, odl_data, \
source, class_id)
elif etype == 3:
event = "Change/" + GetExternal(change_version, odl_data, \
source, class_id)
elif item[0] == "Relationship" \
and item[1] == "_Art1_EventActionBlock_To_GuardCondition" \
and item[2] == "_Art1_GuardCondition":
guard_version = GetVersion(odl_data[item[3]])
guard = GetExternal(guard_version, odl_data, source, \
class_id)
guard_id = item[3]
if etype == 4:
event = "Entry/"
elif etype == 5:
event = "Exit/"
elif etype == None:
event = "None"
elif etype == 7: # Special <<Destroy>> signal
event = "signal/<<Destroy>>"
event_id = destroy_event_id
action = "delete self"
elif etype == 8: # Apparently also represents the absence of a signal
event = "None"
if event == None:
raise OdlExtractException("Found unknown event type: " + str(etype))
if etype == 0 and trans_ident == ident:
event = "signal_in/" + event[7:]
if etype != 7:
action = GetExternal(version, odl_data, source, class_id)
transitions[trans_ident].action = action
transitions[trans_ident].event = event
transitions[trans_ident].event_id = event_id
transitions[trans_ident].guard = guard
transitions[trans_ident].guard_id = guard_id
return transitions
def GetTransitions(odl_data, source, states):
transitions = {}
for ident in odl_data:
if odl_data[ident][0] != "_Art1_Transition":
continue
version = GetVersion(odl_data[ident])
data = TransitionData()
data.ident = ident
for item in version[2]:
if item[0] == "Relationship" \
and item[1] == "_Art1_TransitionEnd_To_EndState" \
and item[2] == "_Art1_State":
data.target = item[3]
transitions[ident] = data
for ident in states:
version = GetVersion(odl_data[ident])
for item in version[2]:
if item[0] == "Relationship" \
and item[1] == "_Art1_StartState_To_TransitionStart" \
and item[2] == "_Art1_Transition":
transitions[item[3]].source = ident
transitions[item[3]].class_id = states[ident].class_id
elif item[0] == "Relationship" \
and item[1] == "_Art1_State_To_EventActionBlock" \
and item[2] == "_Art1_EventActionBlock":
data = TransitionData()
data.ident = item[3]
data.source = ident
data.target = ident
data.class_id = states[ident].class_id
transitions[item[3]] = data
used_transitions = {}
for ident in transitions:
if transitions[ident].source != None \
and transitions[ident].target != None:
used_transitions[ident] = transitions[ident]
return FillTransitionDetails(odl_data, source, used_transitions).values()
def GetBasicTypes(odl_data):
basic_types = {}
for ident in odl_data:
if odl_data[ident][0] != "_Art1_BasicType":
continue
basic_types[ident] = GetName(odl_data[ident])
return basic_types
def GetEnumeratedTypes(odl_data):
enumerated_types = {}
for ident in odl_data:
if odl_data[ident][0] != "_Art1_Typedef":
continue
construction = GetConstruction(odl_data[ident])
if construction != "0":
continue
version = GetVersion(odl_data[ident])
data = EnumeratedTypeData()
data.name = GetName(odl_data[ident])
for item in version[2]:
if item[0] == "Relationship" \
and item[1] == "_Art1_Enumeration_To_EnumerationLiteral" \
and item[2] == "_Art1_EnumerationLiteral":
literal = EnumeratedLiteralData()
literal.name = GetName(odl_data[item[3]])
literal.ident = item[3]
data.literals.append(literal)
enumerated_types[ident] = data
return enumerated_types
def GetAliasTypes(odl_data):
for ident in odl_data:
if odl_data[ident][0] != "_Art1_Typedef":
continue
construction = GetConstruction(odl_data[ident])
if construction != "1":
continue
name = GetName(odl_data[ident])
stderr.write("Warning: alias type \"" + name + "\" unhandled\n")
def GetSequenceTypes(odl_data):
for ident in odl_data:
if odl_data[ident][0] != "_Art1_Typedef":
continue
construction = GetConstruction(odl_data[ident])
if construction != "2":
continue
name = GetName(odl_data[ident])
stderr.write("Warning: sequence type \"" + name + "\" unhandled\n")
def GetArrayTypes(odl_data):
for ident in odl_data:
if odl_data[ident][0] != "_Art1_Typedef":
continue
construction = GetConstruction(odl_data[ident])
if construction != "3":
continue
name = GetName(odl_data[ident])
stderr.write("Warning: array type " + name + " unhandled\n")
def FindSubpackageOf(ident, path, odl_data):
version = GetVersion(odl_data[ident])
for item in version[2]:
if item[0] == "Relationship" \
and item[1] == "_Art1_Package_To_PackageItem" \
and item[2] == "_Art1_Package" \
and GetName(odl_data[item[3]]) == path[0]:
if len(path) == 1:
return item[3]
else:
return FindSubpackageOf(item[3], path[1:], odl_data)
raise Exception("Subpackage " + path[0] + " not found")
def FindPackage(path, odl_data):
for ident in odl_data:
if odl_data[ident][0] != "_Art1_Package":
continue
if GetName(odl_data[ident]) == path[0]:
if len(path) == 1:
return ident
else:
return FindSubpackageOf(ident, path[1:], odl_data)
raise Exception("Package " + path[0] + " not found")
def FindAllSubpackages(ident, odl_data):
version = GetVersion(odl_data[ident])
subpackages = [ident]
for item in version[2]:
if item[0] == "Relationship" \
and item[1] == "_Art1_Package_To_PackageItem" \
and item[2] == "_Art1_Package":
subpackages += FindAllSubpackages(item[3], odl_data)
return subpackages
def FindClassesInPackages(packages, odl_data):
used_classes = []
for ident in packages:
version = GetVersion(odl_data[ident])
for item in version[2]:
if item[0] == "Relationship" \
and item[1] == "_Art1_Package_To_PackageItem" \
and item[2] == "_Art1_Class":
used_classes.append(item[3])
return used_classes
def FindEventsInPackages(packages, odl_data):
used_events = []
for ident in packages:
version = GetVersion(odl_data[ident])
for item in version[2]:
if item[0] == "Relationship" \
and item[1] == "_Art1_Package_To_PackageItem" \
and item[2] == "_Art1_Event":
used_events.append(item[3])
return used_events
def FindPackageClasses(path, odl_data):
path_list = path.replace(' ', '_').replace('-', '_').replace('&', "and") \
.rsplit('/')
ident = FindPackage(path_list, odl_data)
packages = FindAllSubpackages(ident, odl_data)
return FindClassesInPackages(packages, odl_data)
def FindPackageEvents(path, odl_data):
path_list = path.replace(' ', '_').replace('-', '_').replace('&', "and") \
.rsplit('/')
ident = FindPackage(path_list, odl_data)
packages = FindAllSubpackages(ident, odl_data)
return FindEventsInPackages(packages, odl_data)
def GetPackageHierarchy(odl_data):
packages = {}
for ident in odl_data:
if odl_data[ident][0] != "_Art1_Package":
continue
package = PackageData()
package.name = GetNamePlain(odl_data[ident])
package.ident = ident
version = GetVersion(odl_data[ident])
for item in version[2]:
if item[0] == "Relationship" \
and item[1] == "_Art1_Package_To_PackageItem" \
and item[2] == "_Art1_Package":
package.child_id.append(item[3])
packages[ident] = package
for ident in packages:
for child_id in packages[ident].child_id:
if child_id not in packages:
raise Exception("Subpackage " + child_id + " not_found")
packages[ident].children.append(packages[child_id])
packages[child_id].is_child = True
top_packages = []
for ident in packages:
if not packages[ident].is_child:
top_packages.append(packages[ident])
return top_packages
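# Illustrative usage sketch (hypothetical, not part of this module): given an
# `odl_data` dictionary in the format these helpers expect (parsed elsewhere
# in this project), the extraction helpers above are typically combined like
#
#   for package in GetPackageHierarchy(odl_data):
#       print(package.name)
#   class_idents = FindPackageClasses("Top/My Package", odl_data)
#   enum_types = GetEnumeratedTypes(odl_data)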
| jeroenk/artisanConvert | odl/odl_extract.py | Python | bsd-3-clause | 30,114 | 0.005678 |
"""my_project URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import include, path
from app1 import urls as app1_urls
urlpatterns = [
path('admin/', admin.site.urls),
path('app1/', include(app1_urls)),
]
| jessamynsmith/my_project | my_project/urls.py | Python | mit | 837 | 0 |
import pygame
fonts = []
is_initialized = False
FONT_TINY = 0
FONT_SMALL = 1
FONT_BIG = 2
CENTER_X = 'center_x'
def render(text, x, y, font_id, surface, render_mode=0):
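    # Draw `text` at (x, y) on `surface` using the lazily loaded font table
    # above; render_mode=CENTER_X centers the text horizontally around x.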
global is_initialized
global fonts
if not is_initialized:
fonts = [
pygame.font.Font('gfx/MunroSmall.ttf', 10),
pygame.font.Font('gfx/MunroSmall.ttf', 20),
pygame.font.Font('gfx/MunroSmall.ttf', 40)
]
is_initialized = True
temp_surface = fonts[font_id].render(text, 0, (255, 0, 0))
if render_mode == CENTER_X:
text_rect = temp_surface.get_rect()
        text_rect.x = x - text_rect.width / 2
text_rect.y = y
surface.blit(temp_surface, text_rect)
else:
        surface.blit(temp_surface, (x, y))
| homecoded/radioberry | radio/TextRenderer.py | Python | mit | 779 | 0.002567 |
class MyClass:
def __init__(self):
print(self.a) # Haven't defined self.a yet, can't use
self.a = 5
| shweta97/pyta | examples/pylint/E0203_access_member_before_definition.py | Python | gpl-3.0 | 121 | 0 |
class Solution:
def dailyTemperatures(self, T):
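        # Scan right to left: m[t] remembers the nearest index to the right at
        # which temperature t occurred, so the answer for position i is the
        # smallest such index among temperatures above T[i] (default: i itself).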
ans = []
m = [None]*101
for i in range(len(T)-1, -1, -1):
x = T[i]
m[x] = i
            ans.append(min([idx for idx in m[x+1:] if idx is not None], default=i)-i)
ans.reverse()
return ans
print(Solution().dailyTemperatures([73, 74, 75, 71, 69, 72, 76, 73]))
| zuun77/givemegoogletshirts | leetcode/python/739_daily-temperatures.py | Python | apache-2.0 | 367 | 0.002725 |
#!/usr/bin/env python3
# Copyright (c) 2008-11 Qtrac Ltd. All rights reserved.
# This program or module is free software: you can redistribute it and/or
# modify it under the terms of the GNU General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version. It is provided for educational
# purposes and is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
numbers = []
indexes = []
total = 0
lowest = None
highest = None
while True:
try:
line = input("enter a number or Enter to finish: ")
if not line:
break
indexes.append(len(numbers))
number = int(line)
numbers.append(number)
total += number
if lowest is None or lowest > number:
lowest = number
if highest is None or highest < number:
highest = number
except ValueError as err:
print(err)
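# Bubble-sort the collected numbers so the median can be read from the middle.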
swapped = True
while swapped:
swapped = False
for index in indexes:
if index + 1 == len(numbers):
break
if numbers[index] > numbers[index + 1]:
temp = numbers[index]
numbers[index] = numbers[index + 1]
numbers[index + 1] = temp
swapped = True
if numbers:
index = int(len(numbers) / 2)
median = numbers[index]
if index and index * 2 == len(numbers):
median = (median + numbers[index - 1]) / 2
print("numbers:", numbers)
if numbers:
print("count =", len(numbers), "total =", total,
"lowest =", lowest, "highest =", highest,
"mean =", total / len(numbers), "median =", median)
| therealjumbo/python_summer | py31eg/average2_ans.py | Python | gpl-3.0 | 1,820 | 0 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2018-02-16 13:08
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('tmv_app', '0080_auto_20180214_1234'),
]
operations = [
migrations.AddField(
model_name='dynamictopic',
name='ipcc_score',
field=models.FloatField(null=True),
),
migrations.AddField(
model_name='dynamictopic',
name='ipcc_share',
field=models.FloatField(null=True),
),
migrations.AddField(
model_name='dynamictopic',
name='share',
field=models.FloatField(null=True),
),
]
| mcallaghan/tmv | BasicBrowser/tmv_app/migrations/0081_auto_20180216_1308.py | Python | gpl-3.0 | 767 | 0 |
from typing import Any, Dict, Iterable, List, Optional, Set, Tuple, Union
from django.db.models import Model
from zerver.lib.create_user import create_user_profile, get_display_email_address
from zerver.lib.initial_password import initial_password
from zerver.lib.streams import render_stream_description
from zerver.models import Realm, RealmAuditLog, Recipient, Stream, Subscription, UserProfile
def bulk_create_users(realm: Realm,
users_raw: Set[Tuple[str, str, bool]],
bot_type: Optional[int]=None,
bot_owner: Optional[UserProfile]=None,
tos_version: Optional[str]=None,
timezone: str="") -> None:
"""
Creates and saves a UserProfile with the given email.
    Has some code based off of UserManager.create_user, but doesn't .save()
"""
existing_users = frozenset(UserProfile.objects.filter(
realm=realm).values_list('email', flat=True))
users = sorted([user_raw for user_raw in users_raw if user_raw[0] not in existing_users])
# Now create user_profiles
profiles_to_create: List[UserProfile] = []
for (email, full_name, active) in users:
profile = create_user_profile(realm, email,
initial_password(email), active, bot_type,
full_name, bot_owner, False, tos_version,
timezone, tutorial_status=UserProfile.TUTORIAL_FINISHED,
enter_sends=True)
profiles_to_create.append(profile)
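    # When the realm restricts email visibility, the real address stays in
    # delivery_email and the visible `email` field gets a realm-scoped
    # display address instead.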
if realm.email_address_visibility == Realm.EMAIL_ADDRESS_VISIBILITY_EVERYONE:
UserProfile.objects.bulk_create(profiles_to_create)
else:
for user_profile in profiles_to_create:
user_profile.email = user_profile.delivery_email
UserProfile.objects.bulk_create(profiles_to_create)
for user_profile in profiles_to_create:
user_profile.email = get_display_email_address(user_profile, realm)
UserProfile.objects.bulk_update(profiles_to_create, ['email'])
user_ids = {user.id for user in profiles_to_create}
RealmAuditLog.objects.bulk_create(
[RealmAuditLog(realm=realm, modified_user=profile_,
event_type=RealmAuditLog.USER_CREATED, event_time=profile_.date_joined)
for profile_ in profiles_to_create])
recipients_to_create: List[Recipient] = []
for user_id in user_ids:
recipient = Recipient(type_id=user_id, type=Recipient.PERSONAL)
recipients_to_create.append(recipient)
Recipient.objects.bulk_create(recipients_to_create)
bulk_set_users_or_streams_recipient_fields(UserProfile, profiles_to_create, recipients_to_create)
recipients_by_user_id: Dict[int, Recipient] = {}
for recipient in recipients_to_create:
recipients_by_user_id[recipient.type_id] = recipient
subscriptions_to_create: List[Subscription] = []
for user_id in user_ids:
recipient = recipients_by_user_id[user_id]
subscription = Subscription(user_profile_id=user_id, recipient=recipient)
subscriptions_to_create.append(subscription)
Subscription.objects.bulk_create(subscriptions_to_create)
def bulk_set_users_or_streams_recipient_fields(model: Model,
objects: Union[Iterable[UserProfile], Iterable[Stream]],
recipients: Optional[Iterable[Recipient]]=None) -> None:
assert model in [UserProfile, Stream]
for obj in objects:
assert isinstance(obj, model)
if model == UserProfile:
recipient_type = Recipient.PERSONAL
elif model == Stream:
recipient_type = Recipient.STREAM
if recipients is None:
object_ids = [obj.id for obj in objects]
recipients = Recipient.objects.filter(type=recipient_type, type_id__in=object_ids)
objects_dict = {obj.id: obj for obj in objects}
objects_to_update = set()
for recipient in recipients:
assert recipient.type == recipient_type
result = objects_dict.get(recipient.type_id)
if result is not None:
result.recipient = recipient
objects_to_update.add(result)
model.objects.bulk_update(objects_to_update, ['recipient'])
# This is only used in populate_db, so doesn't really need tests
def bulk_create_streams(realm: Realm,
stream_dict: Dict[str, Dict[str, Any]]) -> None: # nocoverage
existing_streams = frozenset([name.lower() for name in
Stream.objects.filter(realm=realm)
.values_list('name', flat=True)])
streams_to_create: List[Stream] = []
for name, options in stream_dict.items():
if 'history_public_to_subscribers' not in options:
options['history_public_to_subscribers'] = (
not options.get("invite_only", False) and not realm.is_zephyr_mirror_realm)
if name.lower() not in existing_streams:
streams_to_create.append(
Stream(
realm=realm,
name=name,
description=options["description"],
rendered_description=render_stream_description(options["description"]),
invite_only=options.get("invite_only", False),
stream_post_policy=options.get("stream_post_policy",
Stream.STREAM_POST_POLICY_EVERYONE),
history_public_to_subscribers=options["history_public_to_subscribers"],
is_web_public=options.get("is_web_public", False),
is_in_zephyr_realm=realm.is_zephyr_mirror_realm,
),
)
# Sort streams by name before creating them so that we can have a
# reliable ordering of `stream_id` across different python versions.
# This is required for test fixtures which contain `stream_id`. Prior
# to python 3.3 hashes were not randomized but after a security fix
# hash randomization was enabled in python 3.3 which made iteration
# of dictionaries and sets completely unpredictable. Here the order
# of elements while iterating `stream_dict` will be completely random
# for python 3.3 and later versions.
streams_to_create.sort(key=lambda x: x.name)
Stream.objects.bulk_create(streams_to_create)
recipients_to_create: List[Recipient] = []
for stream in Stream.objects.filter(realm=realm).values('id', 'name'):
if stream['name'].lower() not in existing_streams:
recipients_to_create.append(Recipient(type_id=stream['id'],
type=Recipient.STREAM))
Recipient.objects.bulk_create(recipients_to_create)
bulk_set_users_or_streams_recipient_fields(Stream, streams_to_create, recipients_to_create)
| brainwane/zulip | zerver/lib/bulk_create.py | Python | apache-2.0 | 6,975 | 0.004301 |
"""Support for building sinan, bootstraping it on a new version of erlang"""
import sys
import os
import commands
from optparse import OptionParser
class BuildError(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
ERTS_VERSION = "5.6.3"
BUILD_PATH = "_build/development/apps/%s/ebin"
ERLWARE_PATH = "/usr/local/erlware"
ERLC = "erlc +debug_info "
LOCAL_APPS = [("etask", "0.5.0"),
("sinan", "0.11.0.2"),
("sinan_web_api", "0.1.0.5")]
ERLWARE_APPS = ["fconf-0.3.0.0",
"ktuo-0.4.0.1",
"crary-0.2.3",
"eunit-2.0",
"cryptographic-0.2.1",
"ewlib-0.8.2.0",
"ewrepo-0.19.0.0",
"gas-6.1.1",
"kernel-2.12.3",
"ibrowse-1.4",
"uri-0.2.0",
"sgte-0.7.1",
"gtime-0.9.4",
"asn1-1.5.2"]
def generate_local_path(app):
ebin = "_build/development/apps/%s-%s/ebin" % (app[0], app[1])
include = "_build/development/apps/%s-%s/include" % (app[0], app[1])
if not os.path.isdir(ebin):
raise BuildError(ebin + " is not a directory")
return " -pa %s -I %s " % (ebin, include)
def generate_erlware_path(path):
ebin = "%s/packages/%s/lib/%s/ebin" % (ERLWARE_PATH, ERTS_VERSION, path)
include = "%s/packages/%s/lib/%s/include" % (ERLWARE_PATH, ERTS_VERSION, path)
if not os.path.isdir(ebin):
raise BuildError(ebin + " is not a directory")
return " -pa %s -I %s " % (ebin, include)
| asceth/sinan | support/support.py | Python | mit | 1,629 | 0.003683 |
from datetime import timedelta
import pytest
from furl import furl
from api.preprint_providers.permissions import GroupHelper
from osf_tests.factories import (
ReviewActionFactory,
AuthUserFactory,
PreprintFactory,
PreprintProviderFactory,
ProjectFactory,
)
def get_actual(app, url, user=None, sort=None, expect_errors=False, **filters):
url = furl(url)
for k, v in filters.items():
url.args['filter[{}]'.format(k)] = v
if sort is not None:
url.args['sort'] = sort
url = url.url
if expect_errors:
if user is None:
res = app.get(url, expect_errors=True)
else:
res = app.get(url, auth=user.auth, expect_errors=True)
return res
actual = []
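    # Page through the API results, following links['next'] until exhausted.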
while url:
if user is None:
res = app.get(url)
else:
res = app.get(url, auth=user.auth)
actual.extend([l['id'] for l in res.json['data']])
url = res.json['links']['next']
if sort is None:
return set(actual)
return actual
@pytest.mark.django_db
class ReviewActionFilterMixin(object):
@pytest.fixture()
def url(self):
raise NotImplementedError
@pytest.fixture()
def providers(self):
return [
PreprintProviderFactory(
reviews_workflow='pre-moderation'
) for _ in range(5)]
@pytest.fixture()
def all_actions(self, providers):
actions = []
for provider in providers:
preprint = PreprintFactory(
provider=provider,
project=ProjectFactory(is_public=True)
)
for _ in range(5):
actions.append(ReviewActionFactory(target=preprint))
return actions
@pytest.fixture()
def allowed_providers(self, providers):
return providers
@pytest.fixture()
def expected_actions(self, all_actions, allowed_providers):
provider_ids = set([p.id for p in allowed_providers])
return [a for a in all_actions if a.target.provider_id in provider_ids]
@pytest.fixture()
def user(self, allowed_providers):
user = AuthUserFactory()
for provider in allowed_providers:
user.groups.add(GroupHelper(provider).get_group('moderator'))
return user
def test_filter_actions(self, app, url, user, expected_actions):
# unfiltered
expected = set([l._id for l in expected_actions])
actual = get_actual(app, url, user)
assert expected == actual
if not expected_actions:
return
action = expected_actions[0]
# filter by id
expected = set([action._id])
actual = get_actual(app, url, user, id=action._id)
assert expected == actual
# filter by trigger
expected = set(
[l._id for l in expected_actions if l.trigger == action.trigger])
actual = get_actual(app, url, user, trigger=action.trigger)
assert expected == actual
# filter by from_state
expected = set(
[l._id for l in expected_actions if l.from_state == action.from_state])
actual = get_actual(app, url, user, from_state=action.from_state)
assert expected == actual
# filter by to_state
expected = set(
[l._id for l in expected_actions if l.to_state == action.to_state])
actual = get_actual(app, url, user, to_state=action.to_state)
assert expected == actual
# filter by date_created
expected = set([l._id for l in expected_actions])
actual = get_actual(app, url, user, date_created=action.created)
assert expected == actual
expected = set()
actual = get_actual(
app, url, user,
date_created=action.created - timedelta(days=1))
assert expected == actual
# filter by date_modified
expected = set([l._id for l in expected_actions])
actual = get_actual(app, url, user, date_modified=action.modified)
assert expected == actual
expected = set()
actual = get_actual(
app, url, user,
date_modified=action.modified - timedelta(days=1))
assert expected == actual
# filter by target
expected = set(
[l._id for l in expected_actions if l.target_id == action.target_id])
actual = get_actual(app, url, user, target=action.target._id)
assert expected == actual
# filter by provider
expected = set(
[l._id for l in expected_actions if l.target.provider_id == action.target.provider_id])
actual = get_actual(
app, url, user, provider=action.target.provider._id)
assert expected == actual
@pytest.mark.django_db
class ReviewableFilterMixin(object):
@pytest.fixture()
def url(self):
raise NotImplementedError
@pytest.fixture()
def expected_reviewables(self):
raise NotImplementedError
@pytest.fixture()
def user(self):
raise NotImplementedError
def test_reviewable_filters(self, app, url, user, expected_reviewables):
# unfiltered
expected = set([r._id for r in expected_reviewables])
actual = get_actual(app, url, user)
assert expected == actual
if not expected_reviewables:
return
reviewable = expected_reviewables[0]
# filter by reviews_state
expected = set(
[r._id for r in expected_reviewables if r.machine_state == reviewable.machine_state])
actual = get_actual(
app, url, user, reviews_state=reviewable.machine_state)
assert expected == actual
# order by date_last_transitioned
expected = [
r._id for r in sorted(
expected_reviewables,
key=lambda r: r.date_last_transitioned)]
actual = get_actual(app, url, user, sort='date_last_transitioned')
assert expected == actual
expected.reverse()
actual = get_actual(app, url, user, sort='-date_last_transitioned')
assert expected == actual
@pytest.mark.django_db
class ReviewProviderFilterMixin(object):
@pytest.fixture()
def url(self):
raise NotImplementedError
@pytest.fixture()
def expected_providers(self):
return [
PreprintProviderFactory(reviews_workflow='pre-moderation'),
PreprintProviderFactory(reviews_workflow='post-moderation'),
PreprintProviderFactory(reviews_workflow='pre-moderation'),
PreprintProviderFactory(reviews_workflow=None),
]
@pytest.fixture()
def moderator_pair(self, expected_providers):
user = AuthUserFactory()
provider = expected_providers[0]
user.groups.add(GroupHelper(provider).get_group('moderator'))
return (user, provider)
@pytest.fixture()
def admin_pair(self, expected_providers):
user = AuthUserFactory()
provider = expected_providers[1]
user.groups.add(GroupHelper(provider).get_group('admin'))
return (user, provider)
def test_review_provider_filters(
self, app, url, moderator_pair, admin_pair, expected_providers):
# unfiltered
expected = set([p._id for p in expected_providers])
actual = get_actual(app, url)
assert expected == actual
provider = expected_providers[0]
# filter by reviews_workflow
expected = set(
[p._id for p in expected_providers if p.reviews_workflow == provider.reviews_workflow])
actual = get_actual(
app, url, reviews_workflow=provider.reviews_workflow)
assert expected == actual
# filter by permissions (admin)
user, provider = admin_pair
expected = set([provider._id])
actual = get_actual(app, url, user, permissions='view_actions')
assert expected == actual
actual = get_actual(app, url, user, permissions='set_up_moderation')
assert expected == actual
actual = get_actual(
app, url, user, permissions='set_up_moderation,view_actions')
assert expected == actual
# filter by permissions (moderator)
user, provider = moderator_pair
expected = set([provider._id])
actual = get_actual(app, url, user, permissions='view_actions')
assert expected == actual
actual = get_actual(
app, url, user, permissions='set_up_moderation,view_actions')
assert expected == actual
expected = set()
actual = get_actual(app, url, user, permissions='set_up_moderation')
assert expected == actual
# filter by permissions (rando)
user = AuthUserFactory()
expected = set()
actual = get_actual(app, url, user, permissions='view_actions')
assert expected == actual
actual = get_actual(app, url, user, permissions='set_up_moderation')
assert expected == actual
actual = get_actual(
app, url, user, permissions='set_up_moderation,view_actions')
assert expected == actual
# filter by permissions requires auth
res = get_actual(
app, url, expect_errors=True,
permissions='set_up_moderation')
assert res.status_code == 401
| laurenrevere/osf.io | api_tests/reviews/mixins/filter_mixins.py | Python | apache-2.0 | 9,384 | 0.001492 |
"""Constants used in Mackup."""
# Current version
VERSION = '0.8.7'
# Support platforms
PLATFORM_DARWIN = 'Darwin'
PLATFORM_LINUX = 'Linux'
# Directory containing the application configs
APPS_DIR = 'applications'
# Mackup application name
MACKUP_APP_NAME = 'mackup'
# Default Mackup backup path where it stores its files in Dropbox
MACKUP_BACKUP_PATH = 'Mackup'
# Mackup config file
MACKUP_CONFIG_FILE = '.mackup.cfg'
# Directory that can contains user defined app configs
CUSTOM_APPS_DIR = '.mackup'
# Supported engines
ENGINE_DROPBOX = 'dropbox'
ENGINE_GDRIVE = 'google_drive'
ENGINE_BOX = 'box'
ENGINE_COPY = 'copy'
ENGINE_ICLOUD = 'icloud'
ENGINE_FS = 'file_system'
| Timidger/mackup | mackup/constants.py | Python | gpl-3.0 | 677 | 0 |
import os
import re
import glob
import sys
#print(str(sys.argv[1]))
files = glob.glob(r'/disk2/octane_node160/'+str(sys.argv[1])+'/*.*')
files.sort()
#print(files)
result = []
for infile in files:
linestring = infile[28:]
f = open(infile)
file = f.read()
f.close()
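    # Pull the Octane score and the deopt counter out of this log file.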
m = re.search(r"Score \(version 9\): (\d*)",file)
linestring = linestring +(";")
if(m is not None):
# print("Warmup (120s): " + m.group(1))
linestring = linestring + (m.group(1))
m = re.search(r"DEOPT COUNTER\: (\d*)", file)
linestring = linestring +(";")
if(m is not None):
# print("DEOPT Counter: " + m.group(1))
linestring = linestring +(m.group(1))
result.append(linestring)
result.sort()
for b in result:
    print(b)
| mohlerm/hotspot | evaluation/eval_octane.py | Python | gpl-2.0 | 763 | 0.011796 |
# -*- coding: utf-8 -*-
# Copyright (c) 2020, Frappe Technologies and Contributors
# See license.txt
from __future__ import unicode_literals
# import frappe
import unittest
class TestWebsiteSettings(unittest.TestCase):
pass
| adityahase/frappe | frappe/website/doctype/website_settings/test_website_settings.py | Python | mit | 227 | 0.008811 |
from product.models import *
from django.shortcuts import render_to_response
from django.views.generic import ListView, DetailView
from datetime import datetime
class ListDrink(ListView):
model = Drink
context_object_name = "product_list"
template_name = "product_list.html"
paginate_by = 5
class DetailDrink(DetailView):
model = Drink
context_object_name = "product"
template_name = "product.html"
class ListAppetizer(ListView):
model = Appetizer
context_object_name = "product_list"
template_name = "product_list.html"
paginate_by = 5
class DetailAppetizer(DetailView):
model = Appetizer
context_object_name = "product"
template_name = "product.html" | Elfhir/apero-imac | product/views.py | Python | mpl-2.0 | 671 | 0.026826 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Source separation algorithms attempt to extract recordings of individual
sources from a recording of a mixture of sources. Evaluation methods for
source separation compare the extracted sources from reference sources and
attempt to measure the perceptual quality of the separation.
See also the bss_eval MATLAB toolbox:
http://bass-db.gforge.inria.fr/bss_eval/
Conventions
-----------
An audio signal is expected to be in the format of a 1-dimensional array where
the entries are the samples of the audio signal. When providing a group of
estimated or reference sources, they should be provided in a 2-dimensional
array, where the first dimension corresponds to the source number and the
second corresponds to the samples.
Metrics
-------
* :func:`mir_eval.separation.bss_eval_sources`: Computes the bss_eval_sources
metrics from bss_eval, which optionally optimally match the estimated sources
to the reference sources and measure the distortion and artifacts present in
the estimated sources as well as the interference between them.
* :func:`mir_eval.separation.bss_eval_sources_framewise`: Computes the
bss_eval_sources metrics on a frame-by-frame basis.
* :func:`mir_eval.separation.bss_eval_images`: Computes the bss_eval_images
metrics from bss_eval, which includes the metrics in
:func:`mir_eval.separation.bss_eval_sources` plus the image to spatial
distortion ratio.
* :func:`mir_eval.separation.bss_eval_images_framewise`: Computes the
bss_eval_images metrics on a frame-by-frame basis.
References
----------
.. [#vincent2006performance] Emmanuel Vincent, Rémi Gribonval, and Cédric
Févotte, "Performance measurement in blind audio source separation," IEEE
Trans. on Audio, Speech and Language Processing, 14(4):1462-1469, 2006.
This code is licensed under the MIT License:
The MIT License (MIT)
Copyright (c) 2014 Colin Raffel
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
Please see http://craffel.github.io/mir_eval/ for more information
Addition by Pieter Appeltans on 4/4/2018: bss_eval for noisy signals
'''
import numpy as np
import scipy.fftpack
from scipy.linalg import toeplitz
from scipy.signal import fftconvolve
import collections
import itertools
import warnings
import six
#from . import util
# The maximum allowable number of sources (prevents insane computational load)
MAX_SOURCES = 100
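# Illustrative sketch of the input conventions described above (not part of
# the original module): sources are stacked as (nsrc, nsampl) arrays, e.g.
#
#   rng = np.random.RandomState(0)
#   reference_sources = rng.randn(2, 44100)              # two 1-second sources
#   estimated_sources = reference_sources + 0.1 * rng.randn(2, 44100)
#   sdr, sir, sar, perm = bss_eval_sources(reference_sources, estimated_sources)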
def filter_kwargs(_function, *args, **kwargs):
"""Given a function and args and keyword args to pass to it, call the function
but using only the keyword arguments which it accepts. This is equivalent
to redefining the function with an additional \*\*kwargs to accept slop
keyword args.
If the target function already accepts \*\*kwargs parameters, no filtering
is performed.
Parameters
----------
_function : callable
Function to call. Can take in any number of args or kwargs
"""
if has_kwargs(_function):
return _function(*args, **kwargs)
# Get the list of function arguments
func_code = six.get_function_code(_function)
function_args = func_code.co_varnames[:func_code.co_argcount]
# Construct a dict of those kwargs which appear in the function
filtered_kwargs = {}
for kwarg, value in list(kwargs.items()):
if kwarg in function_args:
filtered_kwargs[kwarg] = value
# Call the function with the supplied args and the filtered kwarg dict
return _function(*args, **filtered_kwargs)
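# `filter_kwargs` above was copied in from mir_eval.util, whose `has_kwargs`
# helper is unavailable here because the `util` import is commented out.
# Minimal stand-in sketch (assumes ordinary functions with inspectable
# signatures): report whether a callable accepts **kwargs.
import inspect

def has_kwargs(_function):
    try:
        spec = inspect.getfullargspec(_function)  # Python 3
        return spec.varkw is not None
    except AttributeError:
        spec = inspect.getargspec(_function)  # Python 2 fallback
        return spec.keywords is not None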
def validate(reference_sources, estimated_sources):
"""Checks that the input data to a metric are valid, and throws helpful
errors if not.
Parameters
----------
reference_sources : np.ndarray, shape=(nsrc, nsampl)
matrix containing true sources
estimated_sources : np.ndarray, shape=(nsrc, nsampl)
matrix containing estimated sources
"""
if reference_sources.shape != estimated_sources.shape:
raise ValueError('The shape of estimated sources and the true '
'sources should match. reference_sources.shape '
'= {}, estimated_sources.shape '
'= {}'.format(reference_sources.shape,
estimated_sources.shape))
if reference_sources.ndim > 3 or estimated_sources.ndim > 3:
raise ValueError('The number of dimensions is too high (must be less '
'than 3). reference_sources.ndim = {}, '
'estimated_sources.ndim '
'= {}'.format(reference_sources.ndim,
estimated_sources.ndim))
if reference_sources.size == 0:
warnings.warn("reference_sources is empty, should be of size "
"(nsrc, nsample). sdr, sir, sar, and perm will all "
"be empty np.ndarrays")
elif _any_source_silent(reference_sources):
raise ValueError('All the reference sources should be non-silent (not '
'all-zeros), but at least one of the reference '
'sources is all 0s, which introduces ambiguity to the'
' evaluation. (Otherwise we can add infinitely many '
'all-zero sources.)')
if estimated_sources.size == 0:
warnings.warn("estimated_sources is empty, should be of size "
"(nsrc, nsample). sdr, sir, sar, and perm will all "
"be empty np.ndarrays")
elif _any_source_silent(estimated_sources):
raise ValueError('All the estimated sources should be non-silent (not '
'all-zeros), but at least one of the estimated '
'sources is all 0s. Since we require each reference '
'source to be non-silent, having a silent estimated '
'source will result in an underdetermined system.')
if (estimated_sources.shape[0] > MAX_SOURCES or
reference_sources.shape[0] > MAX_SOURCES):
raise ValueError('The supplied matrices should be of shape (nsrc,'
' nsampl) but reference_sources.shape[0] = {} and '
'estimated_sources.shape[0] = {} which is greater '
'than mir_eval.separation.MAX_SOURCES = {}. To '
'override this check, set '
'mir_eval.separation.MAX_SOURCES to a '
'larger value.'.format(reference_sources.shape[0],
estimated_sources.shape[0],
MAX_SOURCES))
def validate_extended(reference_sources, estimated_sources,noise_source):
"""Checks that the input data to a metric are valid, and throws helpful
errors if not.
Parameters
----------
reference_sources : np.ndarray, shape=(nsrc, nsampl)
matrix containing true sources
estimated_sources : np.ndarray, shape=(nsrc, nsampl)
matrix containing estimated sources
"""
validate(reference_sources, estimated_sources)
if noise_source.size == 0:
warnings.warn("noise_source is empty, should be of size "
"(1, nsample). sdr, sir, sar, snr, and perm will all "
"be empty np.ndarrays")
elif _any_source_silent(noise_source[np.newaxis,:]):
raise ValueError('Noise sources should be non-silent (not '
'all-zeros)')
def _any_source_silent(sources):
"""Returns true if the parameter sources has any silent first dimensions"""
return np.any(np.all(np.sum(
sources, axis=tuple(range(2, sources.ndim))) == 0, axis=1))
def bss_eval_sources_extended(reference_sources, estimated_sources,noise_source,
compute_permutation=True):
"""
Ordering and measurement of the separation quality for estimated source
signals in terms of filtered true source, interference and artifacts.
The decomposition allows a time-invariant filter distortion of length
512, as described in Section III.B of [#vincent2006performance]_.
Passing ``False`` for ``compute_permutation`` will improve the computation
performance of the evaluation; however, it is not always appropriate and
is not the way that the BSS_EVAL Matlab toolbox computes bss_eval_sources.
Examples
--------
>>> # reference_sources[n] should be an ndarray of samples of the
>>> # n'th reference source
>>> # estimated_sources[n] should be the same for the n'th estimated
>>> # source
    >>> # noise_source should be an ndarray of samples of the noise
    >>> (sdr, sir, sar, snr,
    ... perm) = bss_eval_sources_extended(reference_sources,
    ...                                   estimated_sources,
    ...                                   noise_source)
Parameters
----------
reference_sources : np.ndarray, shape=(nsrc, nsampl)
matrix containing true sources (must have same shape as
estimated_sources)
estimated_sources : np.ndarray, shape=(nsrc, nsampl)
matrix containing estimated sources (must have same shape as
reference_sources)
    noise_source : np.ndarray, shape=(1, nsampl)
        matrix containing the noise source (must have same number of
samples as reference_sources)
compute_permutation : bool, optional
compute permutation of estimate/source combinations (True by default)
Returns
-------
sdr : np.ndarray, shape=(nsrc,)
vector of Signal to Distortion Ratios (SDR)
sir : np.ndarray, shape=(nsrc,)
vector of Source to Interference Ratios (SIR)
sar : np.ndarray, shape=(nsrc,)
vector of Sources to Artifacts Ratios (SAR)
snr : np.ndarray, shape=(nsrc,)
vector of Source to Noise Ratios (SNR)
perm : np.ndarray, shape=(nsrc,)
vector containing the best ordering of estimated sources in
the mean SIR sense (estimated source number ``perm[j]`` corresponds to
true source number ``j``). Note: ``perm`` will be ``[0, 1, ...,
nsrc-1]`` if ``compute_permutation`` is ``False``.
References
----------
.. [#] Emmanuel Vincent, Shoko Araki, Fabian J. Theis, Guido Nolte, Pau
Bofill, Hiroshi Sawada, Alexey Ozerov, B. Vikrham Gowreesunker, Dominik
Lutter and Ngoc Q.K. Duong, "The Signal Separation Evaluation Campaign
(2007-2010): Achievements and remaining challenges", Signal Processing,
92, pp. 1928-1936, 2012.
"""
# make sure the input is of shape (nsrc, nsampl)
if estimated_sources.ndim == 1:
estimated_sources = estimated_sources[np.newaxis, :]
if reference_sources.ndim == 1:
reference_sources = reference_sources[np.newaxis, :]
validate_extended(reference_sources, estimated_sources,noise_source)
# If empty matrices were supplied, return empty lists (special case)
if reference_sources.size == 0 or estimated_sources.size == 0 or noise_source.size == 0:
return np.array([]), np.array([]), np.array([]), np.array([])
nsrc = estimated_sources.shape[0]
# does user desire permutations?
if compute_permutation:
# compute criteria for all possible pair matches
sdr = np.empty((nsrc, nsrc))
sir = np.empty((nsrc, nsrc))
sar = np.empty((nsrc, nsrc))
snr = np.empty((nsrc, nsrc))
for jest in range(nsrc):
for jtrue in range(nsrc):
s_true, e_spat, e_interf,e_noise, e_artif = \
_bss_decomp_mtifilt_extended(reference_sources,
estimated_sources[jest],
noise_source,
jtrue, 512)
sdr[jest, jtrue], sir[jest, jtrue], sar[jest, jtrue],snr[jest,jtrue] = \
_bss_source_crit_extended(s_true, e_spat, e_interf,e_noise, e_artif)
# select the best ordering
perms = list(itertools.permutations(list(range(nsrc))))
mean_sir = np.empty(len(perms))
dum = np.arange(nsrc)
for (i, perm) in enumerate(perms):
mean_sir[i] = np.mean(sir[perm, dum])
popt = perms[np.argmax(mean_sir)]
idx = (popt, dum)
return (sdr[idx], sir[idx], sar[idx],snr[idx], np.asarray(popt))
else:
# compute criteria for only the simple correspondence
# (estimate 1 is estimate corresponding to reference source 1, etc.)
sdr = np.empty(nsrc)
sir = np.empty(nsrc)
sar = np.empty(nsrc)
snr = np.empty(nsrc)
for j in range(nsrc):
s_true, e_spat, e_interf,e_noise, e_artif = \
_bss_decomp_mtifilt_extended(reference_sources,
estimated_sources[j],
noise_source,
j, 512)
sdr[j], sir[j], sar[j],snr[j] = \
_bss_source_crit_extended(s_true, e_spat, e_interf,e_noise, e_artif)
# return the default permutation for compatibility
popt = np.arange(nsrc)
return (sdr, sir, sar,snr, popt)
def bss_eval_sources_extended_noise(reference_sources, estimated_sources,noise_source,
compute_permutation=True):
"""
Ordering and measurement of the separation quality for estimated source
signals in terms of filtered true source, interference and artifacts.
The decomposition allows a time-invariant filter distortion of length
512, as described in Section III.B of [#vincent2006performance]_.
Passing ``False`` for ``compute_permutation`` will improve the computation
performance of the evaluation; however, it is not always appropriate and
is not the way that the BSS_EVAL Matlab toolbox computes bss_eval_sources.
Examples
--------
>>> # reference_sources[n] should be an ndarray of samples of the
>>> # n'th reference source
>>> # estimated_sources[n] should be the same for the n'th estimated
>>> # source
    >>> # noise_source should be an ndarray of samples of the noise
    >>> (sdr, sir, sar, snr,
    ... perm) = bss_eval_sources_extended_noise(reference_sources,
    ...                                         estimated_sources,
    ...                                         noise_source)
Parameters
----------
reference_sources : np.ndarray, shape=(nsrc, nsampl)
matrix containing true sources (must have same shape as
estimated_sources)
estimated_sources : np.ndarray, shape=(nsrc, nsampl)
matrix containing estimated sources (must have same shape as
reference_sources)
    noise_source : np.ndarray, shape=(1, nsampl)
        matrix containing the noise source (must have same number of
samples as reference_sources)
compute_permutation : bool, optional
compute permutation of estimate/source combinations (True by default)
Returns
-------
sdr : np.ndarray, shape=(nsrc,)
vector of Signal to Distortion Ratios (SDR)
sir : np.ndarray, shape=(nsrc,)
vector of Source to Interference Ratios (SIR)
sar : np.ndarray, shape=(nsrc,)
vector of Sources to Artifacts Ratios (SAR)
snr : np.ndarray, shape=(nsrc,)
vector of Source to Noise Ratios (SNR)
perm : np.ndarray, shape=(nsrc,)
vector containing the best ordering of estimated sources in
the mean SIR sense (estimated source number ``perm[j]`` corresponds to
true source number ``j``). Note: ``perm`` will be ``[0, 1, ...,
nsrc-1]`` if ``compute_permutation`` is ``False``.
References
----------
.. [#] Emmanuel Vincent, Shoko Araki, Fabian J. Theis, Guido Nolte, Pau
Bofill, Hiroshi Sawada, Alexey Ozerov, B. Vikrham Gowreesunker, Dominik
Lutter and Ngoc Q.K. Duong, "The Signal Separation Evaluation Campaign
(2007-2010): Achievements and remaining challenges", Signal Processing,
92, pp. 1928-1936, 2012.
"""
# make sure the input is of shape (nsrc, nsampl)
if estimated_sources.ndim == 1:
estimated_sources = estimated_sources[np.newaxis, :]
if reference_sources.ndim == 1:
reference_sources = reference_sources[np.newaxis, :]
#validate_extended(reference_sources, estimated_sources,noise_source)
# If empty matrices were supplied, return empty lists (special case)
if reference_sources.size == 0 or estimated_sources.size == 0 or noise_source.size == 0:
return np.array([]), np.array([]), np.array([]), np.array([])
nsrc = estimated_sources.shape[0]
nspk = reference_sources.shape[0]
# does user desire permutations?
if compute_permutation:
# compute criteria for all possible pair matches
sdr = np.empty((nsrc, nspk))
sir = np.empty((nsrc, nspk))
sar = np.empty((nsrc, nspk))
snr = np.empty((nsrc, nspk))
for jest in range(nsrc):
for jtrue in range(nspk):
s_true, e_spat, e_interf,e_noise, e_artif = \
_bss_decomp_mtifilt_extended(reference_sources,
estimated_sources[jest],
noise_source,
jtrue, 512)
sdr[jest, jtrue], sir[jest, jtrue], sar[jest, jtrue],snr[jest,jtrue] = \
_bss_source_crit_extended(s_true, e_spat, e_interf,e_noise, e_artif)
# select the best ordering
perms = list(itertools.permutations(list(range(nsrc))))
mean_sir = np.empty(len(perms))
dum = np.arange(nspk)
for (i, perm) in enumerate(perms):
mean_sir[i] = np.mean(sir[perm[0:nspk], dum])
popt = perms[np.argmax(mean_sir)]
idx = (popt[0:nspk], dum)
        print(sdr)
        print(popt)
return (sdr[idx], sir[idx], sar[idx],snr[idx], np.asarray(popt))
else:
# compute criteria for only the simple correspondence
# (estimate 1 is estimate corresponding to reference source 1, etc.)
sdr = np.empty(nspk)
sir = np.empty(nspk)
sar = np.empty(nspk)
snr = np.empty(nspk)
for j in range(nspk):
s_true, e_spat, e_interf,e_noise, e_artif = \
_bss_decomp_mtifilt_extended(reference_sources,
estimated_sources[j],
noise_source,
j, 512)
sdr[j], sir[j], sar[j],snr[j] = \
_bss_source_crit_extended(s_true, e_spat, e_interf,e_noise, e_artif)
# return the default permutation for compatibility
popt = np.arange(nsrc)
return (sdr, sir, sar,snr, popt)
def bss_eval_sources(reference_sources, estimated_sources,
compute_permutation=True):
"""
Ordering and measurement of the separation quality for estimated source
signals in terms of filtered true source, interference and artifacts.
The decomposition allows a time-invariant filter distortion of length
512, as described in Section III.B of [#vincent2006performance]_.
Passing ``False`` for ``compute_permutation`` will improve the computation
performance of the evaluation; however, it is not always appropriate and
is not the way that the BSS_EVAL Matlab toolbox computes bss_eval_sources.
Examples
--------
>>> # reference_sources[n] should be an ndarray of samples of the
>>> # n'th reference source
>>> # estimated_sources[n] should be the same for the n'th estimated
>>> # source
>>> (sdr, sir, sar,
... perm) = mir_eval.separation.bss_eval_sources(reference_sources,
... estimated_sources)
Parameters
----------
reference_sources : np.ndarray, shape=(nsrc, nsampl)
matrix containing true sources (must have same shape as
estimated_sources)
estimated_sources : np.ndarray, shape=(nsrc, nsampl)
matrix containing estimated sources (must have same shape as
reference_sources)
compute_permutation : bool, optional
compute permutation of estimate/source combinations (True by default)
Returns
-------
sdr : np.ndarray, shape=(nsrc,)
vector of Signal to Distortion Ratios (SDR)
sir : np.ndarray, shape=(nsrc,)
vector of Source to Interference Ratios (SIR)
sar : np.ndarray, shape=(nsrc,)
vector of Sources to Artifacts Ratios (SAR)
perm : np.ndarray, shape=(nsrc,)
vector containing the best ordering of estimated sources in
the mean SIR sense (estimated source number ``perm[j]`` corresponds to
true source number ``j``). Note: ``perm`` will be ``[0, 1, ...,
nsrc-1]`` if ``compute_permutation`` is ``False``.
References
----------
.. [#] Emmanuel Vincent, Shoko Araki, Fabian J. Theis, Guido Nolte, Pau
Bofill, Hiroshi Sawada, Alexey Ozerov, B. Vikrham Gowreesunker, Dominik
Lutter and Ngoc Q.K. Duong, "The Signal Separation Evaluation Campaign
(2007-2010): Achievements and remaining challenges", Signal Processing,
92, pp. 1928-1936, 2012.
"""
# make sure the input is of shape (nsrc, nsampl)
if estimated_sources.ndim == 1:
estimated_sources = estimated_sources[np.newaxis, :]
if reference_sources.ndim == 1:
reference_sources = reference_sources[np.newaxis, :]
validate(reference_sources, estimated_sources)
# If empty matrices were supplied, return empty lists (special case)
if reference_sources.size == 0 or estimated_sources.size == 0:
return np.array([]), np.array([]), np.array([]), np.array([])
nsrc = estimated_sources.shape[0]
# does user desire permutations?
if compute_permutation:
# compute criteria for all possible pair matches
sdr = np.empty((nsrc, nsrc))
sir = np.empty((nsrc, nsrc))
sar = np.empty((nsrc, nsrc))
for jest in range(nsrc):
for jtrue in range(nsrc):
s_true, e_spat, e_interf, e_artif = \
_bss_decomp_mtifilt(reference_sources,
estimated_sources[jest],
jtrue, 512)
sdr[jest, jtrue], sir[jest, jtrue], sar[jest, jtrue] = \
_bss_source_crit(s_true, e_spat, e_interf, e_artif)
# select the best ordering
perms = list(itertools.permutations(list(range(nsrc))))
mean_sir = np.empty(len(perms))
dum = np.arange(nsrc)
for (i, perm) in enumerate(perms):
mean_sir[i] = np.mean(sir[perm, dum])
popt = perms[np.argmax(mean_sir)]
idx = (popt, dum)
return (sdr[idx], sir[idx], sar[idx], np.asarray(popt))
else:
# compute criteria for only the simple correspondence
# (estimate 1 is estimate corresponding to reference source 1, etc.)
sdr = np.empty(nsrc)
sir = np.empty(nsrc)
sar = np.empty(nsrc)
for j in range(nsrc):
s_true, e_spat, e_interf, e_artif = \
_bss_decomp_mtifilt(reference_sources,
estimated_sources[j],
j, 512)
sdr[j], sir[j], sar[j] = \
_bss_source_crit(s_true, e_spat, e_interf, e_artif)
# return the default permutation for compatibility
popt = np.arange(nsrc)
return (sdr, sir, sar, popt)
def bss_eval_sources_framewise(reference_sources, estimated_sources,
window=30*44100, hop=15*44100,
compute_permutation=False):
"""Framewise computation of bss_eval_sources
Please be aware that this function does not compute permutations (by
default) on the possible relations between reference_sources and
estimated_sources due to the dangers of a changing permutation. Therefore
(by default), it assumes that ``reference_sources[i]`` corresponds to
``estimated_sources[i]``. To enable computing permutations please set
``compute_permutation`` to be ``True`` and check that the returned ``perm``
is identical for all windows.
NOTE: if ``reference_sources`` and ``estimated_sources`` would be evaluated
using only a single window or are shorter than the window length, the
result of :func:`mir_eval.separation.bss_eval_sources` called on
``reference_sources`` and ``estimated_sources`` (with the
``compute_permutation`` parameter passed to
:func:`mir_eval.separation.bss_eval_sources`) is returned.
Examples
--------
>>> # reference_sources[n] should be an ndarray of samples of the
>>> # n'th reference source
>>> # estimated_sources[n] should be the same for the n'th estimated
>>> # source
>>> (sdr, sir, sar,
... perm) = mir_eval.separation.bss_eval_sources_framewise(
reference_sources,
... estimated_sources)
Parameters
----------
reference_sources : np.ndarray, shape=(nsrc, nsampl)
matrix containing true sources (must have the same shape as
``estimated_sources``)
estimated_sources : np.ndarray, shape=(nsrc, nsampl)
matrix containing estimated sources (must have the same shape as
``reference_sources``)
window : int, optional
Window length for framewise evaluation (default value is 30s at a
sample rate of 44.1kHz)
hop : int, optional
Hop size for framewise evaluation (default value is 15s at a
sample rate of 44.1kHz)
compute_permutation : bool, optional
compute permutation of estimate/source combinations for all windows
(False by default)
Returns
-------
sdr : np.ndarray, shape=(nsrc, nframes)
vector of Signal to Distortion Ratios (SDR)
sir : np.ndarray, shape=(nsrc, nframes)
vector of Source to Interference Ratios (SIR)
sar : np.ndarray, shape=(nsrc, nframes)
vector of Sources to Artifacts Ratios (SAR)
perm : np.ndarray, shape=(nsrc, nframes)
vector containing the best ordering of estimated sources in
the mean SIR sense (estimated source number ``perm[j]`` corresponds to
true source number ``j``). Note: ``perm`` will be ``range(nsrc)`` for
all windows if ``compute_permutation`` is ``False``
"""
# make sure the input is of shape (nsrc, nsampl)
if estimated_sources.ndim == 1:
estimated_sources = estimated_sources[np.newaxis, :]
if reference_sources.ndim == 1:
reference_sources = reference_sources[np.newaxis, :]
validate(reference_sources, estimated_sources)
# If empty matrices were supplied, return empty lists (special case)
if reference_sources.size == 0 or estimated_sources.size == 0:
return np.array([]), np.array([]), np.array([]), np.array([])
nsrc = reference_sources.shape[0]
nwin = int(
np.floor((reference_sources.shape[1] - window + hop) / hop)
)
# if fewer than 2 windows would be evaluated, return the sources result
if nwin < 2:
result = bss_eval_sources(reference_sources,
estimated_sources,
compute_permutation)
return [np.expand_dims(score, -1) for score in result]
# compute the criteria across all windows
sdr = np.empty((nsrc, nwin))
sir = np.empty((nsrc, nwin))
sar = np.empty((nsrc, nwin))
perm = np.empty((nsrc, nwin))
# k iterates across all the windows
for k in range(nwin):
win_slice = slice(k * hop, k * hop + window)
ref_slice = reference_sources[:, win_slice]
est_slice = estimated_sources[:, win_slice]
# check for a silent frame
if (not _any_source_silent(ref_slice) and
not _any_source_silent(est_slice)):
sdr[:, k], sir[:, k], sar[:, k], perm[:, k] = bss_eval_sources(
ref_slice, est_slice, compute_permutation
)
else:
# if we have a silent frame set results as np.nan
sdr[:, k] = sir[:, k] = sar[:, k] = perm[:, k] = np.nan
return sdr, sir, sar, perm
def bss_eval_images(reference_sources, estimated_sources,
compute_permutation=True):
"""Implementation of the bss_eval_images function from the
BSS_EVAL Matlab toolbox.
Ordering and measurement of the separation quality for estimated source
signals in terms of filtered true source, interference and artifacts.
This method also provides the ISR measure.
The decomposition allows a time-invariant filter distortion of length
512, as described in Section III.B of [#vincent2006performance]_.
Passing ``False`` for ``compute_permutation`` will improve the computation
performance of the evaluation; however, it is not always appropriate and
is not the way that the BSS_EVAL Matlab toolbox computes bss_eval_images.
Examples
--------
>>> # reference_sources[n] should be an ndarray of samples of the
>>> # n'th reference source
>>> # estimated_sources[n] should be the same for the n'th estimated
>>> # source
>>> (sdr, isr, sir, sar,
... perm) = mir_eval.separation.bss_eval_images(reference_sources,
... estimated_sources)
Parameters
----------
reference_sources : np.ndarray, shape=(nsrc, nsampl, nchan)
matrix containing true sources
estimated_sources : np.ndarray, shape=(nsrc, nsampl, nchan)
matrix containing estimated sources
compute_permutation : bool, optional
compute permutation of estimate/source combinations (True by default)
Returns
-------
sdr : np.ndarray, shape=(nsrc,)
vector of Signal to Distortion Ratios (SDR)
isr : np.ndarray, shape=(nsrc,)
vector of source Image to Spatial distortion Ratios (ISR)
sir : np.ndarray, shape=(nsrc,)
vector of Source to Interference Ratios (SIR)
sar : np.ndarray, shape=(nsrc,)
vector of Sources to Artifacts Ratios (SAR)
perm : np.ndarray, shape=(nsrc,)
vector containing the best ordering of estimated sources in
the mean SIR sense (estimated source number ``perm[j]`` corresponds to
true source number ``j``). Note: ``perm`` will be ``(1,2,...,nsrc)``
if ``compute_permutation`` is ``False``.
References
----------
.. [#] Emmanuel Vincent, Shoko Araki, Fabian J. Theis, Guido Nolte, Pau
Bofill, Hiroshi Sawada, Alexey Ozerov, B. Vikrham Gowreesunker, Dominik
Lutter and Ngoc Q.K. Duong, "The Signal Separation Evaluation Campaign
(2007-2010): Achievements and remaining challenges", Signal Processing,
92, pp. 1928-1936, 2012.
"""
# make sure the input has 3 dimensions
# assuming input is in shape (nsampl) or (nsrc, nsampl)
estimated_sources = np.atleast_3d(estimated_sources)
reference_sources = np.atleast_3d(reference_sources)
# we will ensure input doesn't have more than 3 dimensions in validate
validate(reference_sources, estimated_sources)
# If empty matrices were supplied, return empty lists (special case)
if reference_sources.size == 0 or estimated_sources.size == 0:
return np.array([]), np.array([]), np.array([]), \
np.array([]), np.array([])
# determine size parameters
nsrc = estimated_sources.shape[0]
nsampl = estimated_sources.shape[1]
nchan = estimated_sources.shape[2]
# does the user desire permutation?
if compute_permutation:
# compute criteria for all possible pair matches
sdr = np.empty((nsrc, nsrc))
isr = np.empty((nsrc, nsrc))
sir = np.empty((nsrc, nsrc))
sar = np.empty((nsrc, nsrc))
for jest in range(nsrc):
for jtrue in range(nsrc):
s_true, e_spat, e_interf, e_artif = \
_bss_decomp_mtifilt_images(
reference_sources,
np.reshape(
estimated_sources[jest],
(nsampl, nchan),
order='F'
),
jtrue,
512
)
sdr[jest, jtrue], isr[jest, jtrue], \
sir[jest, jtrue], sar[jest, jtrue] = \
_bss_image_crit(s_true, e_spat, e_interf, e_artif)
# select the best ordering
perms = list(itertools.permutations(range(nsrc)))
mean_sir = np.empty(len(perms))
dum = np.arange(nsrc)
for (i, perm) in enumerate(perms):
mean_sir[i] = np.mean(sir[perm, dum])
popt = perms[np.argmax(mean_sir)]
idx = (popt, dum)
return (sdr[idx], isr[idx], sir[idx], sar[idx], np.asarray(popt))
else:
# compute criteria for only the simple correspondence
# (estimate 1 is estimate corresponding to reference source 1, etc.)
sdr = np.empty(nsrc)
isr = np.empty(nsrc)
sir = np.empty(nsrc)
sar = np.empty(nsrc)
Gj = [0] * nsrc # prepare G matrics with zeroes
G = np.zeros(1)
for j in range(nsrc):
# save G matrix to avoid recomputing it every call
s_true, e_spat, e_interf, e_artif, Gj_temp, G = \
_bss_decomp_mtifilt_images(reference_sources,
np.reshape(estimated_sources[j],
(nsampl, nchan),
order='F'),
j, 512, Gj[j], G)
Gj[j] = Gj_temp
sdr[j], isr[j], sir[j], sar[j] = \
_bss_image_crit(s_true, e_spat, e_interf, e_artif)
# return the default permutation for compatibility
popt = np.arange(nsrc)
return (sdr, isr, sir, sar, popt)
def bss_eval_images_framewise(reference_sources, estimated_sources,
window=30*44100, hop=15*44100,
compute_permutation=False):
"""Framewise computation of bss_eval_images
Please be aware that this function does not compute permutations (by
default) on the possible relations between ``reference_sources`` and
``estimated_sources`` due to the dangers of a changing permutation.
Therefore (by default), it assumes that ``reference_sources[i]``
corresponds to ``estimated_sources[i]``. To enable computing permutations
please set ``compute_permutation`` to be ``True`` and check that the
returned ``perm`` is identical for all windows.
NOTE: if ``reference_sources`` and ``estimated_sources`` would be evaluated
using only a single window or are shorter than the window length, the
result of ``bss_eval_sources`` called on ``reference_sources`` and
``estimated_sources`` (with the ``compute_permutation`` parameter passed to
``bss_eval_images``) is returned
Examples
--------
>>> # reference_sources[n] should be an ndarray of samples of the
>>> # n'th reference source
>>> # estimated_sources[n] should be the same for the n'th estimated
>>> # source
>>> (sdr, isr, sir, sar,
... perm) = mir_eval.separation.bss_eval_images_framewise(
reference_sources,
... estimated_sources,
window,
.... hop)
Parameters
----------
reference_sources : np.ndarray, shape=(nsrc, nsampl, nchan)
matrix containing true sources (must have the same shape as
``estimated_sources``)
estimated_sources : np.ndarray, shape=(nsrc, nsampl, nchan)
matrix containing estimated sources (must have the same shape as
``reference_sources``)
window : int
Window length for framewise evaluation
hop : int
Hop size for framewise evaluation
compute_permutation : bool, optional
compute permutation of estimate/source combinations for all windows
(False by default)
Returns
-------
sdr : np.ndarray, shape=(nsrc, nframes)
vector of Signal to Distortion Ratios (SDR)
isr : np.ndarray, shape=(nsrc, nframes)
vector of source Image to Spatial distortion Ratios (ISR)
sir : np.ndarray, shape=(nsrc, nframes)
vector of Source to Interference Ratios (SIR)
sar : np.ndarray, shape=(nsrc, nframes)
vector of Sources to Artifacts Ratios (SAR)
perm : np.ndarray, shape=(nsrc, nframes)
vector containing the best ordering of estimated sources in
the mean SIR sense (estimated source number perm[j] corresponds to
true source number j)
Note: perm will be range(nsrc) for all windows if compute_permutation
is False
"""
# make sure the input has 3 dimensions
# assuming input is in shape (nsampl) or (nsrc, nsampl)
estimated_sources = np.atleast_3d(estimated_sources)
reference_sources = np.atleast_3d(reference_sources)
# we will ensure input doesn't have more than 3 dimensions in validate
validate(reference_sources, estimated_sources)
# If empty matrices were supplied, return empty lists (special case)
if reference_sources.size == 0 or estimated_sources.size == 0:
return np.array([]), np.array([]), np.array([]), np.array([])
nsrc = reference_sources.shape[0]
nwin = int(
np.floor((reference_sources.shape[1] - window + hop) / hop)
)
# if fewer than 2 windows would be evaluated, return the images result
if nwin < 2:
result = bss_eval_images(reference_sources,
estimated_sources,
compute_permutation)
return [np.expand_dims(score, -1) for score in result]
# compute the criteria across all windows
sdr = np.empty((nsrc, nwin))
isr = np.empty((nsrc, nwin))
sir = np.empty((nsrc, nwin))
sar = np.empty((nsrc, nwin))
perm = np.empty((nsrc, nwin))
# k iterates across all the windows
for k in range(nwin):
win_slice = slice(k * hop, k * hop + window)
ref_slice = reference_sources[:, win_slice, :]
est_slice = estimated_sources[:, win_slice, :]
# check for a silent frame
if (not _any_source_silent(ref_slice) and
not _any_source_silent(est_slice)):
sdr[:, k], isr[:, k], sir[:, k], sar[:, k], perm[:, k] = \
bss_eval_images(
ref_slice, est_slice, compute_permutation
)
else:
# if we have a silent frame set results as np.nan
sdr[:, k] = sir[:, k] = sar[:, k] = perm[:, k] = np.nan
return sdr, isr, sir, sar, perm
def _bss_decomp_mtifilt_extended(reference_sources, estimated_source,
                                 noise_source, j, flen):
    """Decomposition of an estimated source image into five components
    representing respectively the true source image, spatial (or filtering)
    distortion, interference, noise and artifacts, derived from the true
    source images using multichannel time-invariant filters.
    """
nsampl = estimated_source.size
# decomposition
# true source image
s_true = np.hstack((reference_sources[j], np.zeros(flen - 1)))
# spatial (or filtering) distortion
e_spat = _project(reference_sources[j, np.newaxis, :], estimated_source,
flen) - s_true
# interference
e_interf = _project(reference_sources,
estimated_source, flen) - s_true - e_spat
    e_noise = _project_extended(reference_sources, estimated_source,
                                noise_source, flen) - \
        _project(reference_sources, estimated_source, flen)
# artifacts
e_artif = -s_true - e_spat - e_interf - e_noise
e_artif[:nsampl] += estimated_source
    return (s_true, e_spat, e_interf, e_noise, e_artif)
def _bss_decomp_mtifilt(reference_sources, estimated_source, j, flen):
"""Decomposition of an estimated source image into four components
representing respectively the true source image, spatial (or filtering)
distortion, interference and artifacts, derived from the true source
images using multichannel time-invariant filters.
"""
nsampl = estimated_source.size
# decomposition
# true source image
s_true = np.hstack((reference_sources[j], np.zeros(flen - 1)))
# spatial (or filtering) distortion
e_spat = _project(reference_sources[j, np.newaxis, :], estimated_source,
flen) - s_true
# interference
e_interf = _project(reference_sources,
estimated_source, flen) - s_true - e_spat
# artifacts
e_artif = -s_true - e_spat - e_interf
e_artif[:nsampl] += estimated_source
return (s_true, e_spat, e_interf, e_artif)
def _bss_decomp_mtifilt_images(reference_sources, estimated_source, j, flen,
Gj=None, G=None):
"""Decomposition of an estimated source image into four components
representing respectively the true source image, spatial (or filtering)
distortion, interference and artifacts, derived from the true source
images using multichannel time-invariant filters.
Adapted version to work with multichannel sources.
Improved performance can be gained by passing Gj and G parameters initially
as all zeros. These parameters store the results from the computation of
the G matrix in _project_images and then return them for subsequent calls
    to this function. This only works when not computing permutations.
"""
nsampl = np.shape(estimated_source)[0]
nchan = np.shape(estimated_source)[1]
# are we saving the Gj and G parameters?
saveg = Gj is not None and G is not None
# decomposition
# true source image
s_true = np.hstack((np.reshape(reference_sources[j],
(nsampl, nchan),
order="F").transpose(),
np.zeros((nchan, flen - 1))))
# spatial (or filtering) distortion
if saveg:
e_spat, Gj = _project_images(reference_sources[j, np.newaxis, :],
estimated_source, flen, Gj)
else:
e_spat = _project_images(reference_sources[j, np.newaxis, :],
estimated_source, flen)
e_spat = e_spat - s_true
# interference
if saveg:
e_interf, G = _project_images(reference_sources,
estimated_source, flen, G)
else:
e_interf = _project_images(reference_sources,
estimated_source, flen)
e_interf = e_interf - s_true - e_spat
# artifacts
e_artif = -s_true - e_spat - e_interf
e_artif[:, :nsampl] += estimated_source.transpose()
# return Gj and G only if they were passed in
if saveg:
return (s_true, e_spat, e_interf, e_artif, Gj, G)
else:
return (s_true, e_spat, e_interf, e_artif)
def _project_extended(reference_sources, estimated_source, noise_source,
                      flen):
"""Least-squares projection of estimated source on the subspace spanned by
delayed versions of reference sources, with delays between 0 and flen-1
input:
           reference_sources: original sources
           estimated_source: the estimated source
           noise_source: the separated signal of the noise
"""
nsrc = reference_sources.shape[0]
nsampl = reference_sources.shape[1]
# computing coefficients of least squares problem via FFT ##
# zero padding and FFT of input data
reference_sources = np.hstack((reference_sources,
np.zeros((nsrc, flen - 1))))
noise_source = np.hstack((noise_source,np.zeros(flen-1)))
sources = np.vstack((reference_sources,noise_source))
estimated_source = np.hstack((estimated_source, np.zeros(flen - 1)))
n_fft = int(2**np.ceil(np.log2(nsampl + flen - 1.)))
sf = scipy.fftpack.fft(sources, n=n_fft, axis=1)
sef = scipy.fftpack.fft(estimated_source, n=n_fft)
# inner products between delayed versions of reference_sources
G = np.zeros(((nsrc+1) * flen, (nsrc+1) * flen))
for i in range(nsrc+1):
for j in range(nsrc+1):
ssf = sf[i] * np.conj(sf[j])
ssf = np.real(scipy.fftpack.ifft(ssf))
ss = toeplitz(np.hstack((ssf[0], ssf[-1:-flen:-1])),
r=ssf[:flen])
G[i * flen: (i+1) * flen, j * flen: (j+1) * flen] = ss
G[j * flen: (j+1) * flen, i * flen: (i+1) * flen] = ss.T
# inner products between estimated_source and delayed versions of
# reference_sources
D = np.zeros((nsrc+1) * flen)
for i in range(nsrc+1):
ssef = sf[i] * np.conj(sef)
ssef = np.real(scipy.fftpack.ifft(ssef))
D[i * flen: (i+1) * flen] = np.hstack((ssef[0], ssef[-1:-flen:-1]))
# Computing projection
# Distortion filters
try:
C = np.linalg.solve(G, D).reshape(flen, nsrc+1, order='F')
except np.linalg.linalg.LinAlgError:
C = np.linalg.lstsq(G, D)[0].reshape(flen, nsrc+1, order='F')
# Filtering
sproj = np.zeros(nsampl + flen - 1)
for i in range(nsrc+1):
sproj += fftconvolve(C[:, i], sources[i])[:nsampl + flen - 1]
return sproj
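# In matrix form, the loops above assemble and solve the normal equations
# G C = D, where G[i*flen + a, j*flen + b] = <s_i(n - a), s_j(n - b)> is the
# Gram matrix of delayed (reference + noise) sources and
# D[i*flen + a] = <s_i(n - a), s_hat(n)> collects cross-correlations with the
# estimate; the resulting filters C are then applied by FFT convolution to
# form the projection sproj.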
def _project(reference_sources, estimated_source, flen):
"""Least-squares projection of estimated source on the subspace spanned by
delayed versions of reference sources, with delays between 0 and flen-1
"""
nsrc = reference_sources.shape[0]
nsampl = reference_sources.shape[1]
# computing coefficients of least squares problem via FFT ##
# zero padding and FFT of input data
reference_sources = np.hstack((reference_sources,
np.zeros((nsrc, flen - 1))))
estimated_source = np.hstack((estimated_source, np.zeros(flen - 1)))
n_fft = int(2**np.ceil(np.log2(nsampl + flen - 1.)))
sf = scipy.fftpack.fft(reference_sources, n=n_fft, axis=1)
sef = scipy.fftpack.fft(estimated_source, n=n_fft)
# inner products between delayed versions of reference_sources
G = np.zeros((nsrc * flen, nsrc * flen))
for i in range(nsrc):
for j in range(nsrc):
ssf = sf[i] * np.conj(sf[j])
ssf = np.real(scipy.fftpack.ifft(ssf))
ss = toeplitz(np.hstack((ssf[0], ssf[-1:-flen:-1])),
r=ssf[:flen])
G[i * flen: (i+1) * flen, j * flen: (j+1) * flen] = ss
G[j * flen: (j+1) * flen, i * flen: (i+1) * flen] = ss.T
# inner products between estimated_source and delayed versions of
# reference_sources
D = np.zeros(nsrc * flen)
for i in range(nsrc):
ssef = sf[i] * np.conj(sef)
ssef = np.real(scipy.fftpack.ifft(ssef))
D[i * flen: (i+1) * flen] = np.hstack((ssef[0], ssef[-1:-flen:-1]))
# Computing projection
# Distortion filters
try:
C = np.linalg.solve(G, D).reshape(flen, nsrc, order='F')
except np.linalg.linalg.LinAlgError:
C = np.linalg.lstsq(G, D)[0].reshape(flen, nsrc, order='F')
# Filtering
sproj = np.zeros(nsampl + flen - 1)
for i in range(nsrc):
sproj += fftconvolve(C[:, i], reference_sources[i])[:nsampl + flen - 1]
return sproj
def _project_images(reference_sources, estimated_source, flen, G=None):
"""Least-squares projection of estimated source on the subspace spanned by
delayed versions of reference sources, with delays between 0 and flen-1.
Passing G as all zeros will populate the G matrix and return it so it can
    be passed into the next call to avoid recomputing G (this only works
if not computing permutations).
"""
nsrc = reference_sources.shape[0]
nsampl = reference_sources.shape[1]
nchan = reference_sources.shape[2]
reference_sources = np.reshape(np.transpose(reference_sources, (2, 0, 1)),
(nchan*nsrc, nsampl), order='F')
# computing coefficients of least squares problem via FFT ##
# zero padding and FFT of input data
reference_sources = np.hstack((reference_sources,
np.zeros((nchan*nsrc, flen - 1))))
estimated_source = \
np.hstack((estimated_source.transpose(), np.zeros((nchan, flen - 1))))
n_fft = int(2**np.ceil(np.log2(nsampl + flen - 1.)))
sf = scipy.fftpack.fft(reference_sources, n=n_fft, axis=1)
sef = scipy.fftpack.fft(estimated_source, n=n_fft)
# inner products between delayed versions of reference_sources
if G is None:
saveg = False
G = np.zeros((nchan * nsrc * flen, nchan * nsrc * flen))
for i in range(nchan * nsrc):
for j in range(i+1):
ssf = sf[i] * np.conj(sf[j])
ssf = np.real(scipy.fftpack.ifft(ssf))
ss = toeplitz(np.hstack((ssf[0], ssf[-1:-flen:-1])),
r=ssf[:flen])
G[i * flen: (i+1) * flen, j * flen: (j+1) * flen] = ss
G[j * flen: (j+1) * flen, i * flen: (i+1) * flen] = ss.T
else: # avoid recomputing G (only works if no permutation is desired)
saveg = True # return G
if np.all(G == 0): # only compute G if passed as 0
G = np.zeros((nchan * nsrc * flen, nchan * nsrc * flen))
for i in range(nchan * nsrc):
for j in range(i+1):
ssf = sf[i] * np.conj(sf[j])
ssf = np.real(scipy.fftpack.ifft(ssf))
ss = toeplitz(np.hstack((ssf[0], ssf[-1:-flen:-1])),
r=ssf[:flen])
G[i * flen: (i+1) * flen, j * flen: (j+1) * flen] = ss
G[j * flen: (j+1) * flen, i * flen: (i+1) * flen] = ss.T
# inner products between estimated_source and delayed versions of
# reference_sources
D = np.zeros((nchan * nsrc * flen, nchan))
for k in range(nchan * nsrc):
for i in range(nchan):
ssef = sf[k] * np.conj(sef[i])
ssef = np.real(scipy.fftpack.ifft(ssef))
D[k * flen: (k+1) * flen, i] = \
np.hstack((ssef[0], ssef[-1:-flen:-1])).transpose()
# Computing projection
# Distortion filters
try:
C = np.linalg.solve(G, D).reshape(flen, nchan*nsrc, nchan, order='F')
except np.linalg.linalg.LinAlgError:
C = np.linalg.lstsq(G, D)[0].reshape(flen, nchan*nsrc, nchan,
order='F')
# Filtering
sproj = np.zeros((nchan, nsampl + flen - 1))
for k in range(nchan * nsrc):
for i in range(nchan):
sproj[i] += fftconvolve(C[:, k, i].transpose(),
reference_sources[k])[:nsampl + flen - 1]
# return G only if it was passed in
if saveg:
return sproj, G
else:
return sproj
def _bss_source_crit_extended(s_true, e_spat, e_interf, e_noise, e_artif):
"""Measurement of the separation quality for a given source in terms of
filtered true source, interference, noise and artifacts.
input:
s_true + e_spat: allowed variation of target speaker
e_interf: component caused by interfering speakers
e_noise: component caused by noise
e_artif: component not caused by other speakers or noise
output:
SDR,SIR,SNR and SAR
"""
# energy ratios
s_filt = s_true + e_spat
sdr = _safe_db(np.sum(s_filt**2), np.sum((e_interf + e_noise + e_artif)**2))
sir = _safe_db(np.sum(s_filt**2), np.sum(e_interf**2))
snr = _safe_db(np.sum((s_filt+e_interf)**2), np.sum(e_noise**2))
sar = _safe_db(np.sum((s_filt + e_interf + e_noise)**2), np.sum(e_artif**2))
    return (sdr, sir, snr, sar)
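# Spelled out, the ratios above follow the BSS Eval convention, e.g.
#     SDR = 10 * log10(||s_filt||^2 / ||e_interf + e_noise + e_artif||^2)
#     SNR = 10 * log10(||s_filt + e_interf||^2 / ||e_noise||^2)
#     SAR = 10 * log10(||s_filt + e_interf + e_noise||^2 / ||e_artif||^2)
# i.e. moving from SDR towards SAR, error terms are progressively counted as
# part of the allowed signal rather than as distortion.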
def _bss_source_crit(s_true, e_spat, e_interf, e_artif):
"""Measurement of the separation quality for a given source in terms of
filtered true source, interference and artifacts.
"""
# energy ratios
s_filt = s_true + e_spat
sdr = _safe_db(np.sum(s_filt**2), np.sum((e_interf + e_artif)**2))
sir = _safe_db(np.sum(s_filt**2), np.sum(e_interf**2))
sar = _safe_db(np.sum((s_filt + e_interf)**2), np.sum(e_artif**2))
return (sdr, sir, sar)
def _bss_image_crit(s_true, e_spat, e_interf, e_artif):
"""Measurement of the separation quality for a given image in terms of
filtered true source, spatial error, interference and artifacts.
"""
# energy ratios
sdr = _safe_db(np.sum(s_true**2), np.sum((e_spat+e_interf+e_artif)**2))
isr = _safe_db(np.sum(s_true**2), np.sum(e_spat**2))
sir = _safe_db(np.sum((s_true+e_spat)**2), np.sum(e_interf**2))
sar = _safe_db(np.sum((s_true+e_spat+e_interf)**2), np.sum(e_artif**2))
return (sdr, isr, sir, sar)
def _safe_db(num, den):
"""Properly handle the potential +Inf db SIR, instead of raising a
RuntimeWarning. Only denominator is checked because the numerator can never
be 0.
"""
if den == 0:
return np.Inf
return 10 * np.log10(num / den)
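# For instance, _safe_db(100.0, 1.0) evaluates to 10 * log10(100) = 20.0 dB,
# while _safe_db(1.0, 0.0) returns +Inf instead of triggering a
# divide-by-zero RuntimeWarning.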
def evaluate(reference_sources, estimated_sources, **kwargs):
"""Compute all metrics for the given reference and estimated signals.
NOTE: This will always compute :func:`mir_eval.separation.bss_eval_images`
for any valid input and will additionally compute
:func:`mir_eval.separation.bss_eval_sources` for valid input with fewer
than 3 dimensions.
Examples
--------
>>> # reference_sources[n] should be an ndarray of samples of the
>>> # n'th reference source
>>> # estimated_sources[n] should be the same for the n'th estimated source
>>> scores = mir_eval.separation.evaluate(reference_sources,
... estimated_sources)
Parameters
----------
reference_sources : np.ndarray, shape=(nsrc, nsampl[, nchan])
matrix containing true sources
estimated_sources : np.ndarray, shape=(nsrc, nsampl[, nchan])
matrix containing estimated sources
kwargs
Additional keyword arguments which will be passed to the
appropriate metric or preprocessing functions.
Returns
-------
scores : dict
Dictionary of scores, where the key is the metric name (str) and
the value is the (float) score achieved.
"""
# Compute all the metrics
scores = collections.OrderedDict()
sdr, isr, sir, sar, perm = filter_kwargs(
bss_eval_images,
reference_sources,
estimated_sources,
**kwargs
)
scores['Images - Source to Distortion'] = sdr.tolist()
scores['Images - Image to Spatial'] = isr.tolist()
scores['Images - Source to Interference'] = sir.tolist()
scores['Images - Source to Artifact'] = sar.tolist()
scores['Images - Source permutation'] = perm.tolist()
sdr, isr, sir, sar, perm = filter_kwargs(
bss_eval_images_framewise,
reference_sources,
estimated_sources,
**kwargs
)
scores['Images Frames - Source to Distortion'] = sdr.tolist()
scores['Images Frames - Image to Spatial'] = isr.tolist()
scores['Images Frames - Source to Interference'] = sir.tolist()
scores['Images Frames - Source to Artifact'] = sar.tolist()
scores['Images Frames - Source permutation'] = perm.tolist()
# Verify we can compute sources on this input
if reference_sources.ndim < 3 and estimated_sources.ndim < 3:
sdr, sir, sar, perm = filter_kwargs(
bss_eval_sources_framewise,
reference_sources,
estimated_sources,
**kwargs
)
scores['Sources Frames - Source to Distortion'] = sdr.tolist()
scores['Sources Frames - Source to Interference'] = sir.tolist()
scores['Sources Frames - Source to Artifact'] = sar.tolist()
scores['Sources Frames - Source permutation'] = perm.tolist()
sdr, sir, sar, perm = filter_kwargs(
bss_eval_sources,
reference_sources,
estimated_sources,
**kwargs
)
scores['Sources - Source to Distortion'] = sdr.tolist()
scores['Sources - Source to Interference'] = sir.tolist()
scores['Sources - Source to Artifact'] = sar.tolist()
scores['Sources - Source permutation'] = perm.tolist()
return scores
if __name__ == "__main__":
# Simple demo
ts = np.linspace(0,5,10000)
srcs = np.array([np.sin(ts*600),
np.cos(357*ts+0.01)])
recons = srcs[::-1] + np.random.randn(*srcs.shape)*2
sdr, sir, sar, permut = bss_eval_sources(srcs, recons)
print("""SDR: {}
SIR: {}
SAR: {}""".format(sdr, sir, sar))
| JeroenZegers/Nabu-MSSS | nabu/postprocessing/scorers/bss_eval.py | Python | mit | 57,733 | 0.001472 |
#!/usr/bin/env python3
'''Check online DNSSEC signing module (just basic checks).'''
import dns.rdatatype
from dnstest.test import Test
from dnstest.utils import *
from dnstest.module import ModOnlineSign
t = Test(stress=False)
ModOnlineSign.check()
knot = t.server("knot")
zones = t.zone_rnd(4, dnssec=False, records=5)
t.link(zones, knot, journal_content="none")
knot.add_module(zones[0], ModOnlineSign())
knot.add_module(zones[1], ModOnlineSign("ECDSAP384SHA384", key_size="384"))
knot.dnssec(zones[2]).enable = True
knot.dnssec(zones[3]).enable = True
knot.dnssec(zones[3]).nsec3 = True
def check_zone(zone, dnskey_rdata_start):
# Check SOA record.
soa1 = knot.dig(zone.name, "SOA", dnssec=True)
soa1.check(rcode="NOERROR", flags="QR AA")
soa1.check_count(1, "RRSIG")
t.sleep(1) # Ensure different RRSIGs.
soa2 = knot.dig(zone.name, "SOA", dnssec=True)
soa2.check(rcode="NOERROR", flags="QR AA")
soa2.check_count(1, "RRSIG")
for rrset in soa1.resp.answer:
if rrset.rdtype == dns.rdatatype.SOA:
if rrset not in soa2.resp.answer:
set_err("DIFFERENT SOA")
check_log("ERROR: DIFFERENT SOA")
elif rrset.rdtype == dns.rdatatype.RRSIG:
if rrset in soa2.resp.answer:
set_err("UNCHANGED RRSIG")
check_log("ERROR: UNCHANGED RRSIG")
else:
set_err("UNEXPECTED RRSET")
check_log("ERROR: UNEXPECTED RRSET")
detail_log("%s" % rrset)
# Check DNSKEY record.
resp = knot.dig(zone.name, "DNSKEY", dnssec=True)
resp.check(rcode="NOERROR", flags="QR AA")
resp.check_count(1, "DNSKEY")
resp.check_count(1, "RRSIG")
for rrset in resp.resp.answer:
if rrset.rdtype != dns.rdatatype.DNSKEY:
continue
else:
isset(dnskey_rdata_start in rrset.to_text(), "DNSKEY ALGORITHM")
# Check NSEC record.
resp = knot.dig("nx." + zone.name, "A", dnssec=True)
resp.check(rcode="NOERROR", flags="QR AA")
resp.check_count(0, section="answer")
resp.check_count(1, "SOA", section="authority")
resp.check_count(1, "NSEC", section="authority")
resp.check_count(2, "RRSIG", section="authority")
t.start()
serial = knot.zones_wait(zones)
check_zone(zones[0], "257 3 13")
check_zone(zones[1], "257 3 14")
for z in zones:
knot.update_zonefile(z, random=True)
knot.reload()
knot.zones_wait(zones, serial)
t.end()
| CZ-NIC/knot | tests-extra/tests/modules/onlinesign/test.py | Python | gpl-3.0 | 2,463 | 0.001624 |
# -*- coding: utf-8 -*-
__author__ = 'Ostico <ostico@gmail.com>'
import unittest
import os
os.environ['DEBUG'] = "1"
os.environ['DEBUG_VERBOSE'] = "0"
import pyorient
class CommandTestCase(unittest.TestCase):
def __init__(self, *args, **kwargs):
super(CommandTestCase, self).__init__(*args, **kwargs)
self.client = None
self.cluster_info = None
self.class_id1 = None
def setUp(self):
self.client = pyorient.OrientDB("localhost", 2424)
self.client.connect("root", "root")
db_name = "test_tr"
try:
self.client.db_drop(db_name)
except pyorient.PyOrientStorageException as e:
print(e)
finally:
db = self.client.db_create(db_name, pyorient.DB_TYPE_GRAPH,
pyorient.STORAGE_TYPE_MEMORY)
pass
self.cluster_info = self.client.db_open(
db_name, "root", "root", pyorient.DB_TYPE_GRAPH, ""
)
self.class_id1 = \
self.client.command("create class my_v_class extends V")[0]
def test_boolean(self):
rec = self.client.command('create vertex v content {"abcdef":false,'
'"qwerty":TRUE}')
assert rec[0].abcdef is not True, "abcdef expected False: '%s'" % rec[
0].abcdef
assert rec[0].qwerty is True, "qwerty expected True: '%s'" % rec[
0].qwerty
rec_value = self.client.query('select from v')
assert rec_value[0].abcdef is not True, "abcdef expected False: '%s'" % \
rec_value[0].abcdef
assert rec_value[0].qwerty is True, "qwerty expected True: '%s'" % \
rec_value[0].qwerty
def test_record_create_nonstrings(self):
# this should succeed with no exception
self.client.record_create(self.class_id1, {'@my_v_class': {'a': 1.5, 'b': 'foo'}})
def test_record_create_embedded_list(self):
# this should succeed with no exception
self.client.record_create(self.class_id1, {'@my_v_class': {'a': ['bar', 'bar']}})
def test_record_create_embedded_dictionary(self):
# this should succeed with no exception
self.client.record_create(self.class_id1, {'@my_v_class': {'a': [{'bar': 'bar'}]}})
def test_new_orient_dict(self):
rec = self.client.command('create vertex v content {"a":false,'
'"q":TRUE}')
assert rec[0].a is False
assert rec[0].q is True
import re
        # this can differ from orientDB versions, so I use a regular expression
assert re.match('[0-1]', str(rec[0]._version))
assert rec[0]._rid == '#10:0'
rec = {'a': 1, 'b': 2, 'c': 3}
rec_position = self.client.record_create(3, rec)
assert rec_position.a == 1
assert rec_position.b == 2
assert rec_position.c == 3
        # this can differ from orientDB versions, so I use a regular expression
assert re.match('[0-1]', str(rec_position._version))
assert rec_position._rid == '#3:0'
res = self.client.query("select from " + rec_position._rid)
assert res[0].a == 1
assert res[0].b == 2
assert res[0].c == 3
        # this can differ from orientDB versions, so I use a regular expression
assert re.match('[0-1]', str(res[0]._version))
assert res[0]._rid == '#3:0'
print(res[0].oRecordData['a'])
def test_embedded_map(self):
res = self.client.command(
'create vertex v content {"a":1,"b":{"d":"e"},"c":3}'
)
# print(res[0])
res = self.client.command(
'create vertex v content {"a":1,"b":{},"c":3}'
)
# print(res[0])
# print(res[0].oRecordData['b'])
        assert res[0].oRecordData['b'] == {}, "Failed to assert that received " + \
                                              str(res[0].oRecordData['b']) + " equals '{}'"
res = self.client.command('create vertex v content {"a":1,"b":{}}')
# print(res[0])
        assert res[0].oRecordData['b'] == {}, "Failed to assert that received " \
                                              "" + str(res[0].oRecordData['b']) + \
                                              " equals '{}'"
res = self.client.command(
'create vertex v content {"b":{},"a":1,"d":{}}'
)
# print(res[0])
        assert res[0].oRecordData['b'] == {}, "Failed to assert that received " \
                                              "" + str(res[0].oRecordData['b']) + \
                                              " equals '{}'"
        assert res[0].oRecordData['d'] == {}, "Failed to assert that received " \
                                              "" + str(res[0].oRecordData['d']) + \
                                              " equals '{}'"
def test_nested_objects_1(self):
res = self.client.command(
'create vertex v content {"b":[[1]],"a":{},"d":[12],"c":["x"]}'
)
print(res[0])
def test_nested_objects_2(self):
res = self.client.command(
'create vertex v content {"b":[[1,"abc"]]}'
)
print(res[0])
assert res[0].oRecordData['b'][0][0] == 1
assert res[0].oRecordData['b'][0][1] == "abc"
def test_nested_objects_3(self):
res = self.client.command(
'create vertex v content {"b":[[1,{"abc":2}]]}'
)
print(res[0])
assert res[0].oRecordData['b'][0][0] == 1
assert res[0].oRecordData['b'][0][1]['abc'] == 2
def test_nested_objects_4(self):
res = self.client.command(
'create vertex v content {"b":[[1,{"abc":2}],[3,{"cde":4}]]}'
)
print(res[0])
assert res[0].oRecordData['b'][0][0] == 1
assert res[0].oRecordData['b'][0][1]['abc'] == 2
assert res[0].oRecordData['b'][1][0] == 3
assert res[0].oRecordData['b'][1][1]['cde'] == 4
def test_nested_objects_5(self):
res = self.client.command(
'create vertex v content '
'{"b":[[1,{"dx":[1,2]},"abc"]],"a":{},"d":[12],"c":["x"],"s":111}'
)
assert res[0].oRecordData['b'][0][0] == 1
assert res[0].oRecordData['b'][0][1]['dx'][0] == 1
assert res[0].oRecordData['b'][0][1]['dx'][1] == 2
assert res[0].oRecordData['b'][0][2] == "abc"
assert res[0].oRecordData['a'] == {}
assert res[0].oRecordData['d'][0] == 12
assert res[0].oRecordData['c'][0] == "x"
assert res[0].oRecordData['s'] == 111
print(res[0])
def test_nested_objects_6(self):
res = self.client.command(
'create vertex v content '
'{"b":[[1,2,"abc"]]}'
)
assert res[0].oRecordData['b'][0][0] == 1
assert res[0].oRecordData['b'][0][1] == 2
assert res[0].oRecordData['b'][0][2] == "abc"
print(res[0])
def test_nested_objects_7(self):
res = self.client.command(
'create vertex v content '
'{"b":[{"xx":{"xxx":[1,2,"abc"]}}]}'
)
assert isinstance(res[0].oRecordData['b'], list)
assert isinstance(res[0].oRecordData['b'][0], dict)
assert isinstance(res[0].oRecordData['b'][0]['xx'], dict)
assert isinstance(res[0].oRecordData['b'][0]['xx']['xxx'], list)
assert res[0].oRecordData['b'][0]['xx']['xxx'][0] == 1
assert res[0].oRecordData['b'][0]['xx']['xxx'][1] == 2
assert res[0].oRecordData['b'][0]['xx']['xxx'][2] == "abc"
print(res[0])
def test_nested_objects_8(self):
res = self.client.command(
'create vertex v content '
'{"b":[{"xx":{"xxx":[1,2,"abc"]}}],"c":[{"yy":{"yyy":[3,4,"cde"]}}]}'
)
assert isinstance(res[0].oRecordData['b'], list)
assert isinstance(res[0].oRecordData['b'][0], dict)
assert isinstance(res[0].oRecordData['b'][0]['xx'], dict)
assert isinstance(res[0].oRecordData['b'][0]['xx']['xxx'], list)
assert res[0].oRecordData['b'][0]['xx']['xxx'][0] == 1
assert res[0].oRecordData['b'][0]['xx']['xxx'][1] == 2
assert res[0].oRecordData['b'][0]['xx']['xxx'][2] == "abc"
assert isinstance(res[0].oRecordData['c'], list)
assert isinstance(res[0].oRecordData['c'][0], dict)
assert isinstance(res[0].oRecordData['c'][0]['yy'], dict)
assert isinstance(res[0].oRecordData['c'][0]['yy']['yyy'], list)
assert res[0].oRecordData['c'][0]['yy']['yyy'][0] == 3
assert res[0].oRecordData['c'][0]['yy']['yyy'][1] == 4
assert res[0].oRecordData['c'][0]['yy']['yyy'][2] == "cde"
print(res[0])
def test_nested_objects_9(self):
res = self.client.command(
'create vertex v content '
'{"a":[[1,2],[3,4],[5,6],null]}'
)
assert isinstance(res[0].oRecordData['a'], list)
assert isinstance(res[0].oRecordData['a'][0], list)
assert isinstance(res[0].oRecordData['a'][1], list)
assert isinstance(res[0].oRecordData['a'][2], list)
assert res[0].oRecordData['a'][0][0] == 1
assert res[0].oRecordData['a'][0][1] == 2
print(res[0])
def test_nested_objects_10(self):
res = self.client.command(
'create vertex v content '
'{"embedded_map":{"one":[1,2]}}'
)
assert isinstance(res[0].oRecordData['embedded_map'], dict)
assert isinstance(res[0].oRecordData['embedded_map']['one'], list)
assert res[0].oRecordData['embedded_map']['one'][0] == 1
assert res[0].oRecordData['embedded_map']['one'][1] == 2
print(res[0])
def test_nested_objects_11(self):
res = self.client.command(
'create vertex v content '
'{"embedded_map":{"one":{"three":4}}}'
)
assert isinstance(res[0].oRecordData['embedded_map'], dict)
assert isinstance(res[0].oRecordData['embedded_map']['one'], dict)
assert res[0].oRecordData['embedded_map']['one']["three"] == 4
print(res[0])
def test_nested_objects_12(self):
res = self.client.command(
'create vertex v content '
'{"embedded_map":{"one":2}}'
)
assert isinstance(res[0].oRecordData['embedded_map'], dict)
assert res[0].oRecordData['embedded_map']['one'] == 2
print(res[0])
def test_nested_objects_13(self):
res = self.client.command(
'create vertex v content '
'{"a":1,"b":{},"c":3}'
)
assert res[0].oRecordData['a'] == 1
assert isinstance(res[0].oRecordData['b'], dict)
assert len(res[0].oRecordData['b']) == 0
assert res[0].oRecordData['c'] == 3
print(res[0])
def test_quotes(self):
import json
test_data = {'scenario': 'a "quote" follows'}
record = self.client.command("CREATE VERTEX V CONTENT " +
json.dumps(test_data))[0]
assert record._rid == '#10:0'
assert record.oRecordData['scenario'] == 'a "quote" follows'
def test_db_list(self):
self.client.connect("root", "root")
databases = self.client.db_list()
assert databases.oRecordData['databases']['GratefulDeadConcerts']
def test_datetime(self):
x = self.client.query(
"SELECT DATE('2015-01-02 03:04:05')"
)
x = x[0].oRecordData
import datetime
assert 'DATE' in x
assert isinstance(x['DATE'], datetime.datetime)
assert str(x['DATE']) == '2015-01-02 03:04:05'
def test_deserialize_numeric_types(self):
lon1 = self.client.command(
"CREATE VERTEX V CONTENT {'longitude': 1.1}")[0].longitude
lon2 = self.client.command(
"CREATE VERTEX V CONTENT {'longitude': -1.1}")[0].longitude
lon3 = self.client.command(
"CREATE VERTEX V CONTENT {'longNum': 5356336298435356336}"
)[0].longNum
lon4 = self.client.command(
"CREATE VERTEX V CONTENT {'sciNum': 6.022E23}"
)[0].sciNum
lon5 = self.client.command(
"CREATE VERTEX V CONTENT {'sciNum': 6.022E-23}"
)[0].sciNum
assert isinstance(lon1, float), \
"type(lon1) is not equal to 'float': %r" % type(lon1)
assert isinstance(lon2, float), \
"type(lon2) is not equal to 'float': %r" % type(lon2)
assert isinstance(lon4, float), \
"type(lon4) is not equal to 'float': %r" % type(lon4)
assert isinstance(lon5, float), \
"type(lon5) is not equal to 'float': %r" % type(lon5)
assert isinstance(lon3, int), \
"type(lon3) is not equal to 'int': %r" \
% type(lon3)
| orientechnologies/pyorient | tests/test_record_contents.py | Python | apache-2.0 | 12,906 | 0.000852 |
# Licensed under the GPL: https://www.gnu.org/licenses/old-licenses/gpl-2.0.html
# For details: https://github.com/PyCQA/pylint/blob/master/LICENSE
import collections
from astroid import nodes
class ASTWalker:
def __init__(self, linter):
# callbacks per node types
self.nbstatements = 0
self.visit_events = collections.defaultdict(list)
self.leave_events = collections.defaultdict(list)
self.linter = linter
self.exception_msg = False
def _is_method_enabled(self, method):
if not hasattr(method, "checks_msgs"):
return True
for msg_desc in method.checks_msgs:
if self.linter.is_message_enabled(msg_desc):
return True
return False
def add_checker(self, checker):
"""walk to the checker's dir and collect visit and leave methods"""
vcids = set()
lcids = set()
visits = self.visit_events
leaves = self.leave_events
for member in dir(checker):
cid = member[6:]
if cid == "default":
continue
if member.startswith("visit_"):
v_meth = getattr(checker, member)
# don't use visit_methods with no activated message:
if self._is_method_enabled(v_meth):
visits[cid].append(v_meth)
vcids.add(cid)
elif member.startswith("leave_"):
l_meth = getattr(checker, member)
# don't use leave_methods with no activated message:
if self._is_method_enabled(l_meth):
leaves[cid].append(l_meth)
lcids.add(cid)
visit_default = getattr(checker, "visit_default", None)
if visit_default:
for cls in nodes.ALL_NODE_CLASSES:
cid = cls.__name__.lower()
if cid not in vcids:
visits[cid].append(visit_default)
# for now we have no "leave_default" method in Pylint
def walk(self, astroid):
"""call visit events of astroid checkers for the given node, recurse on
its children, then leave events.
"""
cid = astroid.__class__.__name__.lower()
# Detect if the node is a new name for a deprecated alias.
# In this case, favour the methods for the deprecated
# alias if any, in order to maintain backwards
# compatibility.
visit_events = self.visit_events.get(cid, ())
leave_events = self.leave_events.get(cid, ())
try:
if astroid.is_statement:
self.nbstatements += 1
# generate events for this node on each checker
for callback in visit_events or ():
callback(astroid)
# recurse on children
for child in astroid.get_children():
self.walk(child)
for callback in leave_events or ():
callback(astroid)
except Exception:
if self.exception_msg is False:
file = getattr(astroid.root(), "file", None)
print(f"Exception on node {repr(astroid)} in file '{file}'")
self.exception_msg = True
raise
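# Typical wiring of the walker (sketch only; ``linter``, ``my_checker`` and
# ``astroid_module`` are assumed to be provided by the caller):
#
#     walker = ASTWalker(linter)
#     walker.add_checker(my_checker)   # collects its visit_*/leave_* methods
#     walker.walk(astroid_module)      # fires callbacks while recursing the AST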
| ruchee/vimrc | vimfiles/bundle/vim-python/submodules/pylint/pylint/utils/ast_walker.py | Python | mit | 3,263 | 0.000306 |
import datetime, re
from mod_helper import *
debug = True
def sesamstrasseShow():
mediaList = ObjectContainer(no_cache=True)
if debug == True: Log("Running sesamstrasseShow()...")
try:
urlMain = "http://www.sesamstrasse.de"
content = getURL(urlMain+"/home/homepage1077.html")
spl = content.split('<div class="thumb">')
for i in range(1, len(spl), 1):
entry = spl[i]
match = re.compile('title="(.+?)"', re.DOTALL).findall(entry)
title = match[0]
match = re.compile('href="(.+?)"', re.DOTALL).findall(entry)
url = urlMain+match[0]
match = re.compile('src="(.+?)"', re.DOTALL).findall(entry)
thumb = urlMain+match[0]
thumb = thumb[:thumb.find("_")]+"_v-original.jpg"
match = re.compile('<div class="subline">(.+?) \\| (.+?):', re.DOTALL).findall(entry)
date = ""
duration = ""
if match:
date = match[0][0]
date = date[:date.rfind('.')].strip()
duration = match[0][1]
title = date+" - "+title
item = {
'title':title,
'url':url,
'thumb':thumb,
'duration':int(duration)*60000
}
if debug == True: Log("Adding: " + title)
vo = sesamstrasseCreateVideoObject(item)
mediaList.add(vo)
return mediaList
except Exception as e:
if debug == True: Log("ERROR: " + str(e))
def sesamstrasseCreateVideoObject(item, container = False):
if debug == True: Log("Running sesamstrasseCreateVideoObject()...")
if debug == True: Log("Creating VideoObject: " + str(item))
try:
vo = VideoClipObject(
key = Callback(sesamstrasseCreateVideoObject, item = item, container = True),
title = item['title'],
thumb = item['thumb'],
duration = item['duration'],
rating_key = item['url'],
items = []
)
# Lookup URL and create MediaObject.
mo = MediaObject(parts = [PartObject(key = Callback(sesamstrasseGetStreamingUrl, url = item['url']))])
# Append mediaobject to clipobject.
vo.items.append(mo)
if container:
return ObjectContainer(objects = [vo])
else:
return vo
except Exception as e:
if debug == True: Log("ERROR: " + str(e))
def sesamstrasseGetStreamingUrl(url):
if debug == True: Log("Running sesamstrasseGetStreamingUrl()...")
try:
quality = 'hd'
if ',sesamstrasse' in url:
regex_suffix_id = ',sesamstrasse(.+?).html'
try: suffix_id = re.findall(regex_suffix_id, url)[0]
except: suffix_id = '3000'
else: suffix_id = '3000'
content = getURL(url)
json_uuid = re.findall('player_image-(.+?)_', content)[0]
json_url = 'http://www.sesamstrasse.de/sendungsinfos/sesamstrasse%s-ppjson_image-%s.json' % (suffix_id, json_uuid)
json = getURL(json_url)
regex_qualities = '\.,(.+?),\.'
qualities = re.findall(regex_qualities, json)[-1].split(',')
if not (quality in qualities): quality = qualities[-1]
regex_url = '"src": "http://(.+?)"'
urls = re.findall(regex_url, json)
stream_url = ''
for url in urls:
if url.endswith('.mp4'):
stream_url = 'http://' + url[:-6] + quality + '.mp4'
break
if not stream_url: return
if debug == True: Log("Playing video URL: " + stream_url)
return Redirect(stream_url)
except Exception as e:
if debug == True: Log("ERROR: " + str(e))
| realriot/KinderThek.bundle | Contents/Code/mod_sesamstrasse.py | Python | bsd-3-clause | 3,151 | 0.047287 |
import os
from pymco.test import ctxt
from . import base
class RabbitMQTestCase(base.IntegrationTestCase):
'''RabbitMQ integration test case.'''
CTXT = {
'connector': 'rabbitmq',
'plugin.rabbitmq.vhost': '/mcollective',
'plugin.rabbitmq.pool.size': '1',
'plugin.rabbitmq.pool.1.host': 'localhost',
'plugin.rabbitmq.pool.1.port': '61613',
'plugin.rabbitmq.pool.1.user': 'mcollective',
'plugin.rabbitmq.pool.1.password': 'marionette',
}
class TestWithRabbitMQMCo22x(base.MCollective22x, RabbitMQTestCase):
'''MCollective integration test case.'''
class TestWithRabbitMQMCo23x(base.MCollective23x, RabbitMQTestCase):
'''MCollective integration test case.'''
class TestWithRabbitMQMCo24x(base.MCollective24x, RabbitMQTestCase):
'''MCollective integration test case.'''
class TestWithRabbitMQSSLMCo23x(base.MCollective23x, RabbitMQTestCase):
"""MCollective integration test case."""
CTXT = {
'connector': 'rabbitmq',
'plugin.rabbitmq.vhost': '/mcollective',
'plugin.rabbitmq.pool.size': '1',
'plugin.rabbitmq.pool.1.host': 'localhost',
'plugin.rabbitmq.pool.1.port': 61612,
'plugin.rabbitmq.pool.1.user': 'mcollective',
'plugin.rabbitmq.pool.1.password': 'marionette',
'plugin.rabbitmq.pool.1.ssl': 'true',
'plugin.rabbitmq.pool.1.ssl.ca': os.path.join(ctxt.ROOT,
'fixtures/ca.pem'),
'plugin.rabbitmq.pool.1.ssl.key': os.path.join(
ctxt.ROOT,
'fixtures/activemq_private.pem'),
'plugin.rabbitmq.pool.1.ssl.cert': os.path.join(
ctxt.ROOT,
'fixtures/activemq_cert.pem',
),
}
| rafaduran/python-mcollective | tests/integration/test_with_rabbitmq.py | Python | bsd-3-clause | 1,771 | 0 |
# -*- coding: utf-8 -
#
# This file is part of socketpool.
# See the NOTICE for more information.
import eventlet
from eventlet.green import select
from eventlet.green import socket
from eventlet import queue
from socketpool.pool import ConnectionPool
sleep = eventlet.sleep
Socket = socket.socket
Select = select.select
Semaphore = eventlet.semaphore.BoundedSemaphore
class PriorityQueue(queue.PriorityQueue):
def __iter__(self):
return self
def next(self):
try:
result = self.get(block=False)
except queue.Empty:
raise StopIteration
return result
class ConnectionReaper(object):
running = False
def __init__(self, pool, delay=150):
self.pool = pool
self.delay = delay
def start(self):
self.running = True
g = eventlet.spawn(self._exec)
g.link(self._exit)
def _exit(self, g):
try:
g.wait()
except:
pass
self.running = False
def _exec(self):
while True:
eventlet.sleep(self.delay)
self.pool.murder_connections()
def ensure_started(self):
if not self.running:
self.start()
| emidln/django_roa | env/lib/python2.7/site-packages/socketpool/backend_eventlet.py | Python | bsd-3-clause | 1,213 | 0.002473 |
from __future__ import print_function
from __future__ import division
from builtins import input
import subprocess
from time import sleep
from pivotpi import *
try:
import wx
except ImportError:
    raise ImportError("The wxPython module is required to run this program")
total_servos = 8
horizontal_spacer = 20
vertical_spacer = 30
total_ids_per_line = 5
degree_str = " Deg"
class PivotControlApp(wx.App):
def OnInit(self):
self.frame = BoxSizerFrame(None, title="PivotPi Control")
self.frame.Show()
return True
class BoxSizerFrame(wx.Frame):
def __init__(self, *args, **kwargs):
super(BoxSizerFrame, self).__init__(*args,**kwargs)
self.panel = BoxSizerPanel(self)
sizer = wx.BoxSizer(wx.VERTICAL)
sizer.Add(self.panel, 1, wx.EXPAND)
self.SetSizer(sizer)
self.SetInitialSize()
for i in range(total_servos):
self.Bind(wx.EVT_BUTTON, self.OnExit, self.panel.exit_btn)
self.Bind(wx.EVT_BUTTON, self.OnCode, self.panel.code_btn)
self.panel.slider[i].Bind(wx.EVT_LEFT_UP, self.on_left_click)
self.panel.slider[i].Bind(wx.EVT_SCROLL_THUMBTRACK, self.on_slide)
self.Bind(wx.EVT_CHECKBOX, self.OnLED, self.panel.led[i])
self.panel.txt[i].Bind(wx.EVT_CHAR, self.OnText, self.panel.txt[i])
self.Centre()
def OnExit(self, event):
exit()
def OnCode(self, event):
pivotpi_path_cmd = "pcmanfm /home/pi/Dexter/PivotPi"
subprocess.Popen(pivotpi_path_cmd, shell=True)
def on_left_click(self, event):
event_id = event.GetId()
event_obj = event.GetEventObject()
position = event_obj.GetValue()
servo_angle = int(event_obj.GetValue())
servo_id = int(event_id)//total_ids_per_line
print ("Setting Pivot {} to {}".format(servo_id+1, servo_angle))
p.angle(servo_id, servo_angle )
event.Skip()
def on_slide(self, event):
event_id = event.GetId()
event_obj = event.GetEventObject()
servo_id = int(event_id)//total_ids_per_line
position = event_obj.GetValue()
self.panel.txt[servo_id].SetValue(str(position))
event.Skip()
def OnText(self, event):
event_id = event.GetId()
event_obj = event.GetEventObject()
servo_id = int(event_id)//total_ids_per_line
key_code = (event.GetKeyCode())
if key_code == 13 or key_code == 9: # ENTER KEY or TAB
try:
# the try may fail on getting a servo_angle
# when the field is empty or not an int
servo_angle = int(event_obj.GetValue())
print ("Setting Pivot {} to {}".format(servo_id+1, servo_angle))
p.angle(servo_id, servo_angle )
self.panel.slider[servo_id].SetValue(servo_angle)
except:
pass
self.panel.txt[(servo_id+1)].SetFocus()
event.Skip()
def OnLED(self, event):
led_id = int(event.GetId()//total_ids_per_line)
led_status = event.GetEventObject().GetValue()
print("Setting LED {} to {}".format(led_id+1, led_status*254))
p.led(led_id,led_status*254)
class BoxSizerPanel(wx.Panel):
def __init__(self, *args, **kwargs):
super(BoxSizerPanel, self).__init__(*args, **kwargs)
self.txt = []
self.fields = []
self.field_lbl = []
self.servo = []
self.slider = []
self.led = []
self._DoLayout()
def _DoLayout(self):
self.vsizer = wx.BoxSizer(wx.VERTICAL)
title = wx.StaticText(self, -1,
label="PivotPi Control Panel", style=wx.ALIGN_CENTRE)
        title.SetFont(wx.Font(20,
                              wx.FONTFAMILY_DEFAULT,
                              wx.FONTSTYLE_NORMAL,
                              wx.FONTWEIGHT_BOLD))
title_sizer = wx.BoxSizer(wx.HORIZONTAL)
bitmap = wx.Image("PivotPiIcon.jpg",wx.BITMAP_TYPE_ANY).ConvertToBitmap()
title_icon = wx.StaticBitmap(self, -1, bitmap)
title_sizer.Add(title, 1, wx.ALIGN_CENTER_VERTICAL|wx.EXPAND, 20)
title_sizer.Add(title_icon,0,wx.ALIGN_CENTER_VERTICAL|wx.LEFT,40)
self.vsizer.AddSpacer(20)
self.vsizer.Add(title_sizer, 1, wx.ALIGN_CENTER_HORIZONTAL, 20)
for i in range(total_servos):
self.fields.append(wx.BoxSizer(wx.HORIZONTAL))
txt = wx.StaticText(self, label="Servo/Pivot {}:".format(i+1))
            txt.SetFont(wx.Font(14,
                                wx.FONTFAMILY_DEFAULT,
                                wx.FONTSTYLE_NORMAL,
                                wx.FONTWEIGHT_BOLD))
self.servo.append(txt)
self.fields[i].AddSpacer(horizontal_spacer)
self.fields[i].Add(self.servo[i])
self.slider.append(wx.Slider(self, id=i*total_ids_per_line, minValue=0, maxValue=180, size=(180,20)))
self.fields[i].AddSpacer(horizontal_spacer)
self.fields[i].Add(self.slider[i])
self.field_lbl.append(wx.StaticText(self, label="target angle:"))
self.txt.append(wx.TextCtrl(self, id=i*total_ids_per_line+3))
self.fields[i].AddSpacer(horizontal_spacer)
self.fields[i].Add(self.field_lbl[i])
self.fields[i].AddSpacer(5)
self.fields[i].Add(self.txt[i])
self.fields[i].AddSpacer(horizontal_spacer)
self.led.append(wx.CheckBox(self, id=i*total_ids_per_line+4, label="LED {}".format(i+1)))
self.fields[i].Add(self.led[i])
self.fields[i].AddSpacer(horizontal_spacer)
self.vsizer.AddSpacer(vertical_spacer)
for i in range(total_servos):
self.vsizer.Add(self.fields[i])
self.vsizer.AddSpacer(10)
self.vsizer.AddSpacer(vertical_spacer-10)
exit_sizer = wx.BoxSizer(wx.HORIZONTAL)
self.exit_btn = wx.Button(self, label="Exit")
self.exit_txt = wx.StaticText(self, label=" ")
self.code_btn = wx.Button(self, label="Go to PivotPi Code Folder")
self.exit_btn.SetBackgroundColour("White")
self.code_btn.SetBackgroundColour("White")
exit_sizer.Add(self.exit_txt, 1, wx.EXPAND|wx.ALIGN_CENTER_VERTICAL|wx.LEFT, 100)
exit_sizer.Add(self.code_btn, 0, wx.ALIGN_CENTER_VERTICAL|wx.CENTER, 60)
exit_sizer.Add(self.exit_txt, 1, wx.ALIGN_CENTER_VERTICAL|wx.EXPAND|wx.LEFT, 100)
exit_sizer.Add(self.exit_btn, 0, wx.ALIGN_CENTER_VERTICAL|wx.RIGHT, 10)
self.vsizer.Add(exit_sizer, 1, wx.EXPAND|wx.ALIGN_CENTER_VERTICAL|wx.ALIGN_RIGHT|wx.BOTTOM, 10)
self.SetSizer(self.vsizer)
if __name__ == "__main__":
try:
p = PivotPi()
app = PivotControlApp(False)
except:
class NoPivot(wx.App):
def OnInit(self):
dlg = wx.MessageBox("Unfortunately no PivotPi is Detected\nhttp://DexterIndustries.com/pivotpi for more details",
"ERROR", wx.ICON_WARNING)
return True
app = NoPivot(False)
app.MainLoop()
| karan259/PivotPi | Software/Python/Control_Panel/pivot_control_with_sliders.py | Python | mit | 7,313 | 0.008615 |
import paramiko
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(
paramiko.AutoAddPolicy())
ssh.connect('127.0.0.1', username='xxxxxxx',
password='xxxxxxxxxxx')
stdin, stdout, stderr = ssh.exec_command("uptime")
print(stdout.readlines())
ssh.close()
| eldie1984/Scripts | pg/para.py | Python | gpl-2.0 | 262 | 0.007634 |
# Moothedata command.
import click
import fiona as fio
from fiona.fio.cli import cli
from fio_metasay import moothedata
@cli.command(short_help="Cowsay some dataset metadata.")
@click.argument(
'inputfile',
type=click.Path(resolve_path=True),
required=True,
metavar="INPUT")
@click.option('--item', default=None, help="Select a metadata item.")
@click.pass_context
def metasay(ctx, inputfile, item):
"""Moo some dataset metadata to stdout."""
with fio.open(inputfile) as src:
meta = src.meta
click.echo(moothedata(meta, key=item))
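# Example invocation (hypothetical dataset path; assumes the plugin is
# installed so that Fiona's CLI discovers the ``metasay`` entry point):
#
#   $ fio metasay --item crs input.shp
#
# which moos only the dataset's CRS instead of the full metadata dict.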
| geowurster/fio-plugin-example | fio_metasay/scripts/cli.py | Python | mit | 575 | 0 |
"""
Powers of Three: Given a positive integer N, return the largest integer k such
that 3**k < N.
For example,
>>> largestPower(3)
0
>>> largestPower(4)
1
>>> largestPower(28)
3
>>> largestPower(80)
3
>>> largestPower(82)
4
>>> largestPower(20700)
9
>>> largestPower(10**7)
14
>>> largestPower(10**8)
16
"""
from math import ceil, log
def largestPower_v1(n):
k = -1
while 3**k < n:
if 3**(k+1) >= n:
break
k += 1
return k
def largestPower_v2(n):
return int(ceil(log(n, 3)) - 1)
if __name__ == "__main__":
import doctest
import timeit
for func in [largestPower_v1, largestPower_v2]:
largestPower = func
print("testing", func.__name__)
out = timeit.timeit('doctest.testmod()',
setup="from __main__ import doctest,largestPower",
number=1000)
print(out)
| FranzSchubert92/cw | python/powers_of_3.py | Python | bsd-3-clause | 908 | 0.007709 |
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
from builtins import * # NOQA
from future import standard_library
standard_library.install_aliases() # NOQA
import chainer
from chainer import functions as F
from chainer import links as L
from chainer import optimizers
import numpy as np
from chainerrl.envs.abc import ABC
from chainerrl.explorers.epsilon_greedy import LinearDecayEpsilonGreedy
from chainerrl.links import Sequence
from chainerrl import policies
from chainerrl import q_function
from chainerrl import replay_buffer
from basetest_training import _TestTraining
class _TestPGTOnABC(_TestTraining):
def make_agent(self, env, gpu):
model = self.make_model(env)
policy = model['policy']
q_func = model['q_function']
actor_opt = optimizers.Adam(alpha=1e-4)
actor_opt.setup(policy)
critic_opt = optimizers.Adam(alpha=1e-3)
critic_opt.setup(q_func)
explorer = self.make_explorer(env)
rbuf = self.make_replay_buffer(env)
return self.make_pgt_agent(env=env, model=model,
actor_opt=actor_opt, critic_opt=critic_opt,
explorer=explorer, rbuf=rbuf, gpu=gpu)
def make_pgt_agent(self, env, model, actor_opt, critic_opt, explorer,
rbuf, gpu):
raise NotImplementedError()
def make_explorer(self, env):
def random_action_func():
a = env.action_space.sample()
if isinstance(a, np.ndarray):
return a.astype(np.float32)
else:
return a
return LinearDecayEpsilonGreedy(1.0, 0.2, 1000, random_action_func)
def make_replay_buffer(self, env):
return replay_buffer.ReplayBuffer(10 ** 5)
class _TestPGTOnContinuousPOABC(_TestPGTOnABC):
def make_model(self, env):
n_dim_obs = env.observation_space.low.size
n_dim_action = env.action_space.low.size
n_hidden_channels = 50
policy = Sequence(
L.Linear(n_dim_obs, n_hidden_channels),
F.relu,
L.Linear(n_hidden_channels, n_hidden_channels),
F.relu,
L.LSTM(n_hidden_channels, n_hidden_channels),
policies.FCGaussianPolicy(
n_input_channels=n_hidden_channels,
action_size=n_dim_action,
min_action=env.action_space.low,
max_action=env.action_space.high)
)
q_func = q_function.FCLSTMSAQFunction(
n_dim_obs=n_dim_obs,
n_dim_action=n_dim_action,
n_hidden_layers=2,
n_hidden_channels=n_hidden_channels)
return chainer.Chain(policy=policy, q_function=q_func)
def make_env_and_successful_return(self, test):
return ABC(discrete=False, partially_observable=True,
deterministic=test), 1
def make_replay_buffer(self, env):
return replay_buffer.EpisodicReplayBuffer(10 ** 5)
class _TestPGTOnContinuousABC(_TestPGTOnABC):
def make_model(self, env):
n_dim_obs = env.observation_space.low.size
n_dim_action = env.action_space.low.size
n_hidden_channels = 50
policy = policies.FCGaussianPolicy(
n_input_channels=n_dim_obs,
n_hidden_layers=2,
n_hidden_channels=n_hidden_channels,
action_size=n_dim_action,
min_action=env.action_space.low,
max_action=env.action_space.high)
q_func = q_function.FCSAQFunction(
n_dim_obs=n_dim_obs,
n_dim_action=n_dim_action,
n_hidden_layers=2,
n_hidden_channels=n_hidden_channels)
return chainer.Chain(policy=policy, q_function=q_func)
def make_env_and_successful_return(self, test):
return ABC(discrete=False, deterministic=test), 1
| toslunar/chainerrl | tests/agents_tests/basetest_pgt.py | Python | mit | 3,966 | 0 |
import pytest
class TestDmypy:
@pytest.mark.complete(
"dmypy ", require_cmd=True, xfail="! dmypy --help &>/dev/null"
)
def test_commands(self, completion):
assert "help" in completion
assert not any("," in x for x in completion)
@pytest.mark.complete("dmypy -", require_cmd=True, require_longopt=True)
def test_options(self, completion):
assert "--help" in completion
| algorythmic/bash-completion | test/t/test_dmypy.py | Python | gpl-2.0 | 423 | 0 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2016-08-26 04:44
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Genre',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50)),
('slug', models.SlugField(blank=True, unique=True)),
('draft', models.BooleanField(default=False)),
('publish', models.DateField(blank=True)),
('created', models.DateTimeField(auto_now_add=True)),
('updated', models.DateTimeField(auto_now=True)),
('user', models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
| qrizan/moopy | moopy/genres/migrations/0001_initial.py | Python | mit | 1,137 | 0.001759 |
import numpy as np
class Site(object):
"""A class for general single site
Use this class to create a single site object. The site comes with identity
operator for a given dimension. To build specific site, additional operators
need be add with add_operator method.
"""
def __init__(self, dim):
"""Creates an empty site of dimension dim.
Parameters
----------
dim : an int
Size of the Hilbert space for single site. The dimension must be at
            least 1. A site of dim = 1 is trivial and represents the vacuum.
operators : a dictionary of string and numpy array (with ndim = 2).
Operators for the site.
"""
super(Site, self).__init__()
self.dim = dim
self.states = {}
self.operators = { "id" : np.eye(self.dim, self.dim) }
def add_operator(self, operator_name):
"""Adds an operator to the site with zero matrix.
Parameters
----------
operator_name : string
The operator name.
"""
self.operators[str(operator_name)] = np.zeros((self.dim, self.dim))
def add_state(self, state_name):
"""Adds an state to the site with zero list.
Parameters
----------
operator_name : string
The operator name.
"""
self.states[str(state_name)] = np.zeros(self.dim)
class SpinlessFermionSite(Site):
"""A site for spinless fermion models.
Use this class for spinless fermion sites. The Hilbert space is ordered
such as:
- the first state is empty site
- the second state is occupied site.
Notes
-----
Postcondition : The site has already built-in the operators for
c, c_dag, n.
"""
def __init__(self):
"""Creates the spin one-half site.
Notes
-----
Postcond : the dimension is set to 2
"""
super(SpinlessFermionSite, self).__init__(2)
# add the operators
self.add_operator("c")
self.add_operator("c_dag")
self.add_operator("n")
# for clarity
c = self.operators["c"]
c_dag = self.operators["c_dag"]
n = self.operators["n"]
# set the matrix elements different from zero to the right values
c[0, 1] = 1
c_dag[1, 0] = 1
n[1, 1] = 1
# add the states
self.add_state("empty")
self.add_state("occupied")
# for clarity
state_empty = self.states["empty"]
state_occupied = self.states["occupied"]
# set the list elements different from zero to the right values
state_empty[0] = 1.0
state_occupied[1] = 1.0
class SpinOneHalfSite(Site):
"""A site for spin 1/2 models.
Use this class for spin one-half sites. The Hilbert space is ordered
such as the first state is the spin down, and the second state is the
spin up.
Notes
-----
Postcondition : The site has already built-in the spin operators for
s_x, s_y, s_z, s_p, s_m.
"""
def __init__(self):
"""Creates the spin one-half site.
Notes
-----
Postcond : the dimension is set to 2
"""
super(SpinOneHalfSite, self).__init__(2)
# add the operators
self.add_operator("s_x")
self.add_operator("s_y")
self.add_operator("s_z")
self.add_operator("s_p")
self.add_operator("s_m")
# for clarity
s_x = self.operators["s_x"]
        self.operators["s_y"] = self.operators["s_y"].astype(complex)
        s_y = self.operators["s_y"]
s_z = self.operators["s_z"]
s_p = self.operators["s_p"]
s_m = self.operators["s_m"]
# set the matrix elements different from zero to the right values
s_x[0, 1] = 0.5
s_x[1, 0] = 0.5
s_y[0, 1] = 1j*(-0.5)
s_y[1, 0] = 1j*0.5
s_z[0, 0] = -0.5
s_z[1, 1] = 0.5
s_p[1, 0] = 1.0
s_m[0, 1] = 1.0
# add the states
self.add_state("spin_up")
self.add_state("spin_down")
self.add_state("empty")
self.add_state("occupied")
# for clarity
state_up = self.states["spin_up"]
state_down = self.states["spin_down"]
state_empty = self.states["empty"]
state_occupied = self.states["occupied"]
# set the list elements different from zero to the right values
state_up[1] = 1.0
state_down[0] = 1.0
state_occupied[1] = 1.0
state_empty[0] = 1.0
class ElectronicSite(Site):
"""A site for electronic models
You use this site for models where the single sites are electron
sites. The Hilbert space is ordered such as:
- the first state, labelled 0, is the empty site,
- the second, labelled 1, is spin down,
- the third, labelled 2, is spin up, and
- the fourth, labelled 3, is double occupancy.
Notes
-----
Postcond: The site has already built-in the spin operators for:
- c_up : destroys an spin up electron,
- c_up_dag, creates an spin up electron,
- c_down, destroys an spin down electron,
- c_down_dag, creates an spin down electron,
- s_z, component z of spin,
- s_p, raises the component z of spin,
- s_m, lowers the component z of spin,
- n_up, number of electrons with spin up,
- n_down, number of electrons with spin down,
- n, number of electrons, i.e. n_up+n_down, and
- u, number of double occupancies, i.e. n_up*n_down.
"""
def __init__(self):
super(ElectronicSite, self).__init__(4)
# add the operators
self.add_operator("c_up")
self.add_operator("c_up_dag")
self.add_operator("c_down")
self.add_operator("c_down_dag")
self.add_operator("s_z")
self.add_operator("n_up")
self.add_operator("n_down")
self.add_operator("n")
self.add_operator("u")
# for clarity
c_up = self.operators["c_up"]
c_up_dag = self.operators["c_up_dag"]
c_down = self.operators["c_down"]
c_down_dag = self.operators["c_down_dag"]
s_z = self.operators["s_z"]
n_up = self.operators["n_up"]
n_down = self.operators["n_down"]
n = self.operators["n"]
u = self.operators["u"]
# set the matrix elements different from zero to the right values
c_up[0,2] = 1.0
c_up[1,3] = 1.0
c_up_dag[2,0] = 1.0
c_up_dag[3,1] = 1.0
c_down[0,1] = 1.0
c_down[2,3] = 1.0
c_down_dag[1,0] = 1.0
c_down_dag[3,2] = 1.0
s_z[1,1] = -1.0
s_z[2,2] = 1.0
n_up[2,2] = 1.0
n_up[3,3] = 1.0
n_down[1,1] = 1.0
n_down[3,3] = 1.0
n[1,1] = 1.0
n[2,2] = 1.0
n[3,3] = 2.0
u[3,3] = 1.0
# add the states
self.add_state("empty")
self.add_state("spin_down")
self.add_state("spin_up")
self.add_state("double")
# for clarity
state_empty = self.states["empty"]
state_down = self.states["spin_down"]
state_up = self.states["spin_up"]
state_double = self.states["double"]
# set the list elements different from zero to the right values
state_empty[0] = 1.0
state_down[1] = 1.0
state_up[2] = 1.0
state_double[3] = 1.0
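if __name__ == "__main__":
    # Small self-check (illustrative only): build an electronic site and
    # verify that the spin-up state has occupation 1 under n_up.
    site = ElectronicSite()
    n_up = site.operators["n_up"]
    up = site.states["spin_up"]
    print(np.dot(up, np.dot(n_up, up)))  # expected output: 1.0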
| fhqgfss/MoHa | moha/modelsystem/sites.py | Python | mit | 7,169 | 0.02176 |
import attr
import types
import binascii
from eliot import start_action
from eliot.twisted import DeferredContext
from zope.interface import implementer
from sphinxmixcrypto import SphinxParams, SphinxPacket, ReplyBlock
from sphinxmixcrypto import IMixPKI, IReader, SECURITY_PARAMETER
from txmix import IMixTransport, IRouteFactory
@attr.s
class ClientProtocol(object):
"""
I am a sphinx mix network client protocol which means I act as a
proxy between the client and the transport. I decrypt messages
before proxying them.
"""
params = attr.ib(validator=attr.validators.instance_of(SphinxParams))
pki = attr.ib(validator=attr.validators.provides(IMixPKI))
client_id = attr.ib(validator=attr.validators.instance_of(bytes))
rand_reader = attr.ib(validator=attr.validators.provides(IReader))
packet_received_handler = attr.ib(validator=attr.validators.instance_of(types.FunctionType))
def make_connection(self, transport):
"""
connect this protocol with the transport
and start the transport
"""
assert IMixTransport.providedBy(transport)
self._decryption_tokens = {}
transport.register_protocol(self)
d = transport.start()
self.transport = transport
return d
def received(self, packet):
"""
receive a client packet, a message ID
and an encrypted payload
"""
message_id = packet[:16]
payload = packet[16:]
assert len(payload) == self.params.payload_size
self.message_received(message_id, payload)
def message_received(self, message_id, ciphertext):
"""
decrypt the message and pass it to the message handler
"""
message = self._decryption_tokens[message_id].decrypt(ciphertext)
self.packet_received_handler(message)
def send(self, route, message):
"""
send a wrapped inside a forward sphinx packet
"""
first_hop_addr = self.pki.get_mix_addr(self.transport.name, route[0])
sphinx_packet = SphinxPacket.forward_message(self.params, route, self.pki, route[-1], message, self.rand_reader)
raw_sphinx_packet = sphinx_packet.get_raw_bytes()
return self.transport.send(first_hop_addr, raw_sphinx_packet)
def create_reply_block(self, route):
"""
given a route and a client ID
"""
message_id = self.rand_reader.read(SECURITY_PARAMETER)
decryption_token, reply_block = ReplyBlock.compose_reply_block(message_id,
self.params,
route,
self.pki,
self.client_id,
self.rand_reader)
self._decryption_tokens[decryption_token.message_id] = decryption_token
return reply_block
@implementer(IRouteFactory)
@attr.s
class RandomRouteFactory(object):
"""
I create random routes.
"""
params = attr.ib(validator=attr.validators.instance_of(SphinxParams))
pki = attr.ib(validator=attr.validators.provides(IMixPKI))
rand_reader = attr.ib(validator=attr.validators.provides(IReader))
def build_route(self):
"""
return a new random route
"""
# XXX todo: assert destination type
mixes = self.pki.identities()
assert len(mixes) >= self.params.max_hops
nodeids = [(self.rand_reader.read(8), x) for x in mixes]
nodeids.sort(key=lambda x: x[0])
return [x[1] for x in nodeids[:self.params.max_hops]]
@implementer(IRouteFactory)
@attr.s
class CascadeRouteFactory(object):
route = attr.ib(validator=attr.validators.instance_of(list))
def build_route(self):
return self.route
@attr.s
class MixClient(object):
"""
i am a client of the mixnet.
"""
params = attr.ib(validator=attr.validators.instance_of(SphinxParams))
pki = attr.ib(validator=attr.validators.provides(IMixPKI))
client_id = attr.ib(validator=attr.validators.instance_of(bytes))
rand_reader = attr.ib(validator=attr.validators.provides(IReader))
transport = attr.ib(validator=attr.validators.provides(IMixTransport))
message_received_handler = attr.ib(validator=attr.validators.instance_of(types.FunctionType))
route_factory = attr.ib(validator=attr.validators.provides(IRouteFactory))
def start(self):
"""
start the mix client
"""
self.protocol = ClientProtocol(self.params, self.pki, self.client_id, self.rand_reader,
packet_received_handler=lambda x: self.message_received(x))
d = self.protocol.make_connection(self.transport)
self.pki.set_client_addr("onion", self.protocol.client_id, self.transport.addr)
return d
def message_received(self, message):
"""
receive a message
"""
action = start_action(
action_type=u"mix client:message received",
client_id=binascii.hexlify(self.client_id),
)
with action.context():
self.message_received_handler(message)
def send(self, destination, message):
"""
send a message to the given destination
returns a deferred
"""
action = start_action(
action_type=u"mix client:message send",
client_id=binascii.hexlify(self.client_id),
)
with action.context():
d = self.protocol.send(self.route_factory.build_route(), message)
return DeferredContext(d).addActionFinish()
def create_reply_block(self):
"""
        return a new reply block built over a route from the route factory
"""
return self.protocol.create_reply_block(self.route_factory.build_route())
def reply(self, reply_block, message):
"""
compose a reply with a payload of `message` using the given `reply_block`
"""
assert isinstance(reply_block, ReplyBlock)
sphinx_packet = reply_block.compose_forward_message(self.params, message)
dest_addr = self.pki.get_mix_addr("onion", reply_block.destination)
return self.protocol.transport.send(dest_addr, sphinx_packet.get_raw_bytes())
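# Illustrative wiring of the classes above (comment sketch only; the concrete
# IMixPKI / IMixTransport / IReader objects and the variable names used here
# are placeholders, not part of this module):
#
#   route_factory = RandomRouteFactory(params, pki, rand_reader)
#   client = MixClient(params, pki, client_id, rand_reader, transport,
#                      message_received_handler=on_message,
#                      route_factory=route_factory)
#   d = client.start()                     # register with the PKI, start I/O
#   client.send(destination_id, b"hello")
#   reply_block = client.create_reply_block()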
| applied-mixnetworks/txmix | txmix/client.py | Python | gpl-3.0 | 6,459 | 0.002322 |
"""URL Shortener backend for Zinnia Hashids"""
from django.contrib.sites.models import Site
from django.core.urlresolvers import reverse
from zinnia.settings import PROTOCOL
from zinnia_hashids.factory import hashids
def backend(entry):
"""
Hashids URL shortener backend for Zinnia.
"""
hashed_pk = hashids.encode(entry.pk)
url = '%s://%s%s' % (
PROTOCOL, Site.objects.get_current().domain,
reverse('entry_hashids', kwargs={'token': hashed_pk}))
return url
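# Illustrative result (domain and token are placeholders, not values from this
# project): for an entry with pk=1, backend(entry) returns something like
# 'http://example.com/xyz', where 'xyz' is hashids.encode(1) routed through the
# 'entry_hashids' URL pattern.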
| django-blog-zinnia/zinnia-url-shortener-hashids | zinnia_hashids/backend.py | Python | bsd-3-clause | 501 | 0 |
import rdflib
g = rdflib.Graph()
g.parse('astronomical_database/data/rdf/astronomical_database.rdf')
result = g.query("""
PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>
PREFIX ontology: <urn://sedatar.org/astronomical_database#>
# All classes.
SELECT DISTINCT ?x1 WHERE {
?x0 rdf:type rdfs:Class.
?x0 rdfs:label ?x1.
}
# All properties.
# SELECT DISTINCT ?x2 WHERE {
# ?x0 rdfs:subClassOf* rdf:Property.
# ?x1 rdf:type ?x0.
# ?x1 rdfs:label ?x2.
# }
# All properties of planetary systems.
# SELECT DISTINCT ?x3 WHERE {
# ?x0 rdfs:label 'planetary system'.
# ?x0 ?x1 ?x2.
# ?x1 rdfs:label ?x3.
# }
# All terrestrial planets.
# SELECT DISTINCT ?x1 WHERE {
# # ?x0 rdfs:label 'terrestrial planet'.
# # ?x0 rdf:type ?x1.
# ?x0 rdf:type ontology:Terrestrial_Planet.
# ?x0 rdfs:label ?x1.
# }
# All things.
# SELECT DISTINCT ?x2 WHERE {
# ?x0 rdfs:subClassOf* ontology:Thing.
# ?x1 rdf:type ?x0.
# ?x1 rdfs:label ?x2.
# }
# Number of planets.
# SELECT DISTINCT ?x1 WHERE {
# ?x0 rdfs:label 'planet'.
# ?x0 ontology:numberOfInstances ?x1.
# }
# Size of Earth.
# SELECT DISTINCT ?x2 WHERE {
# ?x0 rdfs:label 'Earth'.
# ?x0 ?x1 ?x2.
# ?x1 rdfs:subPropertyOf* ontology:size.
# }
""")
for row in sorted(result):
print('%s' % row)
| mathiasuhlenbrock/sedatar | astronomical_database/scripts/python/test_query.py | Python | gpl-3.0 | 1,551 | 0 |
#!/bin/python3
import sys
fact = lambda n: 1 if n <= 1 else n * fact(n - 1)
n = int(input().strip())
fct = fact(n)
print(fct)
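# Quick sanity check (illustrative, not part of the original solution):
# fact(10) == 3628800, so an input of 10 prints 3628800.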
| lilsweetcaligula/Online-Judges | hackerrank/algorithms/implementation/medium/extra_long_factorials/py/solution.py | Python | mit | 134 | 0.022388 |
'''
Setup script for FuzzyFileFinder.
'''
import setuptools
from fff import __project__, __version__, CLI
README = 'README.md'
setuptools.setup(name='fff',
version=__version__,
description='Fuzzy File Finder.',
url="https://github.com/jkloo/fff",
author='Jeff Kloosterman',
author_email='kloosterman.jeff@gmail.com',
packages=setuptools.find_packages(),
entry_points={'console_scripts': [CLI + ' = fff.fffind:main']},
license='MIT',
long_description=open(README).read(),
install_requires=[]
)
| jkloo/fff | setup.py | Python | mit | 718 | 0.001393 |
__author__ = 'shuai'
class Solution:
# @param {integer[]} nums
# @return {string}
def largestNumber(self, nums):
ret = ""
for i in range(len(nums)):
for j in range(i + 1, len(nums)):
str_i = str(nums[i])
str_j = str(nums[j])
if str_i + str_j < str_j + str_i:
tmp = nums[i]
nums[i] = nums[j]
nums[j] = tmp
            # if the largest element is 0, all elements are 0, so return '0'
if i == 0 and nums[i] == 0:
return '0'
ret += str(nums[i])
return ret
sol = Solution()
print sol.largestNumber([3, 30, 34, 5, 9])
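# expected output (illustrative): 9534330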
| shuaizi/leetcode | leetcode-python/num179.py | Python | apache-2.0 | 685 | 0.00146 |
import pyb
#from pyb import I2C, SPI, UART
#import staccel
import math
#import os
#import gc # garbage collection for writing?
#import microsnake
#from microsnake import MicroSnakeGame as Game
#from microsnake import move_arrow_pressed
import shared_globals
#from shared_globals import move_arrow_pressed as move_arrow_pressed
#from struct import unpack, pack # not interrupt safe = using heap
#import binascii as ba
#import lcd_i2c
#from dcmotor import DCMotor
import micropython
#import boot
#boot.print_version()
#micropython.alloc_emergency_exception_buf(100)
#print('Micropython alloc_emergency_exception_buffer set to 100')
#import operator # dict sorting
#try:
# print('try importing pins')
# import pins
#except ImportError:
# print('pins not found')
#from machine import Pins
#print('>>>>>>> shape assert')
#a = [[[1,2],[1,2]],[[1,2],[1,2]]]
#print(shared_globals.print_shape(a))
class FakePin():
def value(q, value):
pass
class DCMotor():
def __init__(q, name, in1_pin, in2_pin,
tim_num, tim_channel, tim_pin,
dir_en=1, tim_freq=10000):
# print(q.__dict__)
q.name = name.strip()
q.in1_pin = in1_pin
q.in2_pin = in2_pin
q.tim_num = int(tim_num)
q.tim_channel = int(tim_channel)
q.tim_pin = tim_pin
q.dir_en = int(dir_en)
q.tim_freq = float(tim_freq)
q.velocity = 0
if dir_en:
q.in1 = pyb.Pin(in1_pin, pyb.Pin.OUT_PP)
q.in2 = pyb.Pin(in2_pin, pyb.Pin.OUT_PP)
q.in1.value(0)
q.in2.value(0)
else:
q.in1 = FakePin()
q.in2 = FakePin()
q.tim = pyb.Timer(tim_num)
q.tim.init(freq=tim_freq)
# q.en = q.tim.channel(tim_channel, pyb.Timer.PWM, pin=tim_pin)
q.en = q.tim.channel(tim_channel, pyb.Timer.PWM, pin=pyb.Pin(tim_pin))
q.en.pulse_width_percent(0)
# MyMapperDict = { 'LeftMotorDir' : pyb.Pin.cpu.C12 }
# pyb.Pin.dict(MyMapperDict)
# g = pyb.Pin("LeftMotorDir", pyb.Pin.OUT_OD)
q.set_in = [None, None]
def vel(q, vel=0, info=True):
if vel < -100:
vel = -100
elif vel > 100:
vel = 100
if vel == 0:
to_set_in = [0, 0]
# q.dir_en = 0
else:
# q.dir_en = 1
if vel > 0:
to_set_in = [1, 0]
elif vel < 0:
to_set_in = [0, 1]
if vel != q.velocity:
            # pyb.Timer channels expose pulse_width_percent() (as used in
            # __init__), not duty(), so set the duty cycle through that API
            q.en.pulse_width_percent(abs(vel))
print(vel)
print(abs(vel))
if info:
print(q)
q.in1.value(to_set_in[0])
q.in2.value(to_set_in[1])
q.velocity = vel
q.set_in = to_set_in
def __str__(q):
txt = (
'DCm[{nam}] in1,in2[{in1},{in2}]={set_in}'
' tim,dir_en[{tim},{en}],'
' vel={vel}').format(
nam=q.name,
in1=q.in1_pin, in2=q.in2_pin, set_in=q.set_in,
tim=q.tim_pin, en=q.dir_en,
vel=q.velocity
)
# read_ins = [q.in1.value(), q.in2.value()]
return txt
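# Illustrative usage (comment sketch only; the pin names, timer number and
# channel below are placeholders for the actual wiring, not values taken from
# this file):
#
#   left = DCMotor('left', 'X1', 'X2', tim_num=2, tim_channel=1, tim_pin='X3')
#   left.vel(50)     # forward at 50% duty
#   left.vel(-50)    # reverse at 50% duty
#   left.vel(0)      # stop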
| gr4viton/gr4Dalek | spine/dd/spine/dcmotor.py | Python | gpl-3.0 | 3,260 | 0.009202 |
#!/usr/local/sci/bin/python
#*****************************
#
# Cloud Coverage Logical Check (CCC)
#
#
#************************************************************************
# SVN Info
#$Rev:: 67 $: Revision of last commit
#$Author:: rdunn $: Author of last commit
#$Date:: 2015-05-01 16:18:52 +0100 (Fri, 01 May 2015) $: Date of last commit
#************************************************************************
import numpy as np
import scipy as sp
import datetime as dt
# RJHD routines
import qc_utils as utils
#************************************************************************
def unobservable(station, flag_col, logfile, diagnostics = False, plots = False):
'''
Cloud observation code given as unobservable (==9 or 10)
:param obj station: station object
:param list flag_col: flag columns to use
    :param file logfile: logfile to store output
:param bool plots: to do any plots
:param bool diagnostics: to do any extra diagnostic output
:returns:
'''
# for each cloud variable, find bad locations and flag
for c, cloud in enumerate(['total_cloud_cover','low_cloud_cover','mid_cloud_cover','high_cloud_cover']):
cloud_obs = getattr(station, cloud)
bad_locs = np.ma.where(np.logical_or(cloud_obs.data == 9, cloud_obs.data == 10))
station.qc_flags[bad_locs, flag_col[c]] = 1
flag_locs = np.where(station.qc_flags[:, flag_col[c]] != 0)
if plots or diagnostics:
utils.print_flagged_obs_number(logfile, "Unobservable cloud", cloud, len(flag_locs[0]), noWrite = True)
else:
utils.print_flagged_obs_number(logfile, "Unobservable cloud", cloud, len(flag_locs[0]))
# copy flags into attribute
cloud_obs.flags[flag_locs] = 1
return # unobservable
#************************************************************************
def total_lt_max(station, flag_col, logfile, diagnostics = False, plots = False):
'''
Total cloud cover less than maximum of low, mid and high
:param obj station: station object
:param list flag_col: flag columns to use
    :param file logfile: logfile to store output
:param bool plots: to do any plots
:param bool diagnostics: to do any extra diagnostic output
:returns:
'''
total = getattr(station, "total_cloud_cover")
low = getattr(station, "low_cloud_cover")
mid = getattr(station, "mid_cloud_cover")
high = getattr(station, "high_cloud_cover")
maximum = np.ma.max([low.data, mid.data, high.data], axis = 0)
bad_locs = np.ma.where(maximum > total.data)
station.qc_flags[bad_locs, flag_col] = 1
flag_locs = np.where(station.qc_flags[:, flag_col] != 0)
if plots or diagnostics:
utils.print_flagged_obs_number(logfile, "Total < Max cloud", "cloud", len(flag_locs[0]), noWrite = True)
else:
utils.print_flagged_obs_number(logfile, "Total < Max cloud", "cloud", len(flag_locs[0]))
# copy flags into attribute
total.flags[flag_locs] = 1
low.flags[flag_locs] = 1
mid.flags[flag_locs] = 1
high.flags[flag_locs] = 1
return # total_lt_max
#************************************************************************
def low_full(station, flag_col, logfile, diagnostics = False, plots = False):
'''
Low cloud full, but values in mid or high
:param obj station: station object
:param list flag_col: flag columns to use
    :param file logfile: logfile to store output
:param bool plots: to do any plots
:param bool diagnostics: to do any extra diagnostic output
:returns:
'''
low = getattr(station, "low_cloud_cover")
mid = getattr(station, "mid_cloud_cover")
high = getattr(station, "high_cloud_cover")
low_full_locs = np.ma.where(low.data == 8)
bad_mid = np.where(mid.data.mask[low_full_locs] != True)
station.qc_flags[low_full_locs[0][bad_mid[0]], flag_col] = 1
bad_high = np.where(high.data.mask[low_full_locs] != True)
station.qc_flags[low_full_locs[0][bad_high[0]], flag_col] = 1
flag_locs = np.where(station.qc_flags[:, flag_col] != 0)
if plots or diagnostics:
utils.print_flagged_obs_number(logfile, "Low full cloud", "cloud", len(flag_locs[0]), noWrite = True)
else:
utils.print_flagged_obs_number(logfile, "Low full cloud", "cloud", len(flag_locs[0]))
# copy flags into attribute
mid.flags[flag_locs] = 1
high.flags[flag_locs] = 1
return # low_full
#************************************************************************
def mid_full(station, flag_col, logfile, diagnostics = False, plots = False):
'''
Mid cloud full, but values in high
:param obj station: station object
:param list flag_col: flag columns to use
    :param file logfile: logfile to store output
:param bool plots: to do any plots
:param bool diagnostics: to do any extra diagnostic output
:returns:
'''
mid = getattr(station, "mid_cloud_cover")
high = getattr(station, "high_cloud_cover")
mid_full_locs = np.ma.where(mid.data == 8)
bad_high = np.where(high.data.mask[mid_full_locs] != True)
station.qc_flags[mid_full_locs[0][bad_high[0]], flag_col] = 1
flag_locs = np.where(station.qc_flags[:, flag_col] != 0)
if plots or diagnostics:
utils.print_flagged_obs_number(logfile, "Mid full cloud", "cloud", len(flag_locs[0]), noWrite = True)
else:
utils.print_flagged_obs_number(logfile, "Mid full cloud", "cloud", len(flag_locs[0]))
# copy flags into attribute
high.flags[flag_locs] = 1
return # mid_full
#************************************************************************
def fix_cloud_base(station):
'''
If cloud base is 22000ft, then set to missing
:param obj station: station object
:returns:
'''
cloud_base = getattr(station, "cloud_base")
bad_cb = np.where(cloud_base.data == 22000)
# no flag set on purpose - just set to missing (unobservable)
cloud_base.data[bad_cb] = cloud_base.mdi
cloud_base.data.mask[bad_cb] = True
return
#************************************************************************
def negative_cloud(station, flag_col, logfile, diagnostics = False, plots = False):
'''
    Nonsensical (negative) cloud value
:param obj station: station object
:param list flag_col: flag columns to use
    :param file logfile: logfile to store output
:param bool plots: to do any plots
:param bool diagnostics: to do any extra diagnostic output
:returns:
'''
    # go through each cloud variable and flag bad locations
for c, cloud in enumerate(['total_cloud_cover','low_cloud_cover','mid_cloud_cover','high_cloud_cover']):
cloud_obs = getattr(station, cloud)
bad_locs = np.ma.where(cloud_obs.data < 0)
station.qc_flags[bad_locs, flag_col] = 1
# copy flags into attribute
cloud_obs.flags[bad_locs] = 1
flag_locs = np.where(station.qc_flags[:, flag_col] != 0)
if plots or diagnostics:
utils.print_flagged_obs_number(logfile, "Negative Cloud", "cloud", len(flag_locs[0]), noWrite = True)
else:
utils.print_flagged_obs_number(logfile, "Negative Cloud", "cloud", len(flag_locs[0]))
return
#************************************************************************
def ccc(station, flag_col, logfile, diagnostics = False, plots = False):
'''
Call the logical cloud checks
:param obj station: station object
:param list flag_col: flag columns to use
:param file logfile: logfile to store output
:param bool diagnostics: diagnostic output (unused)
:param bool plots: do the plots (unused)
:returns:
'''
if len(flag_col) != 8:
print "insufficient flag columns given"
return
unobservable(station, flag_col[0:4], logfile, plots = plots, diagnostics = diagnostics)
total_lt_max(station, flag_col[4], logfile, plots = plots, diagnostics = diagnostics)
low_full(station, flag_col[5], logfile, plots = plots, diagnostics = diagnostics)
mid_full(station, flag_col[6], logfile, plots = plots, diagnostics = diagnostics)
fix_cloud_base(station)
negative_cloud(station, flag_col[7], logfile, plots = plots, diagnostics = diagnostics)
station = utils.append_history(station, "Cloud - Logical Cross Check")
return # ccc
#************************************************************************
if __name__ == "__main__":
print "cloud level cross checks"
| rjhd2/HadISD_v2 | qc_tests/clouds.py | Python | bsd-3-clause | 8,787 | 0.019119 |
# coding: utf-8
import sys
reload(sys)
sys.setdefaultencoding('utf8')
import socket
import os
import re
import select
import time
import paramiko
import struct
import fcntl
import signal
import textwrap
import getpass
import fnmatch
import readline
import datetime
from multiprocessing import Pool
os.environ['DJANGO_SETTINGS_MODULE'] = 'jumpserver.settings'
from juser.models import User
from jlog.models import Log
from jumpserver.api import CONF, BASE_DIR, ServerError, user_perm_group_api, user_perm_group_hosts_api, get_user_host
from jumpserver.api import AssetAlias, get_connect_item
try:
import termios
import tty
except ImportError:
print '\033[1;31mOnly UnixLike supported.\033[0m'
time.sleep(3)
sys.exit()
CONF.read(os.path.join(BASE_DIR, 'jumpserver.conf'))
LOG_DIR = os.path.join(BASE_DIR, 'logs')
SSH_KEY_DIR = os.path.join(BASE_DIR, 'keys')
SERVER_KEY_DIR = os.path.join(SSH_KEY_DIR, 'server')
LOGIN_NAME = getpass.getuser()
def color_print(msg, color='blue'):
"""Print colorful string."""
color_msg = {'blue': '\033[1;36m%s\033[0m',
'green': '\033[1;32m%s\033[0m',
'red': '\033[1;31m%s\033[0m'}
print color_msg.get(color, 'blue') % msg
def color_print_exit(msg, color='red'):
"""Print colorful string and exit."""
color_print(msg, color=color)
time.sleep(2)
sys.exit()
def get_win_size():
"""This function use to get the size of the windows!"""
if 'TIOCGWINSZ' in dir(termios):
TIOCGWINSZ = termios.TIOCGWINSZ
else:
TIOCGWINSZ = 1074295912L # Assume
s = struct.pack('HHHH', 0, 0, 0, 0)
x = fcntl.ioctl(sys.stdout.fileno(), TIOCGWINSZ, s)
return struct.unpack('HHHH', x)[0:2]
def set_win_size(sig, data):
"""This function use to set the window size of the terminal!"""
try:
win_size = get_win_size()
channel.resize_pty(height=win_size[0], width=win_size[1])
except:
pass
def log_record(username, host):
"""Logging user command and output."""
connect_log_dir = os.path.join(LOG_DIR, 'connect')
timestamp_start = int(time.time())
today = time.strftime('%Y%m%d', time.localtime(timestamp_start))
time_now = time.strftime('%H%M%S', time.localtime(timestamp_start))
today_connect_log_dir = os.path.join(connect_log_dir, today)
log_filename = '%s_%s_%s.log' % (username, host, time_now)
log_file_path = os.path.join(today_connect_log_dir, log_filename)
dept_name = User.objects.get(username=username).dept.name
pid = os.getpid()
pts = os.popen("ps axu | awk '$2==%s{ print $7 }'" % pid).read().strip()
ip_list = os.popen("who | awk '$2==\"%s\"{ print $5 }'" % pts).read().strip('()\n')
if not os.path.isdir(today_connect_log_dir):
try:
os.makedirs(today_connect_log_dir)
os.chmod(today_connect_log_dir, 0777)
except OSError:
raise ServerError('Create %s failed, Please modify %s permission.' % (today_connect_log_dir, connect_log_dir))
try:
log_file = open(log_file_path, 'a')
except IOError:
raise ServerError('Create logfile failed, Please modify %s permission.' % today_connect_log_dir)
log = Log(user=username, host=host, remote_ip=ip_list, dept_name=dept_name,
log_path=log_file_path, start_time=datetime.datetime.now(), pid=pid)
log_file.write('Starttime is %s\n' % datetime.datetime.now())
log.save()
return log_file, log
def posix_shell(chan, username, host):
"""
Use paramiko channel connect server interactive.
"""
log_file, log = log_record(username, host)
old_tty = termios.tcgetattr(sys.stdin)
try:
tty.setraw(sys.stdin.fileno())
tty.setcbreak(sys.stdin.fileno())
chan.settimeout(0.0)
while True:
try:
r, w, e = select.select([chan, sys.stdin], [], [])
except:
pass
if chan in r:
try:
x = chan.recv(10240)
if len(x) == 0:
break
sys.stdout.write(x)
sys.stdout.flush()
log_file.write(x)
log_file.flush()
except socket.timeout:
pass
if sys.stdin in r:
x = os.read(sys.stdin.fileno(), 1)
if len(x) == 0:
break
chan.send(x)
finally:
termios.tcsetattr(sys.stdin, termios.TCSADRAIN, old_tty)
log_file.write('Endtime is %s' % datetime.datetime.now())
log_file.close()
log.is_finished = True
log.log_finished = False
log.end_time = datetime.datetime.now()
log.save()
print_prompt()
def get_user_hostgroup(username):
"""Get the hostgroups of under the user control."""
groups_attr = {}
group_all = user_perm_group_api(username)
for group in group_all:
groups_attr[group.name] = [group.id, group.comment]
return groups_attr
def get_user_hostgroup_host(username, gid):
"""Get the hostgroup hosts of under the user control."""
hosts_attr = {}
user = User.objects.get(username=username)
hosts = user_perm_group_hosts_api(gid)
for host in hosts:
alias = AssetAlias.objects.filter(user=user, host=host)
if alias and alias[0].alias != '':
hosts_attr[host.ip] = [host.id, host.ip, alias[0].alias]
else:
hosts_attr[host.ip] = [host.id, host.ip, host.comment]
return hosts_attr
def verify_connect(username, part_ip):
ip_matched = []
try:
hosts_attr = get_user_host(username)
hosts = hosts_attr.values()
except ServerError, e:
color_print(e, 'red')
return False
for ip_info in hosts:
if part_ip in ip_info[1:] and part_ip:
ip_matched = [ip_info[1]]
break
for info in ip_info[1:]:
if part_ip in info:
ip_matched.append(ip_info[1])
ip_matched = list(set(ip_matched))
if len(ip_matched) > 1:
for ip in ip_matched:
print '%-15s -- %s' % (ip, hosts_attr[ip][2])
elif len(ip_matched) < 1:
color_print('No Permission or No host.', 'red')
else:
username, password, host, port = get_connect_item(username, ip_matched[0])
connect(username, password, host, port, LOGIN_NAME)
def print_prompt():
msg = """\033[1;32m### Welcome Use JumpServer To Login. ### \033[0m
1) Type \033[32mIP or Part IP, Host Alias or Comments \033[0m To Login.
2) Type \033[32mP/p\033[0m To Print The Servers You Available.
3) Type \033[32mG/g\033[0m To Print The Server Groups You Available.
4) Type \033[32mG/g(1-N)\033[0m To Print The Server Group Hosts You Available.
5) Type \033[32mE/e\033[0m To Execute Command On Several Servers.
6) Type \033[32mQ/q\033[0m To Quit.
"""
print textwrap.dedent(msg)
def print_user_host(username):
try:
hosts_attr = get_user_host(username)
except ServerError, e:
color_print(e, 'red')
return
hosts = hosts_attr.keys()
hosts.sort()
for ip in hosts:
print '%-15s -- %s' % (ip, hosts_attr[ip][2])
print ''
def print_user_hostgroup(username):
group_attr = get_user_hostgroup(username)
groups = group_attr.keys()
for g in groups:
print "[%3s] %s -- %s" % (group_attr[g][0], g, group_attr[g][1])
def print_user_hostgroup_host(username, gid):
pattern = re.compile(r'\d+')
match = pattern.match(gid)
if match:
hosts_attr = get_user_hostgroup_host(username, gid)
hosts = hosts_attr.keys()
hosts.sort()
for ip in hosts:
print '%-15s -- %s' % (ip, hosts_attr[ip][2])
else:
color_print('No such group id, Please check it.', 'red')
def connect(username, password, host, port, login_name):
"""
Connect server.
"""
ps1 = "PS1='[\u@%s \W]\$ ' && TERM=xterm && export TERM\n" % host
login_msg = "clear;echo -e '\\033[32mLogin %s done. Enjoy it.\\033[0m'\n" % host
# Make a ssh connection
ssh = paramiko.SSHClient()
ssh.load_system_host_keys()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
try:
ssh.connect(host, port=port, username=username, password=password, compress=True)
    except (paramiko.ssh_exception.AuthenticationException, paramiko.ssh_exception.SSHException):
raise ServerError('Authentication Error.')
except socket.error:
raise ServerError('Connect SSH Socket Port Error, Please Correct it.')
# Make a channel and set windows size
global channel
win_size = get_win_size()
channel = ssh.invoke_shell(height=win_size[0], width=win_size[1])
try:
signal.signal(signal.SIGWINCH, set_win_size)
except:
pass
# Set PS1 and msg it
channel.send(ps1)
channel.send(login_msg)
# Make ssh interactive tunnel
posix_shell(channel, login_name, host)
# Shutdown channel socket
channel.close()
ssh.close()
def remote_exec_cmd(ip, port, username, password, cmd):
try:
time.sleep(5)
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh.connect(ip, port, username, password, timeout=5)
stdin, stdout, stderr = ssh.exec_command("bash -l -c '%s'" % cmd)
out = stdout.readlines()
err = stderr.readlines()
color_print('%s:' % ip, 'blue')
for i in out:
color_print(" " * 4 + i.strip(), 'green')
for j in err:
color_print(" " * 4 + j.strip(), 'red')
ssh.close()
except Exception as e:
color_print(ip + ':', 'blue')
color_print(str(e), 'red')
def multi_remote_exec_cmd(hosts, username, cmd):
pool = Pool(processes=5)
for host in hosts:
username, password, ip, port = get_connect_item(username, host)
pool.apply_async(remote_exec_cmd, (ip, port, username, password, cmd))
pool.close()
pool.join()
def exec_cmd_servers(username):
color_print("You can choose in the following IP(s), Use glob or ips split by comma. q/Q to PreLayer.", 'green')
print_user_host(LOGIN_NAME)
while True:
hosts = []
inputs = raw_input('\033[1;32mip(s)>: \033[0m')
if inputs in ['q', 'Q']:
break
get_hosts = get_user_host(username).keys()
if ',' in inputs:
ips_input = inputs.split(',')
for host in ips_input:
if host in get_hosts:
hosts.append(host)
else:
for host in get_hosts:
if fnmatch.fnmatch(host, inputs):
hosts.append(host.strip())
if len(hosts) == 0:
color_print("Check again, Not matched any ip!", 'red')
continue
else:
print "You matched ip: %s" % hosts
color_print("Input the Command , The command will be Execute on servers, q/Q to quit.", 'green')
while True:
cmd = raw_input('\033[1;32mCmd(s): \033[0m')
if cmd in ['q', 'Q']:
break
exec_log_dir = os.path.join(LOG_DIR, 'exec_cmds')
if not os.path.isdir(exec_log_dir):
os.mkdir(exec_log_dir)
os.chmod(exec_log_dir, 0777)
filename = "%s/%s.log" % (exec_log_dir, time.strftime('%Y%m%d'))
f = open(filename, 'a')
f.write("DateTime: %s User: %s Host: %s Cmds: %s\n" %
(time.strftime('%Y/%m/%d %H:%M:%S'), username, hosts, cmd))
multi_remote_exec_cmd(hosts, username, cmd)
if __name__ == '__main__':
print_prompt()
gid_pattern = re.compile(r'^g\d+$')
try:
while True:
try:
option = raw_input("\033[1;32mOpt or IP>:\033[0m ")
except EOFError:
print
continue
except KeyboardInterrupt:
sys.exit(0)
if option in ['P', 'p']:
print_user_host(LOGIN_NAME)
continue
elif option in ['G', 'g']:
print_user_hostgroup(LOGIN_NAME)
continue
elif gid_pattern.match(option):
gid = option[1:].strip()
print_user_hostgroup_host(LOGIN_NAME, gid)
continue
elif option in ['E', 'e']:
exec_cmd_servers(LOGIN_NAME)
elif option in ['Q', 'q', 'exit']:
sys.exit()
else:
try:
verify_connect(LOGIN_NAME, option)
except ServerError, e:
color_print(e, 'red')
except IndexError:
pass
| watchsky126/jumpserver | connect.py | Python | gpl-2.0 | 12,846 | 0.002802 |
import unittest
import env
from linalg.vector import Vector
class TestVectorOperations(unittest.TestCase):
def test_vector_equality(self):
a = Vector([1, 2, 3])
b = Vector([1, 2, 3])
self.assertEqual(a, b)
def test_vector_inequality(self):
a = Vector([1, 2, 3])
b = Vector([4, 5, 6])
self.assertNotEqual(a, b)
def test_vector_addition(self):
a = Vector([8.218, -9.341])
b = Vector([-1.129, 2.111])
self.assertEqual(a + b, Vector([7.089, -7.229999999999999]))
def test_vector_subtraction(self):
a = Vector([7.119, 8.215])
b = Vector([-8.223, 0.878])
self.assertEqual(a - b, Vector([15.342, 7.337]))
def test_scalar_multiplication(self):
a = Vector([1.671, -1.012, -0.318])
c = 7.41
self.assertEqual(a * c, Vector([12.38211, -7.49892, -2.35638]))
def test_vector_rounding(self):
v = Vector([1.2345, 6.6789])
self.assertEqual(v.round(2), Vector([1.23, 6.68]))
def test_vector_magnitude(self):
v = Vector([-0.221, 7.437])
self.assertEqual(round(v.magnitude(), 3), 7.440)
def test_vector_normalization(self):
w = Vector([1.996, 3.108, -4.554])
self.assertEqual(w.normalized(), Vector([0.3404012959433014,
0.5300437012984873,
-0.7766470449528028]))
def test_dot_product(self):
v = Vector([7.887, 4.138])
w = Vector([-8.802, 6.776])
self.assertEqual(round(v.dot(w), 3), -41.382)
    def test_dot_product_commutativity(self):
        """
        The dot product is commutative, meaning it shouldn't matter what
        order the vectors go in.
        """
v = Vector([7, 4])
w = Vector([-8, 6.776])
self.assertEqual(v.dot(w), w.dot(v))
def test_inner_angle_radians(self):
v = Vector([3.183, -7.627])
w = Vector([-2.668, 5.319])
self.assertEqual(round(v.inner_angle(w), 3), 3.072)
def test_inner_angle_degrees(self):
v = Vector([7.35, 0.221, 5.188])
w = Vector([2.751, 8.259, 3.985])
self.assertEqual(round(v.inner_angle(w, degrees=True), 3), 60.276)
def test_orthogonality(self):
v = Vector([-7.579, -7.88])
w = Vector([22.737, 23.64])
self.assertFalse(v.is_orthogonal(w))
v = Vector([-2.029, 9.97, 4.172])
w = Vector([-9.231, -6.639, -7.245])
self.assertFalse(v.is_orthogonal(w))
v = Vector([-2.328, -7.284, -1.214])
w = Vector([-1.821, 1.072, -2.94])
self.assertTrue(v.is_orthogonal(w))
v = Vector([2.118, 4.827])
w = Vector([0, 0])
self.assertTrue(v.is_orthogonal(w))
def test_parallelism(self):
v = Vector([-7.579, -7.88])
w = Vector([22.737, 23.64])
self.assertTrue(v.is_parallel(w))
v = Vector([-2.029, 9.97, 4.172])
w = Vector([-9.231, -6.639, -7.245])
self.assertFalse(v.is_parallel(w))
v = Vector([-2.328, -7.284, -1.214])
w = Vector([-1.821, 1.072, -2.94])
self.assertFalse(v.is_parallel(w))
v = Vector([2.118, 4.827])
w = Vector([0, 0])
self.assertTrue(v.is_parallel(w))
def test_identity_vector_orthogonality_and_parallelism(self):
"""
The zero vector is the only vector that is both orthogonal and
parallel to itself.
"""
v = Vector([0, 0, 0])
self.assertTrue(v.is_orthogonal(v))
self.assertTrue(v.is_parallel(v))
w = Vector([4, 5, 6])
self.assertFalse(w.is_orthogonal(w))
self.assertTrue(w.is_parallel(w))
def test_vector_projections(self):
"""
Testing vector projection, orthogonal components, and
decomposition.
"""
v = Vector([3.039, 1.879])
b = Vector([0.825, 2.036])
proj = v.project(b)
self.assertEqual(proj.round(3), Vector([1.083, 2.672]))
v = Vector([-9.88, -3.264, -8.159])
b = Vector([-2.155, -9.353, -9.473])
orth = v.orthogonal_component(b)
try:
self.assertEqual(orth.round(3), Vector([-8.350, 3.376, -1.434]))
except AssertionError as e:
print(orth.round(3))
print(Vector([-8.350, 3.376, -1.434]))
raise(e)
v = Vector([3.009, -6.172, 3.692, -2.51])
b = Vector([6.404, -9.144, 2.759, 8.718])
v_decomposed = (v.project(b) +
v.orthogonal_component(b))
self.assertEqual(v, v_decomposed.round(3))
def test_cross_product(self):
"""
Testing the calculation of cross products, as well as the areas
of parallelograms and triangles spanned by different vectors.
"""
v = Vector([8.462, 7.893, -8.187])
w = Vector([6.984, -5.975, 4.778])
cross = v.cross(w)
self.assertEqual(cross.round(3), Vector([-11.205,
-97.609,
-105.685]))
v = Vector([-8.987, -9.838, 5.031])
w = Vector([-4.268, -1.861, -8.866])
par_area = v.parallelogram_area(w)
self.assertEqual(round(par_area, 3), 142.122)
v = Vector([1.5, 9.547, 3.691])
w = Vector([-6.007, 0.124, 5.772])
tri_area = v.triangle_area(w)
self.assertEqual(round(tri_area, 3), 42.565)
if __name__ == '__main__':
unittest.main()
| jeancochrane/learning | linear-algebra/tests/test_vector_operations.py | Python | mit | 5,540 | 0.000181 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Setup script for building clr.pyd and dependencies using mono and into
an egg or wheel.
"""
import collections
import fnmatch
import glob
import os
import subprocess
import sys
import sysconfig
from distutils import spawn
from distutils.command import build_ext, install_data, install_lib
from setuptools import Extension, setup
# Allow config/verbosity to be set from cli
# http://stackoverflow.com/a/4792601/5208670
CONFIG = "Release" # Release or Debug
VERBOSITY = "minimal" # quiet, minimal, normal, detailed, diagnostic
is_64bits = sys.maxsize > 2**32
DEVTOOLS = "MsDev" if sys.platform == "win32" else "Mono"
ARCH = "x64" if is_64bits else "x86"
PY_MAJOR = sys.version_info[0]
PY_MINOR = sys.version_info[1]
###############################################################################
# Windows Keys Constants for MSBUILD tools
RegKey = collections.namedtuple('RegKey', 'sdk_name key value_name suffix')
vs_python = "Programs\\Common\\Microsoft\\Visual C++ for Python\\9.0\\WinSDK"
vs_root = "SOFTWARE\\Microsoft\\MSBuild\\ToolsVersions\\{0}"
sdks_root = "SOFTWARE\\Microsoft\\Microsoft SDKs\\Windows\\v{0}Win32Tools"
kits_root = "SOFTWARE\\Microsoft\\Windows Kits\\Installed Roots"
kits_suffix = os.path.join("bin", ARCH)
WIN_SDK_KEYS = (
RegKey(sdk_name="Windows Kit 10.0", key=kits_root,
value_name="KitsRoot10", suffix=kits_suffix),
RegKey(sdk_name="Windows Kit 8.1", key=kits_root,
value_name="KitsRoot81", suffix=kits_suffix),
RegKey(sdk_name="Windows Kit 8.0", key=kits_root,
value_name="KitsRoot", suffix=kits_suffix),
RegKey(sdk_name="Windows SDK 7.1A", key=sdks_root.format("7.1A\\WinSDK-"),
value_name="InstallationFolder", suffix=""),
RegKey(sdk_name="Windows SDK 7.1", key=sdks_root.format("7.1\\WinSDK"),
value_name="InstallationFolder", suffix=""),
RegKey(sdk_name="Windows SDK 7.0A", key=sdks_root.format("7.0A\\WinSDK-"),
value_name="InstallationFolder", suffix=""),
RegKey(sdk_name="Windows SDK 7.0", key=sdks_root.format("7.0\\WinSDK"),
value_name="InstallationFolder", suffix=""),
RegKey(sdk_name="Windows SDK 6.0A", key=sdks_root.format("6.0A\\WinSDK"),
value_name="InstallationFolder", suffix=""),
)
VS_KEYS = (
RegKey(sdk_name="MSBuild 14", key=vs_root.format("14.0"),
value_name="MSBuildToolsPath", suffix=""),
RegKey(sdk_name="MSBuild 12", key=vs_root.format("12.0"),
value_name="MSBuildToolsPath", suffix=""),
RegKey(sdk_name="MSBuild 4", key=vs_root.format("4.0"),
value_name="MSBuildToolsPath", suffix=""),
RegKey(sdk_name="MSBuild 3.5", key=vs_root.format("3.5"),
value_name="MSBuildToolsPath", suffix=""),
RegKey(sdk_name="MSBuild 2.0", key=vs_root.format("2.0"),
value_name="MSBuildToolsPath", suffix=""),
)
###############################################################################
def _check_output(*args, **kwargs):
"""Check output wrapper for py2/py3 compatibility"""
output = subprocess.check_output(*args, **kwargs)
if PY_MAJOR == 2:
return output
return output.decode("ascii")
def _get_interop_filename():
"""interopXX.cs is auto-generated as part of the build.
For common windows platforms pre-generated files are included
as most windows users won't have Clang installed, which is
required to generate the file.
"""
interop_filename = "interop{0}{1}{2}.cs".format(
PY_MAJOR, PY_MINOR, getattr(sys, "abiflags", ""))
return os.path.join("src", "runtime", interop_filename)
def _get_source_files():
"""Walk project and collect the files needed for ext_module"""
for ext in (".sln", ):
for path in glob.glob("*" + ext):
yield path
for root, dirnames, filenames in os.walk("src"):
for ext in (".cs", ".csproj", ".snk", ".config",
".py", ".c", ".h", ".ico"):
for filename in fnmatch.filter(filenames, "*" + ext):
yield os.path.join(root, filename)
for root, dirnames, filenames in os.walk("tools"):
for ext in (".exe", ".py", ".c", ".h"):
for filename in fnmatch.filter(filenames, "*" + ext):
yield os.path.join(root, filename)
def _get_long_description():
"""Helper to populate long_description for pypi releases"""
try:
import pypandoc
return pypandoc.convert('README.md', 'rst')
except ImportError:
return '.Net and Mono integration for Python'
class BuildExtPythonnet(build_ext.build_ext):
def build_extension(self, ext):
"""Builds the .pyd file using msbuild or xbuild"""
if ext.name != "clr":
return build_ext.build_ext.build_extension(self, ext)
# install packages using nuget
self._install_packages()
dest_file = self.get_ext_fullpath(ext.name)
dest_dir = os.path.dirname(dest_file)
if not os.path.exists(dest_dir):
os.makedirs(dest_dir)
# Up to Python 3.2 sys.maxunicode is used to determine the size of
# Py_UNICODE, but from 3.3 onwards Py_UNICODE is a typedef of wchar_t.
# TODO: Is this doing the right check for Py27?
if sys.version_info[:2] <= (3, 2):
unicode_width = 2 if sys.maxunicode < 0x10FFFF else 4
else:
import ctypes
unicode_width = ctypes.sizeof(ctypes.c_wchar)
defines = [
"PYTHON{0}{1}".format(PY_MAJOR, PY_MINOR),
"PYTHON{0}".format(PY_MAJOR), # Python Major Version
"UCS{0}".format(unicode_width),
]
if CONFIG == "Debug":
defines.extend(["DEBUG", "TRACE"])
if sys.platform != "win32" and DEVTOOLS == "Mono":
on_darwin = sys.platform == "darwin"
defines.append("MONO_OSX" if on_darwin else "MONO_LINUX")
# Check if --enable-shared was set when Python was built
enable_shared = sysconfig.get_config_var("Py_ENABLE_SHARED")
if enable_shared:
# Double-check if libpython is linked dynamically with python
ldd_cmd = ["otool", "-L"] if on_darwin else ["ldd"]
lddout = _check_output(ldd_cmd + [sys.executable])
if 'libpython' not in lddout:
enable_shared = False
if not enable_shared:
defines.append("PYTHON_WITHOUT_ENABLE_SHARED")
if hasattr(sys, "abiflags"):
if "d" in sys.abiflags:
defines.append("PYTHON_WITH_PYDEBUG")
if "m" in sys.abiflags:
defines.append("PYTHON_WITH_PYMALLOC")
# check the interop file exists, and create it if it doesn't
interop_file = _get_interop_filename()
if not os.path.exists(interop_file):
self.debug_print("Creating {0}".format(interop_file))
geninterop = os.path.join("tools", "geninterop", "geninterop.py")
subprocess.check_call([sys.executable, geninterop, interop_file])
if DEVTOOLS == "MsDev":
_xbuild = '"{0}"'.format(self._find_msbuild_tool("msbuild.exe"))
_config = "{0}Win".format(CONFIG)
elif DEVTOOLS == "Mono":
_xbuild = "xbuild"
_config = "{0}Mono".format(CONFIG)
else:
raise NotImplementedError(
"DevTool {0} not supported (use MsDev/Mono)".format(DEVTOOLS))
cmd = [
_xbuild,
'pythonnet.sln',
'/p:Configuration={}'.format(_config),
'/p:Platform={}'.format(ARCH),
'/p:DefineConstants="{}"'.format(','.join(defines)),
'/p:PythonBuildDir="{}"'.format(os.path.abspath(dest_dir)),
'/p:PythonInteropFile="{}"'.format(os.path.basename(interop_file)),
'/verbosity:{}'.format(VERBOSITY),
]
manifest = self._get_manifest(dest_dir)
if manifest:
cmd.append('/p:PythonManifest="{0}"'.format(manifest))
self.debug_print("Building: {0}".format(" ".join(cmd)))
use_shell = True if DEVTOOLS == "Mono" else False
subprocess.check_call(" ".join(cmd + ["/t:Clean"]), shell=use_shell)
subprocess.check_call(" ".join(cmd + ["/t:Build"]), shell=use_shell)
if DEVTOOLS == "Mono":
self._build_monoclr()
def _get_manifest(self, build_dir):
if DEVTOOLS != "MsDev":
return
mt = self._find_msbuild_tool("mt.exe", use_windows_sdk=True)
manifest = os.path.abspath(os.path.join(build_dir, "app.manifest"))
cmd = [mt, '-inputresource:"{0}"'.format(sys.executable),
'-out:"{0}"'.format(manifest)]
self.debug_print("Extracting manifest from {}".format(sys.executable))
subprocess.check_call(" ".join(cmd), shell=False)
return manifest
def _build_monoclr(self):
mono_libs = _check_output("pkg-config --libs mono-2", shell=True)
mono_cflags = _check_output("pkg-config --cflags mono-2", shell=True)
glib_libs = _check_output("pkg-config --libs glib-2.0", shell=True)
glib_cflags = _check_output("pkg-config --cflags glib-2.0", shell=True)
cflags = mono_cflags.strip() + " " + glib_cflags.strip()
libs = mono_libs.strip() + " " + glib_libs.strip()
# build the clr python module
clr_ext = Extension(
"clr",
sources=[
"src/monoclr/pynetinit.c",
"src/monoclr/clrmod.c"
],
extra_compile_args=cflags.split(" "),
extra_link_args=libs.split(" ")
)
build_ext.build_ext.build_extension(self, clr_ext)
def _install_packages(self):
"""install packages using nuget"""
nuget = os.path.join("tools", "nuget", "nuget.exe")
use_shell = False
if DEVTOOLS == "Mono":
nuget = "mono {0}".format(nuget)
use_shell = True
cmd = "{0} update -self".format(nuget)
self.debug_print("Updating NuGet: {0}".format(cmd))
subprocess.check_call(cmd, shell=use_shell)
cmd = "{0} restore pythonnet.sln -o packages".format(nuget)
self.debug_print("Installing packages: {0}".format(cmd))
subprocess.check_call(cmd, shell=use_shell)
def _find_msbuild_tool(self, tool="msbuild.exe", use_windows_sdk=False):
"""Return full path to one of the Microsoft build tools"""
# Search in PATH first
path = spawn.find_executable(tool)
if path:
return path
# Search within registry to find build tools
try: # PY2
import _winreg as winreg
except ImportError: # PY3
import winreg
keys_to_check = WIN_SDK_KEYS if use_windows_sdk else VS_KEYS
hklm = winreg.HKEY_LOCAL_MACHINE
for rkey in keys_to_check:
try:
with winreg.OpenKey(hklm, rkey.key) as hkey:
val, type_ = winreg.QueryValueEx(hkey, rkey.value_name)
if type_ != winreg.REG_SZ:
continue
path = os.path.join(val, rkey.suffix, tool)
if os.path.exists(path):
self.debug_print("Using {0} from {1}".format(
tool, rkey.sdk_name))
return path
except WindowsError:
# Key doesn't exist
pass
# Add Visual C++ for Python as a fall-back in case one
# of the other Windows SDKs isn't installed.
# TODO: Extend checking by using setuptools/msvc.py?
if use_windows_sdk:
sdk_name = "Visual C++ for Python"
localappdata = os.environ["LOCALAPPDATA"]
suffix = "Bin\\x64" if ARCH == "x64" else "Bin"
path = os.path.join(localappdata, vs_python, suffix, tool)
if os.path.exists(path):
self.debug_print("Using {0} from {1}".format(tool, sdk_name))
return path
raise RuntimeError("{0} could not be found".format(tool))
class InstallLibPythonnet(install_lib.install_lib):
def install(self):
if not os.path.isdir(self.build_dir):
self.warn("'{0}' does not exist -- no Python modules"
" to install".format(self.build_dir))
return
if not os.path.exists(self.install_dir):
self.mkpath(self.install_dir)
# only copy clr.pyd/.so
for srcfile in glob.glob(os.path.join(self.build_dir, "clr.*")):
destfile = os.path.join(
self.install_dir, os.path.basename(srcfile))
self.copy_file(srcfile, destfile)
class InstallDataPythonnet(install_data.install_data):
def run(self):
build_cmd = self.get_finalized_command("build_ext")
install_cmd = self.get_finalized_command("install")
build_lib = os.path.abspath(build_cmd.build_lib)
install_platlib = os.path.relpath(
install_cmd.install_platlib, self.install_dir)
for i, data_files in enumerate(self.data_files):
if isinstance(data_files, str):
                self.data_files[i] = data_files.format(build_lib=build_lib)
else:
for j, filename in enumerate(data_files[1]):
data_files[1][j] = filename.format(build_lib=build_lib)
dest = data_files[0].format(install_platlib=install_platlib)
self.data_files[i] = dest, data_files[1]
return install_data.install_data.run(self)
###############################################################################
setupdir = os.path.dirname(__file__)
if setupdir:
os.chdir(setupdir)
setup_requires = []
if not os.path.exists(_get_interop_filename()):
setup_requires.append("pycparser")
setup(
name="pythonnet",
version="2.4.0.dev0",
description=".Net and Mono integration for Python",
url='https://pythonnet.github.io/',
license='MIT',
author="The Python for .Net developers",
author_email="pythondotnet@python.org",
setup_requires=setup_requires,
long_description=_get_long_description(),
ext_modules=[
Extension("clr", sources=list(_get_source_files()))
],
data_files=[
("{install_platlib}", [
"{build_lib}/Python.Runtime.dll",
]),
],
cmdclass={
"build_ext": BuildExtPythonnet,
"install_lib": InstallLibPythonnet,
"install_data": InstallDataPythonnet,
},
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Programming Language :: C#',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX :: Linux',
'Operating System :: MacOS :: MacOS X',
],
zip_safe=False,
)
| vmuriart/pythonnet | setup.py | Python | mit | 15,375 | 0 |
def bucketsort(arr, k):
"""
Input:
arr: A list of small ints
k: Upper bound of the size of the ints in arr (not inclusive)
Precondition:
all(isinstance(x, int) and 0 <= x < k for x in arr)
Output:
The elements of arr in sorted order
"""
counts = [0] * k
for x in arr:
counts[x] += 1
sorted_arr = []
for i, count in enumerate(counts):
sorted_arr.extend([i] * count)
return sorted_arr
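# Illustrative usage (not part of the original file):
#   bucketsort([3, 1, 4, 1, 2], 5)  ->  [1, 1, 2, 3, 4]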
| evandrix/Splat | code/demo/quixey/bucket_sort.py | Python | mit | 471 | 0 |
# Copyright (c) 2013 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
from datetime import datetime
import time
from neutron_lib import constants
from oslo_config import cfg
from oslo_utils import uuidutils
from webob import exc
from neutron.api.v2 import attributes
from neutron.common import constants as n_const
from neutron import context
from neutron.db import agents_db
from neutron.db import db_base_plugin_v2
from neutron.extensions import agent
from neutron.tests.common import helpers
from neutron.tests import tools
from neutron.tests.unit.api.v2 import test_base
from neutron.tests.unit.db import test_db_base_plugin_v2
_uuid = uuidutils.generate_uuid
_get_path = test_base._get_path
L3_HOSTA = 'hosta'
DHCP_HOSTA = 'hosta'
L3_HOSTB = 'hostb'
DHCP_HOSTC = 'hostc'
LBAAS_HOSTA = 'hosta'
LBAAS_HOSTB = 'hostb'
class AgentTestExtensionManager(object):
def get_resources(self):
# Add the resources to the global attribute map
# This is done here as the setup process won't
# initialize the main API router which extends
# the global attribute map
attributes.RESOURCE_ATTRIBUTE_MAP.update(
agent.RESOURCE_ATTRIBUTE_MAP)
return agent.Agent.get_resources()
def get_actions(self):
return []
def get_request_extensions(self):
return []
# This plugin class is just for testing
class TestAgentPlugin(db_base_plugin_v2.NeutronDbPluginV2,
agents_db.AgentDbMixin):
supported_extension_aliases = ["agent"]
class AgentDBTestMixIn(object):
def _list_agents(self, expected_res_status=None,
neutron_context=None,
query_string=None):
agent_res = self._list('agents',
neutron_context=neutron_context,
query_params=query_string)
if expected_res_status:
self.assertEqual(expected_res_status, agent_res.status_int)
return agent_res
def _register_agent_states(self, lbaas_agents=False):
"""Register two L3 agents and two DHCP agents."""
l3_hosta = helpers._get_l3_agent_dict(
L3_HOSTA, n_const.L3_AGENT_MODE_LEGACY)
l3_hostb = helpers._get_l3_agent_dict(
L3_HOSTB, n_const.L3_AGENT_MODE_LEGACY)
dhcp_hosta = helpers._get_dhcp_agent_dict(DHCP_HOSTA)
dhcp_hostc = helpers._get_dhcp_agent_dict(DHCP_HOSTC)
helpers.register_l3_agent(host=L3_HOSTA)
helpers.register_l3_agent(host=L3_HOSTB)
helpers.register_dhcp_agent(host=DHCP_HOSTA)
helpers.register_dhcp_agent(host=DHCP_HOSTC)
res = [l3_hosta, l3_hostb, dhcp_hosta, dhcp_hostc]
if lbaas_agents:
lbaas_hosta = {
'binary': 'neutron-loadbalancer-agent',
'host': LBAAS_HOSTA,
'topic': 'LOADBALANCER_AGENT',
'configurations': {'device_drivers': ['haproxy_ns']},
'agent_type': constants.AGENT_TYPE_LOADBALANCER}
lbaas_hostb = copy.deepcopy(lbaas_hosta)
lbaas_hostb['host'] = LBAAS_HOSTB
callback = agents_db.AgentExtRpcCallback()
callback.report_state(
self.adminContext,
agent_state={'agent_state': lbaas_hosta},
time=datetime.utcnow().strftime(constants.ISO8601_TIME_FORMAT))
callback.report_state(
self.adminContext,
agent_state={'agent_state': lbaas_hostb},
time=datetime.utcnow().strftime(constants.ISO8601_TIME_FORMAT))
res += [lbaas_hosta, lbaas_hostb]
return res
def _register_dvr_agents(self):
dvr_snat_agent = helpers.register_l3_agent(
host=L3_HOSTA, agent_mode=n_const.L3_AGENT_MODE_DVR_SNAT)
dvr_agent = helpers.register_l3_agent(
host=L3_HOSTB, agent_mode=n_const.L3_AGENT_MODE_DVR)
return [dvr_snat_agent, dvr_agent]
def _register_l3_agent(self, host):
helpers.register_l3_agent(host)
class AgentDBTestCase(AgentDBTestMixIn,
test_db_base_plugin_v2.NeutronDbPluginV2TestCase):
fmt = 'json'
def setUp(self):
plugin = 'neutron.tests.unit.extensions.test_agent.TestAgentPlugin'
# for these tests we need to enable overlapping ips
cfg.CONF.set_default('allow_overlapping_ips', True)
self.useFixture(tools.AttributeMapMemento())
ext_mgr = AgentTestExtensionManager()
super(AgentDBTestCase, self).setUp(plugin=plugin, ext_mgr=ext_mgr)
self.adminContext = context.get_admin_context()
def test_create_agent(self):
data = {'agent': {}}
_req = self.new_create_request('agents', data, self.fmt)
_req.environ['neutron.context'] = context.Context(
'', 'tenant_id')
res = _req.get_response(self.ext_api)
self.assertEqual(exc.HTTPBadRequest.code, res.status_int)
def test_list_agent(self):
agents = self._register_agent_states()
res = self._list('agents')
self.assertEqual(len(agents), len(res['agents']))
def test_show_agent(self):
self._register_agent_states()
agents = self._list_agents(
query_string='binary=neutron-l3-agent')
self.assertEqual(2, len(agents['agents']))
agent = self._show('agents', agents['agents'][0]['id'])
self.assertEqual('neutron-l3-agent', agent['agent']['binary'])
def test_update_agent(self):
self._register_agent_states()
agents = self._list_agents(
query_string='binary=neutron-l3-agent&host=' + L3_HOSTB)
self.assertEqual(1, len(agents['agents']))
com_id = agents['agents'][0]['id']
agent = self._show('agents', com_id)
new_agent = {}
new_agent['agent'] = {}
new_agent['agent']['admin_state_up'] = False
new_agent['agent']['description'] = 'description'
self._update('agents', com_id, new_agent)
agent = self._show('agents', com_id)
self.assertFalse(agent['agent']['admin_state_up'])
self.assertEqual('description', agent['agent']['description'])
def test_dead_agent(self):
cfg.CONF.set_override('agent_down_time', 1)
self._register_agent_states()
time.sleep(1.5)
agents = self._list_agents(
query_string='binary=neutron-l3-agent&host=' + L3_HOSTB)
self.assertFalse(agents['agents'][0]['alive'])
| bigswitch/neutron | neutron/tests/unit/extensions/test_agent.py | Python | apache-2.0 | 7,074 | 0 |
# Copyright 2011 Eldar Nugaev
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import unittest
from lxml import etree
from nova.api.openstack import compute
from nova.api.openstack.compute.contrib import server_diagnostics
from nova.api.openstack import wsgi
import nova.compute
from nova.openstack.common import jsonutils
from nova import test
from nova.tests.api.openstack import fakes
import nova.utils
UUID = 'abc'
def fake_get_diagnostics(self, _context, instance_uuid):
return {'data': 'Some diagnostic info'}
def fake_instance_get(self, _context, instance_uuid):
if instance_uuid != UUID:
raise Exception("Invalid UUID")
return {'uuid': instance_uuid}
class ServerDiagnosticsTest(test.TestCase):
def setUp(self):
super(ServerDiagnosticsTest, self).setUp()
self.flags(verbose=True)
self.stubs.Set(nova.compute.API, 'get_diagnostics',
fake_get_diagnostics)
self.stubs.Set(nova.compute.API, 'get', fake_instance_get)
self.router = compute.APIRouter()
def test_get_diagnostics(self):
req = fakes.HTTPRequest.blank('/fake/servers/%s/diagnostics' % UUID)
res = req.get_response(self.router)
output = jsonutils.loads(res.body)
self.assertEqual(output, {'data': 'Some diagnostic info'})
class TestServerDiagnosticsXMLSerializer(unittest.TestCase):
namespace = wsgi.XMLNS_V11
def _tag(self, elem):
tagname = elem.tag
self.assertEqual(tagname[0], '{')
tmp = tagname.partition('}')
namespace = tmp[0][1:]
self.assertEqual(namespace, self.namespace)
return tmp[2]
def test_index_serializer(self):
serializer = server_diagnostics.ServerDiagnosticsTemplate()
exemplar = dict(diag1='foo', diag2='bar')
text = serializer.serialize(exemplar)
print text
tree = etree.fromstring(text)
self.assertEqual('diagnostics', self._tag(tree))
self.assertEqual(len(tree), len(exemplar))
for child in tree:
tag = self._tag(child)
self.assertTrue(tag in exemplar)
self.assertEqual(child.text, exemplar[tag])
| tylertian/Openstack | openstack F/nova/nova/tests/api/openstack/compute/contrib/test_server_diagnostics.py | Python | apache-2.0 | 2,735 | 0 |
import bpy
# -----------------------------------------------------------------------------
# Draw UI, use an function to be append into 3D View Header
# -----------------------------------------------------------------------------
def ui_3D(self, context):
layout = self.layout
row = layout.row(align=True)
row.operator("view.grid_control", text='', icon='GRID')
icon = 'CURSOR'
row.operator("object.center_pivot_mesh_obj", text='', icon=icon)
icon = 'SMOOTH'
row.operator("object.smooth_shading", text='', icon=icon)
row = layout.row(align=True)
icon = 'FORCE_TEXTURE'
row.operator("unwrap.uv_checker", text='', icon=icon)
icon = 'EDITMODE_HLT'
row.operator("object.retopo_shading", text='', icon=icon)
# -----------------------------------------------------------------------------
# Draw UI, use an function to be append into UV/Image Editor View Header
# -----------------------------------------------------------------------------
def ui_UV(self, context):
layout = self.layout
row = layout.row(align=True)
icon = 'CURSOR'
row.operator("unwrap.reset_cursor", text='', icon=icon)
icon = 'FORCE_TEXTURE'
row.operator("unwrap.uv_checker", text='', icon=icon)
def register():
bpy.types.VIEW3D_HT_header.append(ui_3D)
bpy.types.IMAGE_HT_header.append(ui_UV)
def unregister():
bpy.types.VIEW3D_HT_header.remove(ui_3D)
bpy.types.IMAGE_HT_header.remove(ui_UV)
| stilobique/Icon-Header | views/header.py | Python | gpl-3.0 | 1,454 | 0.002063 |
import bountyfunding
from bountyfunding.core.const import *
from bountyfunding.core.data import clean_database
from test import to_object
from nose.tools import *
USER = "bountyfunding"
class Email_Test:
def setup(self):
self.app = bountyfunding.app.test_client()
clean_database()
def test_email(self):
eq_(len(self.get_emails()), 0)
r = self.app.post('/issues', data=dict(ref=1, status='READY',
title='Title', link='/issue/1'))
eq_(r.status_code, 200)
r = self.app.post('/issue/1/sponsorships',
data=dict(user=USER, amount=10))
eq_(r.status_code, 200)
r = self.app.get("/issue/1")
eq_(r.status_code, 200)
r = self.app.put('/issue/1', data=dict(
status=IssueStatus.to_string(IssueStatus.STARTED)))
eq_(r.status_code, 200)
emails = self.get_emails()
eq_(len(emails), 1)
email = emails[0]
eq_(email.recipient, USER)
ok_(email.issue_id)
ok_(email.body)
r = self.app.delete("/email/%s" % email.id)
eq_(r.status_code, 200)
def get_emails(self):
r = self.app.get("/emails")
eq_(r.status_code, 200)
return to_object(r).data
| bountyfunding/bountyfunding | test/integration_test/email_test.py | Python | agpl-3.0 | 1,266 | 0.006319 |
import logging
from mongoengine import *
from flask.ext.security import RoleMixin
from flask.ext.security import UserMixin
log = logging.getLogger(__name__)
class AuthRole(Document, RoleMixin):
name = StringField(max_length=80, unique=True)
description = StringField(max_length=255)
def __str__(self):
return self.name
class AuthUser(DynamicDocument, UserMixin):
id = StringField()
email = StringField(max_length=255)
password = StringField(max_length=255)
name = StringField()
family_name = StringField()
given_name = StringField()
picture = StringField()
hd = StringField()
verified_email = BooleanField()
google_token = StringField()
roles = ListField(ReferenceField(AuthRole), default=[])
@classmethod
def get_current_user(cls, google_token):
user = cls.objects(google_token=google_token)
if not user:
log.warning("User with google_token {0} "
"wasn't found in DB".format(google_token))
return user
def __str__(self):
        return '{0} {1} ({2})'.format(self.name, self.family_name, self.email)
| romansalin/testrail-reporting | testrail_reporting/auth/models.py | Python | apache-2.0 | 1,147 | 0 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import logging as loggers
import numpy as np
import theano.tensor as T
from theano.tensor.nnet import conv
from theano.tensor.signal import downsample
from deepy.utils import build_activation, UniformInitializer
from deepy.layers.layer import NeuralLayer
logging = loggers.getLogger(__name__)
class Convolution(NeuralLayer):
"""
Convolution layer with max-pooling.
"""
def __init__(self, filter_shape, pool_size=(2, 2),
reshape_input=False, border_mode="valid", flatten_output=False,
disable_pooling=False, activation='linear', init=None):
super(Convolution, self).__init__("convolution")
self.filter_shape = filter_shape
self.output_dim = filter_shape[0]
self.pool_size = pool_size
self.reshape_input = reshape_input
self.flatten_output = flatten_output
self.activation = activation
self.disable_pooling = disable_pooling
self.border_mode = border_mode
self.initializer = init if init else self._default_initializer()
def setup(self):
self._setup_params()
self._setup_functions()
def output(self, x):
if self.reshape_input:
img_width = T.cast(T.sqrt(x.shape[1]), "int32")
x = x.reshape((x.shape[0], 1, img_width, img_width), ndim=4)
conv_out = conv.conv2d(
input=x,
filters=self.W_conv,
filter_shape=self.filter_shape,
image_shape=None,
border_mode=self.border_mode
)
pooled_out = downsample.max_pool_2d(
input=conv_out,
ds=self.pool_size,
ignore_border=True
)
if self.disable_pooling:
pooled_out = conv_out
output = self._activation_func(pooled_out + self.B_conv.dimshuffle('x', 0, 'x', 'x'))
if self.flatten_output:
output = output.flatten(2)
return output
def _setup_functions(self):
self._activation_func = build_activation(self.activation)
def _setup_params(self):
self.W_conv = self.create_weight(suffix="conv", initializer=self.initializer, shape=self.filter_shape)
self.B_conv = self.create_bias(self.filter_shape[0], suffix="conv")
self.register_parameters(self.W_conv, self.B_conv)
def _default_initializer(self):
fan_in = np.prod(self.filter_shape[1:])
fan_out = (self.filter_shape[0] * np.prod(self.filter_shape[2:]) /
np.prod(self.pool_size))
weight_scale = np.sqrt(6. / (fan_in + fan_out))
        return UniformInitializer(scale=weight_scale)
| ZhangAustin/deepy | deepy/layers/conv.py | Python | mit | 2,682 | 0.001491 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack Foundation
# Copyright 2011 - 2012, Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import functools
import itertools
import time
import uuid
import eventlet
import greenlet
from oslo.config import cfg
from trove.openstack.common import excutils
from trove.openstack.common.gettextutils import _ # noqa
from trove.openstack.common import importutils
from trove.openstack.common import jsonutils
from trove.openstack.common import log as logging
from trove.openstack.common.rpc import amqp as rpc_amqp
from trove.openstack.common.rpc import common as rpc_common
qpid_codec = importutils.try_import("qpid.codec010")
qpid_messaging = importutils.try_import("qpid.messaging")
qpid_exceptions = importutils.try_import("qpid.messaging.exceptions")
LOG = logging.getLogger(__name__)
qpid_opts = [
cfg.StrOpt('qpid_hostname',
default='localhost',
help='Qpid broker hostname'),
cfg.IntOpt('qpid_port',
default=5672,
help='Qpid broker port'),
cfg.ListOpt('qpid_hosts',
default=['$qpid_hostname:$qpid_port'],
help='Qpid HA cluster host:port pairs'),
cfg.StrOpt('qpid_username',
default='',
help='Username for qpid connection'),
cfg.StrOpt('qpid_password',
default='',
help='Password for qpid connection',
secret=True),
cfg.StrOpt('qpid_sasl_mechanisms',
default='',
help='Space separated list of SASL mechanisms to use for auth'),
cfg.IntOpt('qpid_heartbeat',
default=60,
help='Seconds between connection keepalive heartbeats'),
cfg.StrOpt('qpid_protocol',
default='tcp',
help="Transport to use, either 'tcp' or 'ssl'"),
cfg.BoolOpt('qpid_tcp_nodelay',
default=True,
help='Disable Nagle algorithm'),
]
cfg.CONF.register_opts(qpid_opts)
JSON_CONTENT_TYPE = 'application/json; charset=utf8'
class ConsumerBase(object):
"""Consumer base class."""
def __init__(self, session, callback, node_name, node_opts,
link_name, link_opts):
"""Declare a queue on an amqp session.
'session' is the amqp session to use
'callback' is the callback to call when messages are received
'node_name' is the first part of the Qpid address string, before ';'
'node_opts' will be applied to the "x-declare" section of "node"
in the address string.
'link_name' goes into the "name" field of the "link" in the address
string
'link_opts' will be applied to the "x-declare" section of "link"
in the address string.
"""
self.callback = callback
self.receiver = None
self.session = None
addr_opts = {
"create": "always",
"node": {
"type": "topic",
"x-declare": {
"durable": True,
"auto-delete": True,
},
},
"link": {
"name": link_name,
"durable": True,
"x-declare": {
"durable": False,
"auto-delete": True,
"exclusive": False,
},
},
}
addr_opts["node"]["x-declare"].update(node_opts)
addr_opts["link"]["x-declare"].update(link_opts)
self.address = "%s ; %s" % (node_name, jsonutils.dumps(addr_opts))
self.connect(session)
def connect(self, session):
"""Declare the reciever on connect."""
self._declare_receiver(session)
def reconnect(self, session):
"""Re-declare the receiver after a qpid reconnect."""
self._declare_receiver(session)
def _declare_receiver(self, session):
self.session = session
self.receiver = session.receiver(self.address)
self.receiver.capacity = 1
def _unpack_json_msg(self, msg):
"""Load the JSON data in msg if msg.content_type indicates that it
is necessary. Put the loaded data back into msg.content and
update msg.content_type appropriately.
A Qpid Message containing a dict will have a content_type of
'amqp/map', whereas one containing a string that needs to be converted
back from JSON will have a content_type of JSON_CONTENT_TYPE.
:param msg: a Qpid Message object
:returns: None
"""
if msg.content_type == JSON_CONTENT_TYPE:
msg.content = jsonutils.loads(msg.content)
msg.content_type = 'amqp/map'
def consume(self):
"""Fetch the message and pass it to the callback object."""
message = self.receiver.fetch()
try:
self._unpack_json_msg(message)
msg = rpc_common.deserialize_msg(message.content)
self.callback(msg)
except Exception:
LOG.exception(_("Failed to process message... skipping it."))
finally:
# TODO(sandy): Need support for optional ack_on_error.
self.session.acknowledge(message)
def get_receiver(self):
return self.receiver
def get_node_name(self):
return self.address.split(';')[0]
class DirectConsumer(ConsumerBase):
"""Queue/consumer class for 'direct'."""
def __init__(self, conf, session, msg_id, callback):
"""Init a 'direct' queue.
'session' is the amqp session to use
'msg_id' is the msg_id to listen on
'callback' is the callback to call when messages are received
"""
super(DirectConsumer, self).__init__(
session, callback,
"%s/%s" % (msg_id, msg_id),
{"type": "direct"},
msg_id,
{
"auto-delete": conf.amqp_auto_delete,
"exclusive": True,
"durable": conf.amqp_durable_queues,
})
class TopicConsumer(ConsumerBase):
"""Consumer class for 'topic'."""
def __init__(self, conf, session, topic, callback, name=None,
exchange_name=None):
"""Init a 'topic' queue.
:param session: the amqp session to use
:param topic: is the topic to listen on
:paramtype topic: str
:param callback: the callback to call when messages are received
:param name: optional queue name, defaults to topic
"""
exchange_name = exchange_name or rpc_amqp.get_control_exchange(conf)
super(TopicConsumer, self).__init__(
session, callback,
"%s/%s" % (exchange_name, topic),
{}, name or topic,
{
"auto-delete": conf.amqp_auto_delete,
"durable": conf.amqp_durable_queues,
})
class FanoutConsumer(ConsumerBase):
"""Consumer class for 'fanout'."""
def __init__(self, conf, session, topic, callback):
"""Init a 'fanout' queue.
'session' is the amqp session to use
'topic' is the topic to listen on
'callback' is the callback to call when messages are received
"""
self.conf = conf
super(FanoutConsumer, self).__init__(
session, callback,
"%s_fanout" % topic,
{"durable": False, "type": "fanout"},
"%s_fanout_%s" % (topic, uuid.uuid4().hex),
{"exclusive": True})
def reconnect(self, session):
topic = self.get_node_name().rpartition('_fanout')[0]
params = {
'session': session,
'topic': topic,
'callback': self.callback,
}
self.__init__(conf=self.conf, **params)
super(FanoutConsumer, self).reconnect(session)
class Publisher(object):
"""Base Publisher class."""
def __init__(self, session, node_name, node_opts=None):
"""Init the Publisher class with the exchange_name, routing_key,
and other options
"""
self.sender = None
self.session = session
addr_opts = {
"create": "always",
"node": {
"type": "topic",
"x-declare": {
"durable": False,
# auto-delete isn't implemented for exchanges in qpid,
# but put in here anyway
"auto-delete": True,
},
},
}
if node_opts:
addr_opts["node"]["x-declare"].update(node_opts)
self.address = "%s ; %s" % (node_name, jsonutils.dumps(addr_opts))
self.reconnect(session)
def reconnect(self, session):
"""Re-establish the Sender after a reconnection."""
self.sender = session.sender(self.address)
def _pack_json_msg(self, msg):
"""Qpid cannot serialize dicts containing strings longer than 65535
characters. This function dumps the message content to a JSON
string, which Qpid is able to handle.
:param msg: May be either a Qpid Message object or a bare dict.
:returns: A Qpid Message with its content field JSON encoded.
"""
try:
msg.content = jsonutils.dumps(msg.content)
except AttributeError:
# Need to have a Qpid message so we can set the content_type.
msg = qpid_messaging.Message(jsonutils.dumps(msg))
msg.content_type = JSON_CONTENT_TYPE
return msg
def send(self, msg):
"""Send a message."""
try:
# Check if Qpid can encode the message
check_msg = msg
if not hasattr(check_msg, 'content_type'):
check_msg = qpid_messaging.Message(msg)
content_type = check_msg.content_type
enc, dec = qpid_messaging.message.get_codec(content_type)
enc(check_msg.content)
except qpid_codec.CodecException:
# This means the message couldn't be serialized as a dict.
msg = self._pack_json_msg(msg)
self.sender.send(msg)
class DirectPublisher(Publisher):
"""Publisher class for 'direct'."""
def __init__(self, conf, session, msg_id):
"""Init a 'direct' publisher."""
super(DirectPublisher, self).__init__(session, msg_id,
{"type": "direct"})
class TopicPublisher(Publisher):
"""Publisher class for 'topic'."""
def __init__(self, conf, session, topic):
"""init a 'topic' publisher.
"""
exchange_name = rpc_amqp.get_control_exchange(conf)
super(TopicPublisher, self).__init__(session,
"%s/%s" % (exchange_name, topic))
class FanoutPublisher(Publisher):
"""Publisher class for 'fanout'."""
def __init__(self, conf, session, topic):
"""init a 'fanout' publisher.
"""
super(FanoutPublisher, self).__init__(
session,
"%s_fanout" % topic, {"type": "fanout"})
class NotifyPublisher(Publisher):
"""Publisher class for notifications."""
def __init__(self, conf, session, topic):
"""init a 'topic' publisher.
"""
exchange_name = rpc_amqp.get_control_exchange(conf)
super(NotifyPublisher, self).__init__(session,
"%s/%s" % (exchange_name, topic),
{"durable": True})
class Connection(object):
"""Connection object."""
pool = None
def __init__(self, conf, server_params=None):
if not qpid_messaging:
raise ImportError("Failed to import qpid.messaging")
self.session = None
self.consumers = {}
self.consumer_thread = None
self.proxy_callbacks = []
self.conf = conf
if server_params and 'hostname' in server_params:
# NOTE(russellb) This enables support for cast_to_server.
server_params['qpid_hosts'] = [
'%s:%d' % (server_params['hostname'],
server_params.get('port', 5672))
]
params = {
'qpid_hosts': self.conf.qpid_hosts,
'username': self.conf.qpid_username,
'password': self.conf.qpid_password,
}
params.update(server_params or {})
self.brokers = params['qpid_hosts']
self.username = params['username']
self.password = params['password']
self.connection_create(self.brokers[0])
self.reconnect()
def connection_create(self, broker):
# Create the connection - this does not open the connection
self.connection = qpid_messaging.Connection(broker)
# Check if flags are set and if so set them for the connection
# before we call open
self.connection.username = self.username
self.connection.password = self.password
self.connection.sasl_mechanisms = self.conf.qpid_sasl_mechanisms
# Reconnection is done by self.reconnect()
self.connection.reconnect = False
self.connection.heartbeat = self.conf.qpid_heartbeat
self.connection.transport = self.conf.qpid_protocol
self.connection.tcp_nodelay = self.conf.qpid_tcp_nodelay
def _register_consumer(self, consumer):
self.consumers[str(consumer.get_receiver())] = consumer
def _lookup_consumer(self, receiver):
return self.consumers[str(receiver)]
def reconnect(self):
"""Handles reconnecting and re-establishing sessions and queues."""
attempt = 0
delay = 1
while True:
# Close the session if necessary
if self.connection.opened():
try:
self.connection.close()
except qpid_exceptions.ConnectionError:
pass
broker = self.brokers[attempt % len(self.brokers)]
attempt += 1
try:
self.connection_create(broker)
self.connection.open()
except qpid_exceptions.ConnectionError as e:
msg_dict = dict(e=e, delay=delay)
msg = _("Unable to connect to AMQP server: %(e)s. "
"Sleeping %(delay)s seconds") % msg_dict
LOG.error(msg)
time.sleep(delay)
delay = min(2 * delay, 60)
else:
LOG.info(_('Connected to AMQP server on %s'), broker)
break
self.session = self.connection.session()
if self.consumers:
consumers = self.consumers
self.consumers = {}
for consumer in consumers.itervalues():
consumer.reconnect(self.session)
self._register_consumer(consumer)
LOG.debug(_("Re-established AMQP queues"))
def ensure(self, error_callback, method, *args, **kwargs):
while True:
try:
return method(*args, **kwargs)
except (qpid_exceptions.Empty,
qpid_exceptions.ConnectionError) as e:
if error_callback:
error_callback(e)
self.reconnect()
def close(self):
"""Close/release this connection."""
self.cancel_consumer_thread()
self.wait_on_proxy_callbacks()
try:
self.connection.close()
except Exception:
# NOTE(dripton) Logging exceptions that happen during cleanup just
# causes confusion; there's really nothing useful we can do with
# them.
pass
self.connection = None
def reset(self):
"""Reset a connection so it can be used again."""
self.cancel_consumer_thread()
self.wait_on_proxy_callbacks()
self.session.close()
self.session = self.connection.session()
self.consumers = {}
def declare_consumer(self, consumer_cls, topic, callback):
"""Create a Consumer using the class that was passed in and
add it to our list of consumers
"""
def _connect_error(exc):
log_info = {'topic': topic, 'err_str': str(exc)}
LOG.error(_("Failed to declare consumer for topic '%(topic)s': "
"%(err_str)s") % log_info)
def _declare_consumer():
consumer = consumer_cls(self.conf, self.session, topic, callback)
self._register_consumer(consumer)
return consumer
return self.ensure(_connect_error, _declare_consumer)
def iterconsume(self, limit=None, timeout=None):
"""Return an iterator that will consume from all queues/consumers."""
def _error_callback(exc):
if isinstance(exc, qpid_exceptions.Empty):
LOG.debug(_('Timed out waiting for RPC response: %s') %
str(exc))
raise rpc_common.Timeout()
else:
LOG.exception(_('Failed to consume message from queue: %s') %
str(exc))
def _consume():
nxt_receiver = self.session.next_receiver(timeout=timeout)
try:
self._lookup_consumer(nxt_receiver).consume()
except Exception:
LOG.exception(_("Error processing message. Skipping it."))
for iteration in itertools.count(0):
if limit and iteration >= limit:
raise StopIteration
yield self.ensure(_error_callback, _consume)
def cancel_consumer_thread(self):
"""Cancel a consumer thread."""
if self.consumer_thread is not None:
self.consumer_thread.kill()
try:
self.consumer_thread.wait()
except greenlet.GreenletExit:
pass
self.consumer_thread = None
def wait_on_proxy_callbacks(self):
"""Wait for all proxy callback threads to exit."""
for proxy_cb in self.proxy_callbacks:
proxy_cb.wait()
def publisher_send(self, cls, topic, msg):
"""Send to a publisher based on the publisher class."""
def _connect_error(exc):
log_info = {'topic': topic, 'err_str': str(exc)}
LOG.exception(_("Failed to publish message to topic "
"'%(topic)s': %(err_str)s") % log_info)
def _publisher_send():
publisher = cls(self.conf, self.session, topic)
publisher.send(msg)
return self.ensure(_connect_error, _publisher_send)
def declare_direct_consumer(self, topic, callback):
"""Create a 'direct' queue.
In nova's use, this is generally a msg_id queue used for
responses for call/multicall
"""
self.declare_consumer(DirectConsumer, topic, callback)
def declare_topic_consumer(self, topic, callback=None, queue_name=None,
exchange_name=None):
"""Create a 'topic' consumer."""
self.declare_consumer(functools.partial(TopicConsumer,
name=queue_name,
exchange_name=exchange_name,
),
topic, callback)
def declare_fanout_consumer(self, topic, callback):
"""Create a 'fanout' consumer."""
self.declare_consumer(FanoutConsumer, topic, callback)
def direct_send(self, msg_id, msg):
"""Send a 'direct' message."""
self.publisher_send(DirectPublisher, msg_id, msg)
def topic_send(self, topic, msg, timeout=None):
"""Send a 'topic' message."""
#
# We want to create a message with attributes, e.g. a TTL. We
# don't really need to keep 'msg' in its JSON format any longer
# so let's create an actual qpid message here and get some
# value-add on the go.
#
# WARNING: Request timeout happens to be in the same units as
# qpid's TTL (seconds). If this changes in the future, then this
# will need to be altered accordingly.
#
qpid_message = qpid_messaging.Message(content=msg, ttl=timeout)
self.publisher_send(TopicPublisher, topic, qpid_message)
def fanout_send(self, topic, msg):
"""Send a 'fanout' message."""
self.publisher_send(FanoutPublisher, topic, msg)
def notify_send(self, topic, msg, **kwargs):
"""Send a notify message on a topic."""
self.publisher_send(NotifyPublisher, topic, msg)
def consume(self, limit=None):
"""Consume from all queues/consumers."""
it = self.iterconsume(limit=limit)
while True:
try:
it.next()
except StopIteration:
return
def consume_in_thread(self):
"""Consumer from all queues/consumers in a greenthread."""
@excutils.forever_retry_uncaught_exceptions
def _consumer_thread():
try:
self.consume()
except greenlet.GreenletExit:
return
if self.consumer_thread is None:
self.consumer_thread = eventlet.spawn(_consumer_thread)
return self.consumer_thread
def create_consumer(self, topic, proxy, fanout=False):
"""Create a consumer that calls a method in a proxy object."""
proxy_cb = rpc_amqp.ProxyCallback(
self.conf, proxy,
rpc_amqp.get_connection_pool(self.conf, Connection))
self.proxy_callbacks.append(proxy_cb)
if fanout:
consumer = FanoutConsumer(self.conf, self.session, topic, proxy_cb)
else:
consumer = TopicConsumer(self.conf, self.session, topic, proxy_cb)
self._register_consumer(consumer)
return consumer
def create_worker(self, topic, proxy, pool_name):
"""Create a worker that calls a method in a proxy object."""
proxy_cb = rpc_amqp.ProxyCallback(
self.conf, proxy,
rpc_amqp.get_connection_pool(self.conf, Connection))
self.proxy_callbacks.append(proxy_cb)
consumer = TopicConsumer(self.conf, self.session, topic, proxy_cb,
name=pool_name)
self._register_consumer(consumer)
return consumer
def join_consumer_pool(self, callback, pool_name, topic,
exchange_name=None, ack_on_error=True):
"""Register as a member of a group of consumers for a given topic from
the specified exchange.
Exactly one member of a given pool will receive each message.
A message will be delivered to multiple pools, if more than
one is created.
"""
callback_wrapper = rpc_amqp.CallbackWrapper(
conf=self.conf,
callback=callback,
connection_pool=rpc_amqp.get_connection_pool(self.conf,
Connection),
)
self.proxy_callbacks.append(callback_wrapper)
consumer = TopicConsumer(conf=self.conf,
session=self.session,
topic=topic,
callback=callback_wrapper,
name=pool_name,
exchange_name=exchange_name)
self._register_consumer(consumer)
return consumer
def create_connection(conf, new=True):
"""Create a connection."""
return rpc_amqp.create_connection(
conf, new,
rpc_amqp.get_connection_pool(conf, Connection))
def multicall(conf, context, topic, msg, timeout=None):
"""Make a call that returns multiple times."""
return rpc_amqp.multicall(
conf, context, topic, msg, timeout,
rpc_amqp.get_connection_pool(conf, Connection))
def call(conf, context, topic, msg, timeout=None):
"""Sends a message on a topic and wait for a response."""
return rpc_amqp.call(
conf, context, topic, msg, timeout,
rpc_amqp.get_connection_pool(conf, Connection))
def cast(conf, context, topic, msg):
"""Sends a message on a topic without waiting for a response."""
return rpc_amqp.cast(
conf, context, topic, msg,
rpc_amqp.get_connection_pool(conf, Connection))
def fanout_cast(conf, context, topic, msg):
"""Sends a message on a fanout exchange without waiting for a response."""
return rpc_amqp.fanout_cast(
conf, context, topic, msg,
rpc_amqp.get_connection_pool(conf, Connection))
def cast_to_server(conf, context, server_params, topic, msg):
"""Sends a message on a topic to a specific server."""
return rpc_amqp.cast_to_server(
conf, context, server_params, topic, msg,
rpc_amqp.get_connection_pool(conf, Connection))
def fanout_cast_to_server(conf, context, server_params, topic, msg):
"""Sends a message on a fanout exchange to a specific server."""
return rpc_amqp.fanout_cast_to_server(
conf, context, server_params, topic, msg,
rpc_amqp.get_connection_pool(conf, Connection))
def notify(conf, context, topic, msg, envelope):
"""Sends a notification event on a topic."""
return rpc_amqp.notify(conf, context, topic, msg,
rpc_amqp.get_connection_pool(conf, Connection),
envelope)
def cleanup():
return rpc_amqp.cleanup(Connection.pool)
| citrix-openstack-build/trove | trove/openstack/common/rpc/impl_qpid.py | Python | apache-2.0 | 26,203 | 0 |
from os import path
from collections import namedtuple
from subprocess import Popen, PIPE
def sys(cmd):
return Popen(cmd, stdout=PIPE, shell=True).stdout.read()
class USB:
'''
Depends on findmnt to find source from target and extra information like fs type
'''
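    # Example (device and mount point are hypothetical): for a stick mounted
    # at /mnt/usb, `findmnt -T /mnt/usb` prints a header row followed by
    #   /mnt/usb /dev/sdb1 vfat rw,nosuid,...
    # which _get_info() turns into
    #   USB_info(target='/mnt/usb', source='/dev/sdb1', fstype='vfat',
    #            options='rw,nosuid,...')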
def __init__(self, target):
try:
assert path.exists(target) and not path.isfile(target) # do i even exist?
except AssertionError:
raise AssertionError('Needs target (mounted to)')
if path.isdir(target):
self.data = self._get_info(target, 'T')
else:
self.data = self._get_info(target, 'S')
def _get_info(self, d, v, splitchar='|'):
for i, x in enumerate(sys("findmnt -%s \"%s\"" % (v, d)).split('\n')):
if i > 0:
return namedtuple('USB_info', 'target source fstype options')(*splitchar.join(x.split()).split(splitchar, 3))
def __repr__(self):
return '<%s | %s>' % (self.data.source, self.data.target)
# print USB('/dev/sdb1').data
| ixtabinnovations/USB_Cryptor | USB.py | Python | gpl-3.0 | 993 | 0.021148 |
from django.test import TestCase, Client
from jpspapp.models import Club, Activity,UserProfile
from django.contrib.auth.models import User
from django.contrib.auth import authenticate, login
import datetime
# Create your tests here.
class ClubTestCase(TestCase):
def setUp(self):
User.objects.create_user(username="clubtest", email='123@123.com', password='jp123456')
Club.objects.create(ClubObject=User.objects.get(username='clubtest'), ClubName="测试社团", ClubId=601, Type='1',
ShezhangName="社长", ShezhangQq="12345678", ShezhangGrade='1', ShezhangClass='1',
IfRecruit=True, EnrollGroupQq='12345678')
def test_club_update(self):
club = Club.objects.get(ClubName="测试社团")
club.ShezhangName = "社长姓名"
club.save()
self.assertEqual(club.ShezhangName, "社长姓名")
    def test_club_del(self):
club = Club.objects.get(ClubName="测试社团")
club.delete()
user = User.objects.get(username="clubtest")
user.delete()
class ActivityModelTest(TestCase):
def setUp(self):
User.objects.create_user(username="clubtest", email='123@123.com', password='jp123456')
Club.objects.create(ClubObject=User.objects.get(username='clubtest'), ClubName="测试社团", ClubId=601, Type='1',
ShezhangName="社长", ShezhangQq="12345678", ShezhangGrade='1', ShezhangClass='1',
IfRecruit=True, EnrollGroupQq='12345678')
Activity.objects.create(Name="活动名称", Region="活动地点", ClubObject=Club.objects.get(ClubName="测试社团"),
Content="活动内容", Date1=datetime.datetime.now(),
Date2=datetime.datetime.now() + datetime.timedelta(days=1), State='0', Type='普通')
def test_update(self):
activity = Activity.objects.get(Name="活动名称")
activity.Content = "活动内容测试"
activity.save()
self.assertEqual(activity.Content, '活动内容测试')
def test_delete(self):
Activity.objects.get(Region="活动地点").delete()
Club.objects.get(ShezhangName='社长').delete()
User.objects.get(username='clubtest').delete()
class UserProfileModelTest(TestCase):
def setUp(self):
User.objects.create(username='userprofiletest',email='123@123.com',password='jp123456')
UserProfile.objects.create(UserObject=User.objects.get(username='userprofiletest'),UserName='测试用户',Class=1,Grade=1,AttendYear='2017',QQ='12345678',Phone='12345678901',Email='123@123.com')
def test_update(self):
user = UserProfile.objects.get(UserName='测试用户')
user.Class= 2
user.save()
self.assertEqual(user.Class,2)
def test_delete(self):
user = UserProfile.objects.get(UserName='测试用户')
user.delete()
class UserModelTest(TestCase):
def create(self):
pass
    def update(self):
pass
def delete(self):
pass
class PostModelTest(TestCase):
def test(self):
pass
    def update(self):
pass
def delete(self):
pass
| AlienStudio/jpsp_python | jpsp/jpspapp/tests.py | Python | mit | 3,241 | 0.00779 |
#!/usr/bin/env python
from unittest import TestCase
from before_after import before, after, before_after
from before_after.tests import test_functions
class TestBeforeAfter(TestCase):
def setUp(self):
test_functions.reset_test_list()
super(TestBeforeAfter, self).setUp()
def test_before(self):
def before_fn(*a):
test_functions.test_list.append(1)
with before('before_after.tests.test_functions.sample_fn', before_fn):
test_functions.sample_fn(2)
self.assertEqual(test_functions.test_list, [1, 2])
def test_after(self):
def after_fn(*a):
test_functions.test_list.append(2)
with after('before_after.tests.test_functions.sample_fn', after_fn):
test_functions.sample_fn(1)
self.assertEqual(test_functions.test_list, [1, 2])
def test_before_and_after(self):
def before_fn(*a):
test_functions.test_list.append(1)
def after_fn(*a):
test_functions.test_list.append(3)
with before_after(
'before_after.tests.test_functions.sample_fn',
before_fn=before_fn, after_fn=after_fn):
test_functions.sample_fn(2)
self.assertEqual(test_functions.test_list, [1, 2, 3])
def test_before_once(self):
def before_fn(*a):
test_functions.test_list.append(1)
with before(
'before_after.tests.test_functions.sample_fn',
before_fn, once=True):
test_functions.sample_fn(2)
test_functions.sample_fn(3)
self.assertEqual(test_functions.test_list, [1, 2, 3])
def test_after_once(self):
def after_fn(*a):
test_functions.test_list.append(2)
with after(
'before_after.tests.test_functions.sample_fn',
after_fn, once=True):
test_functions.sample_fn(1)
test_functions.sample_fn(3)
self.assertEqual(test_functions.test_list, [1, 2, 3])
def test_before_and_after_once(self):
def before_fn(*a):
test_functions.test_list.append(1)
def after_fn(*a):
test_functions.test_list.append(3)
with before_after(
'before_after.tests.test_functions.sample_fn',
before_fn=before_fn, after_fn=after_fn, once=True):
test_functions.sample_fn(2)
test_functions.sample_fn(4)
self.assertEqual(test_functions.test_list, [1, 2, 3, 4])
def test_before_method(self):
sample_instance = test_functions.Sample()
def before_fn(self, *a):
sample_instance.instance_list.append(1)
with before('before_after.tests.test_functions.Sample.method', before_fn):
sample_instance.method(2)
self.assertEqual(sample_instance.instance_list, [1, 2])
| c-oreills/before_after | before_after/tests/test_before_after.py | Python | gpl-2.0 | 2,890 | 0.000346 |
#! /usr/bin/env python
"""
Simulate DSR over a network of nodes.
Revision Info
=============
* $LastChangedBy: mandke $
* $LastChangedDate: 2011-10-26 21:51:40 -0500 (Wed, 26 Oct 2011) $
* $LastChangedRevision: 5314 $
:author: Ketan Mandke <kmandke@mail.utexas.edu>
:copyright:
Copyright 2009-2011 The University of Texas at Austin
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
__docformat__ = "restructuredtext en"
from SimPy.Simulation import *
from scapy.all import *
from wins import *
from wins.ieee80211 import *
from copy import copy, deepcopy
from wins.backend import RNG_init
from wins.backend import *
from wins.mac import RBAR, ARF
from wins.net import DSR
from wins.traffic import Agent
import sys
from optparse import OptionParser
import numpy as np
import struct
import gc
import time
RNG_INIT = 1
EXIT_WITH_TRACE = 1
class Node(Element):
name = "node"
tracename = "NODE"
def __init__(self, **kwargs):
Element.__init__(self, **kwargs)
def configure(self, pos=None, # motion \
useshared=False, # arp \
cfocorrection=True, # phy \
usecsma=False, # mac \
rreqrate=None, datarate=None, # net \
dest=None, plen=None, delay=None, mode=None, # agent \
**kwargs):
cif = self.newchild('cif', Dot11NRadio)
phy = self.newchild('phy', Dot11NPHY, radio=cif, cfocorrection=cfocorrection)
mac = self.newchild('mac', DCF, usecsma=usecsma, phy=phy)
net = self.newchild('net', DSR, rreqrate=rreqrate, datarate=datarate)
arp = self.newchild('arp', ARP, useshared=useshared)
agt = self.newchild('agent', Agent, dest=dest, plen=plen, \
delay=delay, mode=mode)
mobi = self.newchild('motion', Motion, pos=pos)
# connect ports
agt.connect(net)
arp.connect(net, mac)
mac.connect(phy)
phy.connect(cif)
def read_topo(options, topofile):
"""Read topology layout from file."""
f = file(topofile, 'r')
s = f.readline()
topo = {'border':None, 'layout':None}
done = not (s)
while not done:
# convert s to dict (check for border and layout)
try:
d = eval(s)
assert isinstance(d, dict)
assert ('border' in d) and ('layout' in d)
except:
d = None
# add dict to topo
if d: topo.update(d)
# get next input
s = f.readline()
done = not(s)
f.close()
return topo
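# A topology file consumed by read_topo() holds one dict literal per line; the
# values below are made up for illustration:
#   {'border': (0.0, 500.0, 0.0, 500.0), 'layout': [(12.3, 45.6), (78.9, 210.0)]}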
def read_route(options, routefile):
"""Read routing tables from file."""
f = file(routefile, 'r')
s = f.readline()
routedata = {}
done = not (s)
while not done:
# convert s to dict
try:
d = eval(s)
assert isinstance(d, dict)
for x,y in d.items():
# maps src x -> routing table y
assert isinstance(y, dict)
for a,b in y.items():
# maps dst a -> info b (for route table y)
assert ('index' in b)
assert ('list' in b)
except:
d = None
# add dict to routedata
if d: routedata.update(d)
# get next input
s = f.readline()
done = not(s)
f.close()
return routedata
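# A routing file consumed by read_route() likewise holds one dict literal per
# line, mapping a source address to its routing table; hypothetical example:
#   {1: {5: {'index': 0, 'list': [(2, 0.0, 3)]}}}
# where each 'list' entry is read as a (cost, timestamp, nexthop) tuple by
# set_routing() below.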
def get_topology(options, numnodes):
"""Get/create topology."""
# load topology from file
if options.usetopo:
topofile = options.usetopo
topo = read_topo(options, topofile)
border = topo['border']
layout = topo['layout']
xmin, xmax, ymin, ymax = border[:4]
assert (len(layout)>=numnodes)
return topo
# create new topology
assert (options.xmin<=options.xmax)
assert (options.ymin<=options.ymax)
xmin, xmax = options.xmin, options.xmax
ymin, ymax = options.ymin, options.ymax
border = (xmin, xmax, ymin, ymax)
# use uniform distribution for layout
xpos = np.random.uniform(xmin, xmax, numnodes)
ypos = np.random.uniform(ymin, ymax, numnodes)
layout = [(xpos[k],ypos[k]) for k in range(numnodes)]
# verify layout parameters
assert (len(layout)>=numnodes)
topo = {'border':border, 'layout':layout}
return topo
def set_routing(options, nodelist):
"""Set routing tables if needed."""
if not options.useroute: return
routefile = options.useroute
rdata = read_route(options, routefile)
for n in nodelist:
addr = n.net.address
if addr not in rdata: continue
for dst, data in rdata[addr].items():
paths = data['list']
for c,ts,nh in paths:
n.net.addroute(dst, nexthop=nh, cost=c)
return rdata
def run_experiment(options):
# record start time
starttime = time.time()
# initialize RNG
if RNG_INIT: RNG_init()
# set SIMULATION parameters
mon = Element(tracename="MON")
verbose = options.verbose
stoptime = 2.0
if not (options.stop<0): stoptime = options.stop
stoptime *= 1.05 # allow events around stoptime to finish
simargs = {'verbose':verbose}
# set EXPERIMENT parameters
ntx, nrx = 1, 1
numnodes = options.numnodes
nconnect = options.nconnect
assert (nconnect>0)
assert (numnodes>=2*nconnect)
# set CHANNEL parameters
alpha = options.alpha
modeltype = options.tgnmodel # default -> LOS Channel
usedoppler = options.usedoppler
usefading = options.usefading
envspeed = options.envspeed
chargs = {'modeltype':modeltype, 'n':alpha, \
'usedoppler':usedoppler, 'usefading':usefading, \
'environmentspeed': envspeed}
chargs.update(simargs)
# set AGENT parameters
mode = options.agent_mode
plen = Agent.DefaultPacketLength
rate = options.rate # transmission rate in packets/second
delay = None
if mode is None: mode = "cbr"
if options.plen>0: plen = options.plen
if (rate>0): delay = 1.0/rate
# set agent delay if not already specified
if delay is None:
cm = Dot11NChannel(**chargs)
chan = Dot11N_Channel(cm.modelnum, nrx, ntx, cm.flags)
delay = 2*chan.coherencetime()
if rate is None: rate = 1.0/delay
agtargs = {'plen': plen, 'mode':mode, 'delay':delay}
# set DSR parameters
rreqrate, datarate = None, None
if 0<=options.rreqrate<8*ntx: rreqrate=options.rreqrate
if 0<=options.datarate<8*ntx: datarate=options.datarate
netargs = {'rreqrate':rreqrate, 'datarate':datarate}
# set other protocol parameters (MAC, ARP, etc.)
useshared = True
arpargs = {'useshared':useshared}
usecsma = False
macargs = {'usecsma':usecsma}
# set phy parameters
Dot11NPHY.usewaveform = options.usewaveform
Dot11NRadio.Ntx, Dot11NRadio.Nrx = ntx, nrx
Dot11NRadio.fomax = options.fomax
cfocorrection = True
if options.disable_cfo_correction: cfocorrection = False
phyargs = {'cfocorrection':cfocorrection}
# set node parameters
nodeargs = {}
nodeargs.update(agtargs)
nodeargs.update(netargs)
nodeargs.update(arpargs)
nodeargs.update(macargs)
nodeargs.update(phyargs)
nodeargs.update(simargs)
############################
# Set Up Simulation
############################
initialize()
# create channel
bidirectional = options.bidirectional
ch = Channel(model=Dot11NChannel, bidirectional=bidirectional, **simargs)
# get topology
topo = get_topology(options, numnodes)
border = topo['border']
layout = topo['layout']
# create nodes
nodelist = []
for k in range(numnodes):
pos = layout[k]
n = Node(pos=pos, **nodeargs)
nodelist.append(n)
n.motion.log("pos", pos=["%.3f"%(p) for p in n.motion.position] )
# connect source/destination pairs
assert (nconnect<len(nodelist))
for k in range(nconnect):
src = nodelist[k] # first N are sources
dst = nodelist[-k-1] # last N are destinations
src.agent.dest = dst.net.address
# set routing tables
set_routing(options, nodelist)
# connect all nodes via channel
for n in nodelist:
for m in nodelist:
if (n is not m):
ch.add_edge(n.cif, m.cif, **chargs)
# create monitor
if options.monitor:
mon = Monitor(period=stoptime/1e4)
mon.start()
############################
# Run Simulation
############################
if options.usetopo:
mon.log("topo", topofile=options.usetopo)
mon.log("model", **chargs)
mon.log("rate", rate="%.5g"%(rate) )
simerror = None
if EXIT_WITH_TRACE:
try:
simulate(until=stoptime)
except Exception, e:
mon.log("SIMERR", error=str(e))
simerror = e
else:
simulate(until=stoptime)
# log remaining trace information
mon.log("stoptime", stoptime="%.6f"%(stoptime))
n = gc.collect()
mon.log("GC", collected=n)
totaltime = time.time() - starttime
t = time.gmtime(totaltime)
mon.log("runtime", runtime="%02d:%02d:%02d (h/m/s)"%(t.tm_hour, t.tm_min, t.tm_sec) )
############################
# Teardown/Cleanup
############################
# print output
sys.stdout.flush()
if options.trace: ch.trace.output()
# write tracefile
if options.output is not None: ch.trace.write(options.output)
# write topofile
if options.savetopo:
f = file(options.savetopo, 'w')
f.write("%s\n"%(topo) )
f.close()
# write routefile
if options.saveroute:
# write data
f = file(options.saveroute, 'w')
for n in nodelist:
addr = n.net.address
rdata = {addr: n.net.table.data.copy()}
f.write("%s\n"%(rdata))
f.close()
# if Exception occurred during simulation ...
if simerror: raise simerror
def main():
usage = "%prog [OPTIONS]"
parser = OptionParser(usage=usage)
# simulation parameters
parser.add_option("-v", "--verbose", dest="verbose", type="int", \
default=ROUTING_VERBOSE+1, help="Set verbose level [default=%default].")
parser.add_option("-t", "--trace", dest="trace", action="store_true", \
default=False, help="Output formatted trace to stdout")
parser.add_option("-o", "--output", dest="output", \
default=None, help="Name of output file for trace")
parser.add_option("-s", "--stop", dest="stop", \
type="float", default=2.0, \
help="Run simulation until stop time [default=%default]")
parser.add_option("-m", "--monitor", dest="monitor", action="store_true", \
default=False, help="Enable simulation montior")
# experiment parameters
parser.add_option("-n", "--num-nodes", dest="numnodes", type="int", \
default=50, help="Set number of nodes [default=%default]")
parser.add_option("-c", "--num-connections", dest="nconnect", type="int", \
default=1, help="Set number of active connections [default=%default]")
# agent parameters
parser.add_option("-r", "--rate", dest="rate", type="float", \
default=None, help="Packets/second generated by a source [default=%default]")
parser.add_option("-l", "--packet-length", dest="plen", type="int", \
default=1024, help="Set packet size in bytes [default=%default]")
parser.add_option("", "--agent-mode", dest="agent_mode", \
default=None, help="Specify traffic mode [options=%s]."%(Agent.TrafficModes))
# net parameters
parser.add_option("", "--rreqrate", dest="rreqrate", type="int", \
default=None, help="Set rate index for RREQ in DSR [default=%default]")
parser.add_option("", "--datarate", dest="datarate", type="int", \
default=None, help="Set rate index for non-RREQ packets in DSR [default=%default]")
# mac parameters
# phy parameters
parser.add_option("", "--mcs", dest="mcs", type="int", \
default=0, help="Set rate index for MCS [default=%default]")
parser.add_option("", "--fomax", dest="fomax", \
type="float", default=0.0, \
help="Specify maximum frequency offset in ppm [default=%default]")
parser.add_option("", "--use-waveform", dest="usewaveform", action="store_true", \
default=False, help="Enable waveform-level simulation [default=%default]")
parser.add_option("", "--disable-cfo-correction", \
dest="disable_cfo_correction", action="store_true", \
default=False, help="Disable CFO correction in waveform-level simulation [default=%default]")
# channel parameters
parser.add_option("", "--tgn-model", dest="tgnmodel", \
default=None, help="Specify TGn model.")
parser.add_option("", "--alpha", dest="alpha", type="float", \
default=2.0, help="Specify pathloss exponent [default=%default]")
parser.add_option("", "--use-doppler", dest="usedoppler", action="store_true", \
default=False, help="Enable doppler filter for fading in TGn channel model.")
parser.add_option("", "--disable-fading", dest="usefading", action="store_false", \
default=True, help="Normalize channel and remove impact of fading on pathloss in TGn channel model.")
parser.add_option("-E", "--environment-speed", dest="envspeed", type="float", \
default=1.2, help="Environmental speed in (km/hr) [default=%default]")
parser.add_option("", "--bidirectional-channel", dest="bidirectional", action="store_true", \
default=False, help="Use bidirectional links in channel [default=%default]")
# topology/layout parameters
parser.add_option("", "--xmin", dest="xmin", type="float", \
default=0.0, help="Set x-axis left boundary [default=%default]")
parser.add_option("", "--xmax", dest="xmax", type="float", \
default=500.0, help="Set x-axis right boundary [default=%default]")
parser.add_option("", "--ymin", dest="ymin", type="float", \
default=0.0, help="Set y-axis lower boundary [default=%default]")
parser.add_option("", "--ymax", dest="ymax", type="float", \
default=500.0, help="Set y-axis upper boundary [default=%default]")
parser.add_option("", "--use-topo", dest="usetopo", \
default=None, help="Specify topology file instead of generating random topology.")
parser.add_option("", "--save-topo", dest="savetopo", \
default=None, help="Save topology to file.")
# routing parameters
parser.add_option("", "--use-route", dest="useroute", \
default=None, help="Specify routing file to initialize route tables.")
parser.add_option("", "--save-route", dest="saveroute", \
default=None, help="Save route tables to file.")
(options, args) = parser.parse_args()
if len(args)>0:
print "Invalid number of arguments."
parser.print_help()
raise SystemExit
run_experiment(options)
if __name__ == '__main__':
main()
| reidlindsay/wins | sandbox/experiments/dsr/icc/test.py | Python | apache-2.0 | 15,846 | 0.011612 |
# #
# Copyright 2012-2019 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),
# Flemish Research Foundation (FWO) (http://www.fwo.be/en)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# https://github.com/easybuilders/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
# #
"""
Support for Parastation MPI as toolchain MPI library.
:author: Kenneth Hoste (Ghent University)
"""
from easybuild.toolchains.mpi.mpich import Mpich
class Psmpi(Mpich):
"""Parastation MPI class"""
MPI_MODULE_NAME = ['psmpi']
def _set_mpi_compiler_variables(self):
"""Set the MPICH_{CC, CXX, F77, F90, FC} variables."""
# hardwire MPI wrapper commands (otherwise Mpich parent class sets them based on MPICH version)
self.MPI_COMPILER_MPIF77 = 'mpif77'
self.MPI_COMPILER_MPIF90 = 'mpif90'
self.MPI_COMPILER_MPIFC = 'mpif90'
super(Psmpi, self)._set_mpi_compiler_variables()
| gppezzi/easybuild-framework | easybuild/toolchains/mpi/psmpi.py | Python | gpl-2.0 | 1,693 | 0.001772 |
"""
Common test utilities for courseware functionality
"""
from abc import ABCMeta, abstractmethod
from datetime import datetime
import ddt
from mock import patch
from lms.djangoapps.courseware.url_helpers import get_redirect_url
from student.tests.factories import AdminFactory, UserFactory, CourseEnrollmentFactory
from xmodule.modulestore import ModuleStoreEnum
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory, check_mongo_calls
@ddt.ddt
class RenderXBlockTestMixin(object):
"""
Mixin for testing the courseware.render_xblock function.
It can be used for testing any higher-level endpoint that calls this method.
"""
__metaclass__ = ABCMeta
# DOM elements that appear in the LMS Courseware,
# but are excluded from the xBlock-only rendering.
COURSEWARE_CHROME_HTML_ELEMENTS = [
'<header id="open_close_accordion"',
'<ol class="course-tabs"',
'<footer id="footer-openedx"',
'<div class="window-wrap"',
'<div class="preview-menu"',
]
# DOM elements that appear in an xBlock,
# but are excluded from the xBlock-only rendering.
XBLOCK_REMOVED_HTML_ELEMENTS = [
'<div class="wrap-instructor-info"',
]
@abstractmethod
def get_response(self):
"""
Abstract method to get the response from the endpoint that is being tested.
"""
pass # pragma: no cover
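    # A concrete test case mixing this in would typically implement
    # get_response() via the Django test client, e.g. (URL name and kwargs are
    # hypothetical):
    #   def get_response(self):
    #       url = reverse('render_xblock', kwargs={'usage_key_string': unicode(self.html_block.location)})
    #       return self.client.get(url)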
def login(self):
"""
Logs in the test user.
"""
self.client.login(username=self.user.username, password='test')
def setup_course(self, default_store=None):
"""
Helper method to create the course.
"""
if not default_store:
default_store = self.store.default_modulestore.get_modulestore_type()
with self.store.default_store(default_store):
self.course = CourseFactory.create() # pylint: disable=attribute-defined-outside-init
chapter = ItemFactory.create(parent=self.course, category='chapter')
self.html_block = ItemFactory.create( # pylint: disable=attribute-defined-outside-init
parent=chapter,
category='html',
data="<p>Test HTML Content<p>"
)
def setup_user(self, admin=False, enroll=False, login=False):
"""
Helper method to create the user.
"""
self.user = AdminFactory() if admin else UserFactory() # pylint: disable=attribute-defined-outside-init
if enroll:
CourseEnrollmentFactory(user=self.user, course_id=self.course.id)
if login:
self.login()
def verify_response(self, expected_response_code=200):
"""
Helper method that calls the endpoint, verifies the expected response code, and returns the response.
"""
response = self.get_response()
if expected_response_code == 200:
self.assertContains(response, self.html_block.data, status_code=expected_response_code)
            for chrome_element in self.COURSEWARE_CHROME_HTML_ELEMENTS + self.XBLOCK_REMOVED_HTML_ELEMENTS:
self.assertNotContains(response, chrome_element)
else:
self.assertNotContains(response, self.html_block.data, status_code=expected_response_code)
return response
@ddt.data(
(ModuleStoreEnum.Type.mongo, 8),
(ModuleStoreEnum.Type.split, 5),
)
@ddt.unpack
def test_courseware_html(self, default_store, mongo_calls):
"""
To verify that the removal of courseware chrome elements is working,
we include this test here to make sure the chrome elements that should
be removed actually exist in the full courseware page.
If this test fails, it's probably because the HTML template for courseware
has changed and COURSEWARE_CHROME_HTML_ELEMENTS needs to be updated.
"""
with self.store.default_store(default_store):
self.setup_course(default_store)
self.setup_user(admin=True, enroll=True, login=True)
with check_mongo_calls(mongo_calls):
url = get_redirect_url(self.course.id, self.html_block.location)
response = self.client.get(url)
for chrome_element in self.COURSEWARE_CHROME_HTML_ELEMENTS:
self.assertContains(response, chrome_element)
@ddt.data(
(ModuleStoreEnum.Type.mongo, 5),
(ModuleStoreEnum.Type.split, 5),
)
@ddt.unpack
def test_success_enrolled_staff(self, default_store, mongo_calls):
with self.store.default_store(default_store):
self.setup_course(default_store)
self.setup_user(admin=True, enroll=True, login=True)
# The 5 mongoDB calls include calls for
# Old Mongo:
# (1) fill_in_run
# (2) get_course in get_course_with_access
# (3) get_item for HTML block in get_module_by_usage_id
# (4) get_parent when loading HTML block
# (5) edx_notes descriptor call to get_course
# Split:
# (1) course_index - bulk_operation call
# (2) structure - get_course_with_access
# (3) definition - get_course_with_access
# (4) definition - HTML block
# (5) definition - edx_notes decorator (original_get_html)
with check_mongo_calls(mongo_calls):
self.verify_response()
def test_success_unenrolled_staff(self):
self.setup_course()
self.setup_user(admin=True, enroll=False, login=True)
self.verify_response()
def test_success_enrolled_student(self):
self.setup_course()
self.setup_user(admin=False, enroll=True, login=True)
self.verify_response()
def test_fail_unauthenticated(self):
self.setup_course()
self.setup_user(admin=False, enroll=True, login=False)
self.verify_response(expected_response_code=302)
def test_unenrolled_student(self):
self.setup_course()
self.setup_user(admin=False, enroll=False, login=True)
self.verify_response(expected_response_code=302)
@patch.dict('django.conf.settings.FEATURES', {'DISABLE_START_DATES': False})
def test_fail_block_unreleased(self):
self.setup_course()
self.setup_user(admin=False, enroll=True, login=True)
self.html_block.start = datetime.max
modulestore().update_item(self.html_block, self.user.id) # pylint: disable=no-member
self.verify_response(expected_response_code=404)
def test_fail_block_nonvisible(self):
self.setup_course()
self.setup_user(admin=False, enroll=True, login=True)
self.html_block.visible_to_staff_only = True
modulestore().update_item(self.html_block, self.user.id) # pylint: disable=no-member
self.verify_response(expected_response_code=404)
| rhndg/openedx | lms/djangoapps/courseware/testutils.py | Python | agpl-3.0 | 7,021 | 0.002564 |
from uuid import uuid4
from Firefly import logging, scheduler
from Firefly.components.virtual_devices import AUTHOR
from Firefly.const import (COMMAND_UPDATE, DEVICE_TYPE_THERMOSTAT, LEVEL)
from Firefly.helpers.action import Command
from Firefly.helpers.device.device import Device
from Firefly.helpers.metadata.metadata import action_button_group, action_button_object, action_level, action_text
# TODO(zpriddy): Add more delayed setters to help with rate limits.
TITLE = 'Nest Thermostat'
DEVICE_TYPE = DEVICE_TYPE_THERMOSTAT
AUTHOR = AUTHOR
COMMANDS = [COMMAND_UPDATE, LEVEL, 'temperature', 'mode', 'away', 'home']
REQUESTS = ['temperature', 'humidity', 'mode', 'away', 'target', 'last_refresh']
INITIAL_VALUES = {
'_temperature': -1,
'_humidity': -1,
'_target': -1,
'_mode': 'unknown',
'_away': 'unknown',
'_last_refresh': -1,
}
MODE_LIST = ['off', 'eco', 'cool', 'heat', 'heat-cool']
def Setup(firefly, package, **kwargs):
logging.message('Entering %s setup' % TITLE)
thermostat = Thermostat(firefly, package, **kwargs)
firefly.install_component(thermostat)
refresh_command = Command('service_firebase', 'nest', 'refresh')
firefly.send_command(refresh_command)
return thermostat.id
class Thermostat(Device):
""" Nest Thermostat device.
"""
def __init__(self, firefly, package, **kwargs):
if kwargs.get('initial_values'):
INITIAL_VALUES.update(kwargs.get('initial_values'))
kwargs['initial_values'] = INITIAL_VALUES
super().__init__(firefly, package, TITLE, AUTHOR, COMMANDS, REQUESTS, DEVICE_TYPE, **kwargs)
self.__dict__.update(kwargs['initial_values'])
self.thermostat = kwargs.get('thermostat')
self.add_command(COMMAND_UPDATE, self.update_thermostat)
self.add_command('temperature', self.set_temperature)
self.add_command('mode', self.set_mode)
self.add_command('away', self.set_away)
self.add_command('home', self.set_home)
self.add_request('temperature', self.get_temperature)
self.add_request('target', self.get_target)
self.add_request('humidity', self.get_humidity)
self.add_request('mode', self.get_mode)
self.add_request('away', self.get_away)
self.add_request('last_refresh', self.get_last_refresh)
# self.add_action('temperature', metaSlider(min=50, max=90, request_param='target', set_command='temperature', command_param='temperature', title='Target Temperature'))
self.add_action('current_temperature', action_text(title='Current Temperature', context='Current temperature', request='temperature', primary=True))
# eco_button = metaButtonObject('Eco', 'mode', 'mode', 'eco')
# heat_button = metaButtonObject('Heat', 'mode', 'mode', 'heat')
# cool_button = metaButtonObject('Cool', 'mode', 'mode', 'cool')
# off_button = metaButtonObject('Off', 'mode', 'mode', 'off')
# TODO: Enable range when supported
# range_button = metaButtonObject('Range ', 'mode', 'mode', 'off')
# buttons = [eco_button, cool_button, heat_button, off_button]
# self.add_action('mode_buttons', metaButtons(title='AC Modes', buttons=buttons, request_val='mode', context='Change AC Mode'))
# Buttons for Home/Away
# home_button = metaButtonObject('Home', 'away', 'away', 'home')
# away_button = metaButtonObject('Away', 'away', 'away', 'away')
# self.add_action('home_away_buttons', metaButtons(title='Home Mode (nest)', buttons=[home_button, away_button], request_val='away', context='Set Nest to Home/Away'))
# New Buttons
eco_button = action_button_object('Eco', 'mode', 'mode', 'eco', 'eco')
heat_button = action_button_object('Heat', 'mode', 'mode', 'heat', 'heat')
cool_button = action_button_object('Cool', 'mode', 'mode', 'cool', 'cool')
off_button = action_button_object('Off', 'mode', 'mode', 'off', 'off')
self.add_action('mode_buttons', action_button_group(title='Set Mode', request='mode', buttons=[cool_button, heat_button, eco_button, off_button]))
home_button = action_button_object('Home', 'away', 'away', 'home', 'home')
away_button = action_button_object('Away', 'away', 'away', 'away', 'away')
self.add_action('home_away_buttons', action_button_group(title='Set Home/Away', request='away', buttons=[home_button, away_button]))
self.add_action('set_temperature', action_level(title='Set Temperature', command='temperature', command_prop='temperature', request='target', context='Set target temperature'))
self._alexa_export = False
self.timer_id = str(uuid4())
def get_temperature(self, **kwargs):
return self.temperature
def get_target(self, **kwargs):
return self.target
def get_humidity(self, **kwargs):
return self.humidity
def get_mode(self, **kwargs):
return self.mode
def get_away(self, **kwargs):
return self.away
def get_last_refresh(self, **kwargs):
return self._last_refresh
def set_away(self, **kwargs):
    '''Set to 'away' by default; if 'away' is given in kwargs, use that value instead.
'''
away = kwargs.get('away')
if away is None:
away = 'away'
if away not in ['away', 'home']:
return
self.away = away
def set_home(self, **kwargs):
self.away = 'home'
def set_temperature(self, **kwargs):
t = kwargs.get('temperature')
if t is None:
return
try:
t = int(t)
except:
return
self.temperature = t
def set_mode(self, **kwargs):
m = kwargs.get('mode')
if m is None:
logging.error('no mode provided')
return
m = m.lower()
if m not in MODE_LIST:
logging.error('Invalid Mode')
return
self.mode = m
def update_thermostat(self, **kwargs):
thermostat = kwargs.get('thermostat')
logging.info('[NEST] updating thermostat: %s' % str(thermostat))
self._last_refresh = self.firefly.location.now.timestamp()
if thermostat is not None:
self.thermostat = thermostat
@property
def temperature(self):
if self.thermostat:
self._temperature = self.thermostat.temperature
return self._temperature
@temperature.setter
def temperature(self, value):
if self.thermostat:
self._temperature = value
scheduler.runInS(5, self.set_temperature_delayed, job_id=self.timer_id, temperature=value)
else:
logging.error('thermostat not set yet')
def set_temperature_delayed(self, temperature=None):
"""Set the mode after a 5 second delay. This helps with rate limiting.
Args:
mode: mode to be set to.
"""
if temperature is not None:
try:
self.thermostat.temperature = temperature
except Exception as e:
logging.error('Error setting thermostat temperature: %s' % e)
@property
def humidity(self):
if self.thermostat:
self._humidity = self.thermostat.humidity
return self._humidity
@property
def mode(self):
if self.thermostat:
self._mode = self.thermostat.mode
return self._mode
@mode.setter
def mode(self, value):
if self.thermostat:
self._mode = value
scheduler.runInS(5, self.set_mode_delayed, job_id=self.timer_id, mode=value)
else:
logging.error('thermostat not set yet')
def set_mode_delayed(self, mode=None):
"""Set the mode after a 5 second delay. This helps with rate limiting.
Args:
mode: mode to be set to.
"""
if mode is not None:
try:
self.thermostat.mode = mode
except Exception as e:
logging.error('Error setting thermostat mode: %s' % e)
@property
def away(self):
if self.thermostat:
self._away = self.thermostat.structure.away
return self._away
@away.setter
def away(self, value):
if self.thermostat:
self.thermostat.structure.away = value
self._away = value
else:
logging.error('thermostat not set yet')
@property
def target(self):
if self.thermostat:
self._target = self.thermostat.target
return self._target
| Firefly-Automation/Firefly | Firefly/components/nest/thermostat.py | Python | apache-2.0 | 7,969 | 0.010415 |
from __future__ import absolute_import
import itertools
from time import time
from . import Errors
from . import DebugFlags
from . import Options
from .Visitor import CythonTransform
from .Errors import CompileError, InternalError, AbortError
from . import Naming
#
# Really small pipeline stages
#
def dumptree(t):
# For quick debugging in pipelines
print(t.dump())
return t
def abort_on_errors(node):
# Stop the pipeline if there are any errors.
if Errors.num_errors != 0:
raise AbortError("pipeline break")
return node
def parse_stage_factory(context):
def parse(compsrc):
source_desc = compsrc.source_desc
full_module_name = compsrc.full_module_name
initial_pos = (source_desc, 1, 0)
saved_cimport_from_pyx, Options.cimport_from_pyx = Options.cimport_from_pyx, False
scope = context.find_module(full_module_name, pos = initial_pos, need_pxd = 0)
Options.cimport_from_pyx = saved_cimport_from_pyx
tree = context.parse(source_desc, scope, pxd = 0, full_module_name = full_module_name)
tree.compilation_source = compsrc
tree.scope = scope
tree.is_pxd = False
return tree
return parse
def parse_pxd_stage_factory(context, scope, module_name):
def parse(source_desc):
tree = context.parse(source_desc, scope, pxd=True,
full_module_name=module_name)
tree.scope = scope
tree.is_pxd = True
return tree
return parse
def generate_pyx_code_stage_factory(options, result):
def generate_pyx_code_stage(module_node):
module_node.process_implementation(options, result)
result.compilation_source = module_node.compilation_source
return result
return generate_pyx_code_stage
def inject_pxd_code_stage_factory(context):
def inject_pxd_code_stage(module_node):
for name, (statlistnode, scope) in context.pxds.items():
module_node.merge_in(statlistnode, scope)
return module_node
return inject_pxd_code_stage
def use_utility_code_definitions(scope, target, seen=None):
if seen is None:
seen = set()
for entry in scope.entries.values():
if entry in seen:
continue
seen.add(entry)
if entry.used and entry.utility_code_definition:
target.use_utility_code(entry.utility_code_definition)
for required_utility in entry.utility_code_definition.requires:
target.use_utility_code(required_utility)
elif entry.as_module:
use_utility_code_definitions(entry.as_module, target, seen)
def inject_utility_code_stage_factory(context):
def inject_utility_code_stage(module_node):
use_utility_code_definitions(context.cython_scope, module_node.scope)
added = []
# Note: the list might be extended inside the loop (if some utility code
# pulls in other utility code, explicitly or implicitly)
for utilcode in module_node.scope.utility_code_list:
if utilcode in added: continue
added.append(utilcode)
if utilcode.requires:
for dep in utilcode.requires:
                    if dep not in added and dep not in module_node.scope.utility_code_list:
module_node.scope.utility_code_list.append(dep)
tree = utilcode.get_tree()
if tree:
module_node.merge_in(tree.body, tree.scope, merge_scope=True)
return module_node
return inject_utility_code_stage
class UseUtilityCodeDefinitions(CythonTransform):
# Temporary hack to use any utility code in nodes' "utility_code_definitions".
# This should be moved to the code generation phase of the relevant nodes once
# it is safe to generate CythonUtilityCode at code generation time.
def __call__(self, node):
self.scope = node.scope
return super(UseUtilityCodeDefinitions, self).__call__(node)
def process_entry(self, entry):
if entry:
for utility_code in (entry.utility_code, entry.utility_code_definition):
if utility_code:
self.scope.use_utility_code(utility_code)
def visit_AttributeNode(self, node):
self.process_entry(node.entry)
return node
def visit_NameNode(self, node):
self.process_entry(node.entry)
self.process_entry(node.type_entry)
return node
#
# Pipeline factories
#
def create_pipeline(context, mode, exclude_classes=()):
assert mode in ('pyx', 'py', 'pxd')
from .Visitor import PrintTree
from .ParseTreeTransforms import WithTransform, NormalizeTree, PostParse, PxdPostParse
from .ParseTreeTransforms import ForwardDeclareTypes, AnalyseDeclarationsTransform
from .ParseTreeTransforms import AnalyseExpressionsTransform, FindInvalidUseOfFusedTypes
from .ParseTreeTransforms import CreateClosureClasses, MarkClosureVisitor, DecoratorTransform
from .ParseTreeTransforms import InterpretCompilerDirectives, TransformBuiltinMethods
from .ParseTreeTransforms import ExpandInplaceOperators, ParallelRangeTransform
from .ParseTreeTransforms import CalculateQualifiedNamesTransform
from .TypeInference import MarkParallelAssignments, MarkOverflowingArithmetic
from .ParseTreeTransforms import AdjustDefByDirectives, AlignFunctionDefinitions
from .ParseTreeTransforms import RemoveUnreachableCode, GilCheck
from .FlowControl import ControlFlowAnalysis
from .AnalysedTreeTransforms import AutoTestDictTransform
from .AutoDocTransforms import EmbedSignature
from .Optimize import FlattenInListTransform, SwitchTransform, IterationTransform
from .Optimize import EarlyReplaceBuiltinCalls, OptimizeBuiltinCalls
from .Optimize import InlineDefNodeCalls
from .Optimize import ConstantFolding, FinalOptimizePhase
from .Optimize import DropRefcountingTransform
from .Optimize import ConsolidateOverflowCheck
from .Buffer import IntroduceBufferAuxiliaryVars
from .ModuleNode import check_c_declarations, check_c_declarations_pxd
if mode == 'pxd':
_check_c_declarations = check_c_declarations_pxd
_specific_post_parse = PxdPostParse(context)
else:
_check_c_declarations = check_c_declarations
_specific_post_parse = None
if mode == 'py':
_align_function_definitions = AlignFunctionDefinitions(context)
else:
_align_function_definitions = None
# NOTE: This is the "common" parts of the pipeline, which is also
# code in pxd files. So it will be run multiple times in a
# compilation stage.
stages = [
NormalizeTree(context),
PostParse(context),
_specific_post_parse,
InterpretCompilerDirectives(context, context.compiler_directives),
ParallelRangeTransform(context),
AdjustDefByDirectives(context),
WithTransform(context),
MarkClosureVisitor(context),
_align_function_definitions,
RemoveUnreachableCode(context),
ConstantFolding(),
FlattenInListTransform(),
DecoratorTransform(context),
ForwardDeclareTypes(context),
AnalyseDeclarationsTransform(context),
AutoTestDictTransform(context),
EmbedSignature(context),
EarlyReplaceBuiltinCalls(context), ## Necessary?
TransformBuiltinMethods(context),
MarkParallelAssignments(context),
ControlFlowAnalysis(context),
RemoveUnreachableCode(context),
# MarkParallelAssignments(context),
MarkOverflowingArithmetic(context),
IntroduceBufferAuxiliaryVars(context),
_check_c_declarations,
InlineDefNodeCalls(context),
AnalyseExpressionsTransform(context),
FindInvalidUseOfFusedTypes(context),
ExpandInplaceOperators(context),
IterationTransform(context),
SwitchTransform(context),
OptimizeBuiltinCalls(context), ## Necessary?
CreateClosureClasses(context), ## After all lookups and type inference
CalculateQualifiedNamesTransform(context),
ConsolidateOverflowCheck(context),
DropRefcountingTransform(),
FinalOptimizePhase(context),
GilCheck(),
UseUtilityCodeDefinitions(context),
]
filtered_stages = []
for s in stages:
if s.__class__ not in exclude_classes:
filtered_stages.append(s)
return filtered_stages
def create_pyx_pipeline(context, options, result, py=False, exclude_classes=()):
if py:
mode = 'py'
else:
mode = 'pyx'
test_support = []
if options.evaluate_tree_assertions:
from ..TestUtils import TreeAssertVisitor
test_support.append(TreeAssertVisitor())
if options.gdb_debug:
from ..Debugger import DebugWriter # requires Py2.5+
from .ParseTreeTransforms import DebugTransform
context.gdb_debug_outputwriter = DebugWriter.CythonDebugWriter(
options.output_dir)
debug_transform = [DebugTransform(context, options, result)]
else:
debug_transform = []
return list(itertools.chain(
[parse_stage_factory(context)],
create_pipeline(context, mode, exclude_classes=exclude_classes),
test_support,
[inject_pxd_code_stage_factory(context),
inject_utility_code_stage_factory(context),
abort_on_errors],
debug_transform,
[generate_pyx_code_stage_factory(options, result)]))
def create_pxd_pipeline(context, scope, module_name):
from .CodeGeneration import ExtractPxdCode
# The pxd pipeline ends up with a CCodeWriter containing the
# code of the pxd, as well as a pxd scope.
return [
parse_pxd_stage_factory(context, scope, module_name)
] + create_pipeline(context, 'pxd') + [
ExtractPxdCode()
]
def create_py_pipeline(context, options, result):
return create_pyx_pipeline(context, options, result, py=True)
def create_pyx_as_pxd_pipeline(context, result):
from .ParseTreeTransforms import AlignFunctionDefinitions, \
MarkClosureVisitor, WithTransform, AnalyseDeclarationsTransform
from .Optimize import ConstantFolding, FlattenInListTransform
from .Nodes import StatListNode
pipeline = []
pyx_pipeline = create_pyx_pipeline(context, context.options, result,
exclude_classes=[
AlignFunctionDefinitions,
MarkClosureVisitor,
ConstantFolding,
FlattenInListTransform,
WithTransform
])
for stage in pyx_pipeline:
pipeline.append(stage)
if isinstance(stage, AnalyseDeclarationsTransform):
# This is the last stage we need.
break
def fake_pxd(root):
for entry in root.scope.entries.values():
if not entry.in_cinclude:
entry.defined_in_pxd = 1
if entry.name == entry.cname and entry.visibility != 'extern':
# Always mangle non-extern cimported entries.
entry.cname = entry.scope.mangle(Naming.func_prefix, entry.name)
return StatListNode(root.pos, stats=[]), root.scope
pipeline.append(fake_pxd)
return pipeline
def insert_into_pipeline(pipeline, transform, before=None, after=None):
"""
Insert a new transform into the pipeline after or before an instance of
the given class. e.g.
pipeline = insert_into_pipeline(pipeline, transform,
after=AnalyseDeclarationsTransform)
"""
assert before or after
cls = before or after
for i, t in enumerate(pipeline):
if isinstance(t, cls):
break
if after:
i += 1
return pipeline[:i] + [transform] + pipeline[i:]
#
# Running a pipeline
#
def run_pipeline(pipeline, source, printtree=True):
from .Visitor import PrintTree
error = None
data = source
try:
try:
for phase in pipeline:
if phase is not None:
if DebugFlags.debug_verbose_pipeline:
t = time()
print("Entering pipeline phase %r" % phase)
if not printtree and isinstance(phase, PrintTree):
continue
data = phase(data)
if DebugFlags.debug_verbose_pipeline:
print(" %.3f seconds" % (time() - t))
except CompileError as err:
# err is set
Errors.report_error(err)
error = err
except InternalError as err:
# Only raise if there was not an earlier error
if Errors.num_errors == 0:
raise
error = err
except AbortError as err:
error = err
return (error, data)
| achernet/cython | Cython/Compiler/Pipeline.py | Python | apache-2.0 | 13,086 | 0.003897 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
QGIS Server HTTP wrapper for testing purposes
================================================================================
This script launches a QGIS Server listening on port 8081 or on the port
specified on the environment variable QGIS_SERVER_PORT.
Hostname is set by environment variable QGIS_SERVER_HOST (defaults to 127.0.0.1)
The server can be configured to support any of the following auth systems
(mutually exclusive):
* PKI
* HTTP Basic
* OAuth2 (requires python package oauthlib, installable with:
with "pip install oauthlib")
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
SECURITY WARNING:
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
This script was developed for testing purposes and was not meant to be secure,
please do not use in a production server any of the authentication systems
implemented here.
HTTPS
--------------------------------------------------------------------------------
HTTPS is automatically enabled for PKI and OAuth2
XYZ
--------------------------------------------------------------------------------
An XYZ map service is also available for multithreading testing:
?MAP=/path/to/projects.qgs&SERVICE=XYZ&X=1&Y=0&Z=1&LAYERS=world
Note that multithreading in QGIS server is not officially supported and
it is not supposed to work in any case.
Set the MULTITHREADING environment variable to 1 to activate it.
HTTP Basic
--------------------------------------------------------------------------------
For testing purposes, HTTP Basic can be enabled by setting the following
environment variables:
* QGIS_SERVER_HTTP_BASIC_AUTH (default not set, set to anything to enable)
* QGIS_SERVER_USERNAME (default ="username")
* QGIS_SERVER_PASSWORD (default ="password")
PKI
--------------------------------------------------------------------------------
PKI authentication with HTTPS can be enabled with:
* QGIS_SERVER_PKI_CERTIFICATE (server certificate)
* QGIS_SERVER_PKI_KEY (server private key)
* QGIS_SERVER_PKI_AUTHORITY (root CA)
* QGIS_SERVER_PKI_USERNAME (valid username)
OAuth2 Resource Owner Grant Flow
--------------------------------------------------------------------------------
OAuth2 Resource Owner Grant Flow with HTTPS can be enabled with:
* QGIS_SERVER_OAUTH2_AUTHORITY (no default)
* QGIS_SERVER_OAUTH2_KEY (server private key)
* QGIS_SERVER_OAUTH2_CERTIFICATE (server certificate)
* QGIS_SERVER_OAUTH2_USERNAME (default ="username")
* QGIS_SERVER_OAUTH2_PASSWORD (default ="password")
* QGIS_SERVER_OAUTH2_TOKEN_EXPIRES_IN (default = 3600)
Available endpoints:
- /token (returns a new access_token),
optionally specify an expiration time in seconds with ?ttl=<int>
- /refresh (returns a new access_token from a refresh token),
optionally specify an expiration time in seconds with ?ttl=<int>
- /result (check the Bearer token and returns a short sentence if it validates)
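A new token can be requested with a standard OAuth2 password-grant POST, e.g.
(illustrative command; host, port and credentials depend on your environment):
curl -k -X POST -d "grant_type=password&username=username&password=password" \
     https://127.0.0.1:8443/token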
Sample runs
--------------------------------------------------------------------------------
PKI:
QGIS_SERVER_PKI_USERNAME=Gerardus QGIS_SERVER_PORT=47547 QGIS_SERVER_HOST=localhost \
QGIS_SERVER_PKI_KEY=/home/$USER/dev/QGIS/tests/testdata/auth_system/certs_keys/localhost_ssl_key.pem \
QGIS_SERVER_PKI_CERTIFICATE=/home/$USER/dev/QGIS/tests/testdata/auth_system/certs_keys/localhost_ssl_cert.pem \
QGIS_SERVER_PKI_AUTHORITY=/home/$USER/dev/QGIS/tests/testdata/auth_system/certs_keys/chains_subissuer-issuer-root_issuer2-root2.pem \
python3 /home/$USER/dev/QGIS/tests/src/python/qgis_wrapped_server.py
OAuth2:
QGIS_SERVER_PORT=8443 \
QGIS_SERVER_HOST=127.0.0.1 \
QGIS_SERVER_OAUTH2_AUTHORITY=/home/$USER/dev/QGIS/tests/testdata/auth_system/certs_keys/chains_subissuer-issuer-root_issuer2-root2.pem \
QGIS_SERVER_OAUTH2_CERTIFICATE=/home/$USER/dev/QGIS/tests/testdata/auth_system/certs_keys/127_0_0_1_ssl_cert.pem \
QGIS_SERVER_OAUTH2_KEY=/home/$USER/dev/QGIS/tests/testdata/auth_system/certs_keys/127_0_0_1_ssl_key.pem \
python3 \
/home/$USER/dev/QGIS/tests/src/python/qgis_wrapped_server.py
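HTTP Basic (illustrative; any non-empty value enables QGIS_SERVER_HTTP_BASIC_AUTH):
QGIS_SERVER_HTTP_BASIC_AUTH=1 QGIS_SERVER_USERNAME=username QGIS_SERVER_PASSWORD=password \
QGIS_SERVER_PORT=8081 QGIS_SERVER_HOST=127.0.0.1 \
python3 /home/$USER/dev/QGIS/tests/src/python/qgis_wrapped_server.py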
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
import copy
import os
import signal
import ssl
import sys
import urllib.parse
from http.server import BaseHTTPRequestHandler, HTTPServer
from qgis.core import QgsApplication
from qgis.server import (QgsBufferServerRequest, QgsBufferServerResponse,
QgsServer, QgsServerRequest)
__author__ = 'Alessandro Pasotti'
__date__ = '05/15/2016'
__copyright__ = 'Copyright 2016, The QGIS Project'
# Needed on Qt 5 so that the serialization of XML is consistent among all
# executions
os.environ['QT_HASH_SEED'] = '1'
import math
import threading
from socketserver import ThreadingMixIn
from qgis.core import QgsCoordinateTransform, QgsCoordinateReferenceSystem
from qgis.server import QgsServerFilter
QGIS_SERVER_PORT = int(os.environ.get('QGIS_SERVER_PORT', '8081'))
QGIS_SERVER_HOST = os.environ.get('QGIS_SERVER_HOST', '127.0.0.1')
# HTTP Basic
QGIS_SERVER_HTTP_BASIC_AUTH = os.environ.get(
'QGIS_SERVER_HTTP_BASIC_AUTH', False)
QGIS_SERVER_USERNAME = os.environ.get('QGIS_SERVER_USERNAME', 'username')
QGIS_SERVER_PASSWORD = os.environ.get('QGIS_SERVER_PASSWORD', 'password')
# PKI authentication
QGIS_SERVER_PKI_CERTIFICATE = os.environ.get('QGIS_SERVER_PKI_CERTIFICATE')
QGIS_SERVER_PKI_KEY = os.environ.get('QGIS_SERVER_PKI_KEY')
QGIS_SERVER_PKI_AUTHORITY = os.environ.get('QGIS_SERVER_PKI_AUTHORITY')
QGIS_SERVER_PKI_USERNAME = os.environ.get('QGIS_SERVER_PKI_USERNAME')
# OAuth2 authentication
QGIS_SERVER_OAUTH2_CERTIFICATE = os.environ.get(
'QGIS_SERVER_OAUTH2_CERTIFICATE')
QGIS_SERVER_OAUTH2_KEY = os.environ.get('QGIS_SERVER_OAUTH2_KEY')
QGIS_SERVER_OAUTH2_AUTHORITY = os.environ.get('QGIS_SERVER_OAUTH2_AUTHORITY')
QGIS_SERVER_OAUTH2_USERNAME = os.environ.get(
'QGIS_SERVER_OAUTH2_USERNAME', 'username')
QGIS_SERVER_OAUTH2_PASSWORD = os.environ.get(
'QGIS_SERVER_OAUTH2_PASSWORD', 'password')
QGIS_SERVER_OAUTH2_TOKEN_EXPIRES_IN = os.environ.get(
'QGIS_SERVER_OAUTH2_TOKEN_EXPIRES_IN', 3600)
# Check if PKI is enabled
QGIS_SERVER_PKI_AUTH = (
QGIS_SERVER_PKI_CERTIFICATE is not None and
os.path.isfile(QGIS_SERVER_PKI_CERTIFICATE) and
QGIS_SERVER_PKI_KEY is not None and
os.path.isfile(QGIS_SERVER_PKI_KEY) and
QGIS_SERVER_PKI_AUTHORITY is not None and
os.path.isfile(QGIS_SERVER_PKI_AUTHORITY) and
QGIS_SERVER_PKI_USERNAME)
# Check if OAuth2 is enabled
QGIS_SERVER_OAUTH2_AUTH = (
QGIS_SERVER_OAUTH2_CERTIFICATE is not None and
os.path.isfile(QGIS_SERVER_OAUTH2_CERTIFICATE) and
QGIS_SERVER_OAUTH2_KEY is not None and
os.path.isfile(QGIS_SERVER_OAUTH2_KEY) and
QGIS_SERVER_OAUTH2_AUTHORITY is not None and
os.path.isfile(QGIS_SERVER_OAUTH2_AUTHORITY) and
QGIS_SERVER_OAUTH2_USERNAME and QGIS_SERVER_OAUTH2_PASSWORD)
HTTPS_ENABLED = QGIS_SERVER_PKI_AUTH or QGIS_SERVER_OAUTH2_AUTH
qgs_app = QgsApplication([], False)
qgs_server = QgsServer()
if QGIS_SERVER_HTTP_BASIC_AUTH:
from qgis.server import QgsServerFilter
import base64
class HTTPBasicFilter(QgsServerFilter):
def requestReady(self):
handler = self.serverInterface().requestHandler()
auth = self.serverInterface().requestHandler().requestHeader('HTTP_AUTHORIZATION')
if auth:
username, password = base64.b64decode(auth[6:]).split(b':')
if (username.decode('utf-8') == os.environ.get('QGIS_SERVER_USERNAME', 'username') and
password.decode('utf-8') == os.environ.get('QGIS_SERVER_PASSWORD', 'password')):
return
handler.setParameter('SERVICE', 'ACCESS_DENIED')
def responseComplete(self):
handler = self.serverInterface().requestHandler()
auth = handler.requestHeader('HTTP_AUTHORIZATION')
if auth:
username, password = base64.b64decode(auth[6:]).split(b':')
if (username.decode('utf-8') == os.environ.get('QGIS_SERVER_USERNAME', 'username') and
password.decode('utf-8') == os.environ.get('QGIS_SERVER_PASSWORD', 'password')):
return
# No auth ...
handler.clear()
handler.setResponseHeader('Status', '401 Authorization required')
handler.setResponseHeader(
'WWW-Authenticate', 'Basic realm="QGIS Server"')
handler.appendBody(b'<h1>Authorization required</h1>')
filter = HTTPBasicFilter(qgs_server.serverInterface())
qgs_server.serverInterface().registerFilter(filter)
def num2deg(xtile, ytile, zoom):
"""This returns the NW-corner of the square. Use the function with xtile+1 and/or ytile+1
to get the other corners. With xtile+0.5 & ytile+0.5 it will return the center of the tile."""
n = 2.0 ** zoom
lon_deg = xtile / n * 360.0 - 180.0
lat_rad = math.atan(math.sinh(math.pi * (1 - 2 * ytile / n)))
lat_deg = math.degrees(lat_rad)
return (lat_deg, lon_deg)
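# For reference only (not used by this server): a minimal sketch of the
# inverse conversion from lat/lon degrees to slippy-map tile indices.
def deg2num(lat_deg, lon_deg, zoom):
    lat_rad = math.radians(lat_deg)
    n = 2.0 ** zoom
    xtile = int((lon_deg + 180.0) / 360.0 * n)
    ytile = int((1.0 - math.asinh(math.tan(lat_rad)) / math.pi) / 2.0 * n)
    return (xtile, ytile)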
class XYZFilter(QgsServerFilter):
"""XYZ server, example: ?MAP=/path/to/projects.qgs&SERVICE=XYZ&X=1&Y=0&Z=1&LAYERS=world"""
def requestReady(self):
handler = self.serverInterface().requestHandler()
if handler.parameter('SERVICE') == 'XYZ':
x = int(handler.parameter('X'))
y = int(handler.parameter('Y'))
z = int(handler.parameter('Z'))
# NW corner
lat_deg, lon_deg = num2deg(x, y, z)
# SE corner
lat_deg2, lon_deg2 = num2deg(x + 1, y + 1, z)
handler.setParameter('SERVICE', 'WMS')
handler.setParameter('REQUEST', 'GetMap')
handler.setParameter('VERSION', '1.3.0')
handler.setParameter('SRS', 'EPSG:4326')
handler.setParameter('HEIGHT', '256')
handler.setParameter('WIDTH', '256')
handler.setParameter('BBOX', "{},{},{},{}".format(lat_deg2, lon_deg, lat_deg, lon_deg2))
xyzfilter = XYZFilter(qgs_server.serverInterface())
qgs_server.serverInterface().registerFilter(xyzfilter)
if QGIS_SERVER_OAUTH2_AUTH:
from qgis.server import QgsServerFilter
from oauthlib.oauth2 import RequestValidator, LegacyApplicationServer
import base64
from datetime import datetime
# Naive token storage implementation
_tokens = {}
class SimpleValidator(RequestValidator):
"""Validate username and password
Note: does not support scopes or client_id"""
def validate_client_id(self, client_id, request):
return True
def authenticate_client(self, request, *args, **kwargs):
"""Wide open"""
request.client = type("Client", (), {'client_id': 'my_id'})
return True
def validate_user(self, username, password, client, request, *args, **kwargs):
if username == QGIS_SERVER_OAUTH2_USERNAME and password == QGIS_SERVER_OAUTH2_PASSWORD:
return True
return False
def validate_grant_type(self, client_id, grant_type, client, request, *args, **kwargs):
# Clients should only be allowed to use one type of grant.
return grant_type in ('password', 'refresh_token')
def get_default_scopes(self, client_id, request, *args, **kwargs):
# Scopes a client will authorize for if none are supplied in the
# authorization request.
return ('my_scope', )
def validate_scopes(self, client_id, scopes, client, request, *args, **kwargs):
"""Wide open"""
return True
def save_bearer_token(self, token, request, *args, **kwargs):
# Remember to associate it with request.scopes, request.user and
# request.client. The two former will be set when you validate
# the authorization code. Don't forget to save both the
# access_token and the refresh_token and set expiration for the
# access_token to now + expires_in seconds.
_tokens[token['access_token']] = copy.copy(token)
_tokens[token['access_token']]['expiration'] = datetime.now(
).timestamp() + int(token['expires_in'])
def validate_bearer_token(self, token, scopes, request):
"""Check the token"""
return token in _tokens and _tokens[token]['expiration'] > datetime.now().timestamp()
def validate_refresh_token(self, refresh_token, client, request, *args, **kwargs):
"""Ensure the Bearer token is valid and authorized access to scopes."""
for t in _tokens.values():
if t['refresh_token'] == refresh_token:
return True
return False
def get_original_scopes(self, refresh_token, request, *args, **kwargs):
"""Get the list of scopes associated with the refresh token."""
return []
validator = SimpleValidator()
oauth_server = LegacyApplicationServer(
validator, token_expires_in=QGIS_SERVER_OAUTH2_TOKEN_EXPIRES_IN)
class OAuth2Filter(QgsServerFilter):
"""This filter provides testing endpoint for OAuth2 Resource Owner Grant Flow
Available endpoints:
- /token (returns a new access_token),
optionally specify an expiration time in seconds with ?ttl=<int>
- /refresh (returns a new access_token from a refresh token),
optionally specify an expiration time in seconds with ?ttl=<int>
- /result (check the Bearer token and returns a short sentence if it validates)
"""
def responseComplete(self):
handler = self.serverInterface().requestHandler()
def _token(ttl):
"""Common code for new and refresh token"""
handler.clear()
body = bytes(handler.data()).decode('utf8')
old_expires_in = oauth_server.default_token_type.expires_in
# Hacky way to dynamically set token expiration time
oauth_server.default_token_type.expires_in = ttl
headers, payload, code = oauth_server.create_token_response(
'/token', 'post', body, {})
oauth_server.default_token_type.expires_in = old_expires_in
for k, v in headers.items():
handler.setResponseHeader(k, v)
handler.setStatusCode(code)
handler.appendBody(payload.encode('utf-8'))
# Token expiration
ttl = handler.parameterMap().get('TTL', QGIS_SERVER_OAUTH2_TOKEN_EXPIRES_IN)
# Issue a new token
if handler.url().find('/token') != -1:
_token(ttl)
return
# Refresh token
if handler.url().find('/refresh') != -1:
_token(ttl)
return
# Check for valid token
auth = handler.requestHeader('HTTP_AUTHORIZATION')
if auth:
result, response = oauth_server.verify_request(
urllib.parse.quote_plus(handler.url(), safe='/:?=&'), 'post', '', {'Authorization': auth})
if result:
# This is a test endpoint for OAuth2, it requires a valid
# token
if handler.url().find('/result') != -1:
handler.clear()
handler.appendBody(b'Valid Token: enjoy OAuth2')
# Standard flow
return
else:
# Wrong token, default response 401
pass
# No auth ...
handler.clear()
handler.setStatusCode(401)
handler.setResponseHeader('Status', '401 Unauthorized')
handler.setResponseHeader(
'WWW-Authenticate', 'Bearer realm="QGIS Server"')
handler.appendBody(b'Invalid Token: Authorization required.')
filter = OAuth2Filter(qgs_server.serverInterface())
qgs_server.serverInterface().registerFilter(filter)
class Handler(BaseHTTPRequestHandler):
def do_GET(self, post_body=None):
# CGI vars:
headers = {}
for k, v in self.headers.items():
            headers['HTTP_%s' % k.replace(' ', '_').replace('-', '_').upper()] = v
if not self.path.startswith('http'):
self.path = "%s://%s:%s%s" % ('https' if HTTPS_ENABLED else 'http', QGIS_SERVER_HOST, self.server.server_port, self.path)
request = QgsBufferServerRequest(
self.path, (QgsServerRequest.PostMethod if post_body is not None else QgsServerRequest.GetMethod), headers, post_body)
response = QgsBufferServerResponse()
qgs_server.handleRequest(request, response)
headers_dict = response.headers()
try:
self.send_response(int(headers_dict['Status'].split(' ')[0]))
        except (KeyError, ValueError):
self.send_response(200)
for k, v in headers_dict.items():
self.send_header(k, v)
self.end_headers()
self.wfile.write(response.body())
return
def do_POST(self):
content_len = int(self.headers.get('content-length', 0))
post_body = self.rfile.read(content_len)
return self.do_GET(post_body)
class ThreadedHTTPServer(ThreadingMixIn, HTTPServer):
"""Handle requests in a separate thread."""
pass
if __name__ == '__main__':
if os.environ.get('MULTITHREADING') == '1':
server = ThreadedHTTPServer((QGIS_SERVER_HOST, QGIS_SERVER_PORT), Handler)
else:
server = HTTPServer((QGIS_SERVER_HOST, QGIS_SERVER_PORT), Handler)
# HTTPS is enabled if any of PKI or OAuth2 are enabled too
if HTTPS_ENABLED:
if QGIS_SERVER_OAUTH2_AUTH:
server.socket = ssl.wrap_socket(
server.socket,
certfile=QGIS_SERVER_OAUTH2_CERTIFICATE,
ca_certs=QGIS_SERVER_OAUTH2_AUTHORITY,
keyfile=QGIS_SERVER_OAUTH2_KEY,
server_side=True,
# cert_reqs=ssl.CERT_REQUIRED, # No certs for OAuth2
ssl_version=ssl.PROTOCOL_TLSv1)
else:
server.socket = ssl.wrap_socket(
server.socket,
certfile=QGIS_SERVER_PKI_CERTIFICATE,
keyfile=QGIS_SERVER_PKI_KEY,
ca_certs=QGIS_SERVER_PKI_AUTHORITY,
cert_reqs=ssl.CERT_REQUIRED,
server_side=True,
ssl_version=ssl.PROTOCOL_TLSv1)
print('Starting server on %s://%s:%s, use <Ctrl-C> to stop' %
('https' if HTTPS_ENABLED else 'http', QGIS_SERVER_HOST, server.server_port), flush=True)
def signal_handler(signal, frame):
global qgs_app
print("\nExiting QGIS...")
qgs_app.exitQgis()
sys.exit(0)
signal.signal(signal.SIGINT, signal_handler)
server.serve_forever()
| minorua/QGIS | tests/src/python/qgis_wrapped_server.py | Python | gpl-2.0 | 19,452 | 0.00257 |
# -*- coding: utf-8 -*-
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
__author__ = "Ole Christian Weidner"
__copyright__ = "Copyright 2012, Ole Christian Weidner"
__license__ = "MIT"
import sys, time, uuid
import getpass
import bliss.saga as saga
def run(remote_base_url, local_file_to_copy):
"""Test if we can lists a (remote) directory
"""
try:
failed = False
tmpdirname = "sagaproj-%s" % uuid.uuid1()
remote_tdir = saga.filesystem.Directory(remote_base_url)
remote_tdir.make_dir(tmpdirname)
print "Size: %s" %str(remote_tdir.get_size())
mylocalfile = saga.filesystem.File("sftp://localhost/%s" % local_file_to_copy)
print "File Size: %s" %str(mylocalfile.get_size())
mylocalfile.copy("%s/%s/" % (remote_base_url, tmpdirname))
mylocalfile.copy("%s/%s/bh-copy" % (remote_base_url, tmpdirname))
remote_tdir = saga.filesystem.Directory("%s/%s/" % (remote_base_url, tmpdirname))
remote_tdir.make_dir("A")
remote_tdir.make_dir("B")
print remote_tdir.list()
print "Size: %s" %str(remote_tdir.get_size())
remote_tdir.remove("A")
remote_tdir.remove("B")
print remote_tdir.list()
print "Size: %s" %str(remote_tdir.get_size())
remote_tdir.remove()
remote_tdir.close()
except saga.Exception, ex:
failed = True
why = str(ex)
if failed == True:
print ""
print "============================================"
print "File / directory tests seems to have FAILED!"
print "============================================"
print " "
print "%s" % (why)
print "Please run this test again with SAGA_VERBOSE=5 "
print "and report the results at: "
print ""
print "https://github.com/saga-project/bliss/issues\n"
else:
print ""
print "============================================"
print "File / directory tests have passed!"
print "============================================"
print " "
return failed
def usage():
print 'Usage: python %s ' % __file__
print ' <REMOTEURL> (e.g., sftp://oweidner@qb.loni.org)'
print ' <LOCAL_FILE_TO_COPY>'
def main():
remoteusername = getpass.getuser()
args = sys.argv[1:]
if len(args) != 2:
usage()
sys.exit(-1)
else:
remoteurl = args[0]
local_file_to_copy = args[1]
return run(remoteurl, local_file_to_copy)
if __name__ == '__main__':
sys.exit(main())
| saga-project/bliss | test/compliance/file/03_copy_local_remote_etc.py | Python | mit | 2,709 | 0.014766 |
#! /usr/bin/env python
import numpy as np
def writeDataToFile(filename, data,
                    fieldNames=None,
                    constantsNames=None,
                    constantsValues=None,
                    appendFile=False,
                    addTimeField=False,
                    dataFormat='%10.5f'):
    # Use None defaults: mutable default arguments are shared between calls,
    # and fieldNames is mutated below.
    fieldNames = [] if fieldNames is None else fieldNames
    constantsNames = [] if constantsNames is None else constantsNames
    constantsValues = [] if constantsValues is None else constantsValues
    commentsStr = '#! '
    delimiterStr = ' '
    if addTimeField:
        time = np.arange(1, len(data[0])+1, dtype=np.float64)
        data.insert(0, time)
        if len(fieldNames) == len(data)-1:
            fieldNames.insert(0, 'time')
fieldsStr = ''
if len(fieldNames)==len(data):
fieldsStr += 'FIELDS '
for f in fieldNames: fieldsStr += f + ' '
for i in range(len(constantsNames)):
str1 = '\nSET {0:} {1:'+dataFormat[1:]+'}'
fieldsStr += str1.format(constantsNames[i],constantsValues[i])
data2 = np.column_stack(data)
if appendFile:
file = open(filename,'a')
else:
file = open(filename,'w')
np.savetxt(file, data2 , header=fieldsStr, delimiter=delimiterStr, fmt=dataFormat, comments=commentsStr)
file.close()
#-------------------------------------------------------------------------------
def getCommentsFromFile(filename,findString=''):
# assume that the comments are in the first 100 lines
MaxLines = 100
commentsPrefix='#'
comments = []
file = open(filename,'r')
for i in range(MaxLines):
line = file.readline()
if line[0:1]==commentsPrefix and line.find(findString) != -1:
comments.append(line)
file.close()
return comments
#-------------------------------------------------------------------------------
def getFieldNames(filename):
fieldsLine = getCommentsFromFile(filename,findString='FIELDS')[0]
fields = fieldsLine.split()[2:]
return fields
#-------------------------------------------------------------------------------
def getDataFromFile(filename,ignoreFieldNames=False):
if ignoreFieldNames:
fields = []
else:
fields = getFieldNames(filename)
data = np.loadtxt(filename)
numColumn = data.shape[1]
data = np.hsplit(data,numColumn)
for i in range(numColumn):
data[i] = data[i].reshape( (data[i].size,) )
return (data, fields)
#-------------------------------------------------------------------------------
def calculateAutocorrelation(data):
# assert len(data.shape) == 1
mean = np.mean(data)
NumSamples = data.size
autocorr = np.zeros(NumSamples)
data = data-mean
for i in range(NumSamples):
sum = 0.0
for k in range(NumSamples-i):
sum += data[k]*data[k+i]
        autocorr[i] = sum/float(NumSamples-i)
autocorr = autocorr/autocorr[0]
return autocorr
#-------------------------------------------------------------------------------
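# A minimal vectorized sketch equivalent to calculateAutocorrelation above,
# kept separate so the loop-based reference implementation stays untouched.
# Assumes a 1-D input array and uses the same (N - lag) normalization.
def calculateAutocorrelationFast(data):
    data = np.asarray(data, dtype=np.float64)
    data = data - np.mean(data)
    NumSamples = data.size
    # positive lags 0 .. N-1 of the full autocorrelation
    autocorr = np.correlate(data, data, mode='full')[NumSamples-1:]
    autocorr = autocorr/np.arange(NumSamples, 0, -1, dtype=np.float64)
    return autocorr/autocorr[0]
#-------------------------------------------------------------------------------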
def getCorrelationTime(autocorr):
NumSamples = autocorr.size
integrated_autocorr = np.zeros(NumSamples)
sum = 0.0
for i in range(NumSamples):
sum += 2.0*autocorr[i]
        integrated_autocorr[i] = 1.0 + sum/float(i+1)
return integrated_autocorr
#-------------------------------------------------------------------------------
| valsson/MD-MC-Codes-2016 | LJ7-2D_MD-sampling/DataTools.py | Python | mit | 3,140 | 0.014013 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-02-22 21:06
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('statusboard', '0015_merge_20170222_2058'),
]
operations = [
migrations.AddField(
model_name='service',
name='position',
field=models.PositiveIntegerField(default=0),
),
]
| edigiacomo/django-statusboard | statusboard/migrations/0016_service_position.py | Python | gpl-2.0 | 464 | 0 |
# -*- encoding: utf-8 -*-
from dateutil.relativedelta import relativedelta
from django.core.exceptions import ValidationError
from django.db import models
from django.db.models import Q
from django.utils import timezone
from reversion import revisions as reversion
from base.model_utils import TimeStampedModel
from base.singleton import SingletonModel
def default_permission():
return Permission.objects.get(slug=Permission.PUBLIC).pk
class BookingSettings(SingletonModel):
display_categories = models.BooleanField(
default=False,
help_text=("Does this project use 'Categories'?")
)
display_permissions = models.BooleanField(
default=False,
help_text="Display permissions on the list of bookings."
)
display_locations = models.BooleanField(
default=False,
help_text=("Does this project use 'Locations'?")
)
display_rota = models.BooleanField(
default=False,
help_text=("Does this project use 'Rotas'?")
)
notes_user_staff = models.BooleanField(
default=False,
help_text=(
"Allow a member of staff to edit notes for logged "
"in users (and members of staff)"
)
)
pdf_heading = models.CharField(max_length=200, blank=True)
class Meta:
verbose_name = 'Booking settings'
def __str__(self):
return "Booking settings (permissions: {}, notes: {})".format(
self.display_permissions,
self.notes_user_staff,
)
@property
def edit_from_detail(self):
"""Do we edit events from the detail page?"""
return self.notes_user_staff or self.display_rota
reversion.register(BookingSettings)
class CategoryManager(models.Manager):
def create_category(self, description):
category = self.model(
description=description,
)
category.save()
return category
class Category(TimeStampedModel):
description = models.CharField(max_length=200)
promote = models.BooleanField(default=False)
routine = models.BooleanField(default=True)
objects = CategoryManager()
class Meta:
ordering = ('description',)
verbose_name = 'Event type'
verbose_name_plural = 'Event types'
def __str__(self):
return '{}'.format(self.description)
reversion.register(Category)
class LocationManager(models.Manager):
def create_location(self, title):
location = self.model(
title=title,
)
location.save()
return location
class Location(TimeStampedModel):
title = models.CharField(max_length=200)
address = models.TextField(blank=True)
url = models.URLField(blank=True, null=True)
url_map = models.URLField(blank=True, null=True)
description = models.TextField(blank=True)
picture = models.ImageField(upload_to='booking', blank=True)
objects = LocationManager()
class Meta:
ordering = ('title',)
verbose_name = 'Location'
verbose_name_plural = 'Locations'
def __str__(self):
return '{}'.format(self.title)
reversion.register(Location)
class PermissionManager(models.Manager):
def create_permission(self, slug, description):
permission = self.model(
slug=slug,
description=description,
)
permission.save()
return permission
def init_permission(self, slug, description):
try:
permission = self.model.objects.get(slug=slug)
permission.description = description
permission.save()
except self.model.DoesNotExist:
permission = self.create_permission(slug, description)
return permission
class Permission(TimeStampedModel):
PUBLIC = 'public'
STAFF = 'staff'
USER = 'user'
slug = models.SlugField(unique=True)
description = models.CharField(max_length=200)
objects = PermissionManager()
class Meta:
ordering = ('slug',)
verbose_name = 'Permission'
verbose_name_plural = 'Permissions'
def __str__(self):
return '{}'.format(self.description)
reversion.register(Permission)
class BookingManager(models.Manager):
def _current(self):
"""Return all current bookings."""
return self.model.objects.exclude(deleted=True)
def _eight_months(self):
today = timezone.now().date()
return today + relativedelta(months=8)
def _filter_by_date(self, qs, start_date, end_date):
"""
        Filter booking objects which are in this date range.
If the start date or end date are in the range, then include them.
"""
return qs.filter(
(Q(start_date__gte=start_date) & Q(start_date__lte=end_date))
|
(Q(end_date__lte=end_date) & Q(end_date__gte=start_date))
)
def _filter_by_month(self, qs, month, year):
"""
Find booking objects which are in the month.
If the start date or end date are in the month, then include them.
"""
return qs.filter(
(Q(start_date__month=month) & Q(start_date__year=year))
|
(Q(end_date__month=month) & Q(end_date__year=year))
)
def _two_months(self):
today = timezone.now().date()
return today + relativedelta(months=2)
def _public(self):
return self._current().filter(
permission__slug=Permission.PUBLIC,
#status__publish=True,
)
def _public_calendar(self):
return self._filter_by_date(
self._public(), timezone.now().date(), self._two_months()
)
def _public_month(self, month, year):
"""Public bookings for this month."""
return self._filter_by_month(self._public(), month, year)
def _staff_calendar(self):
return self._filter_by_date(
self._current(), timezone.now().date(), self._two_months()
)
def _staff_month(self, month, year):
return self._filter_by_month(self._current(), month, year)
def _user(self):
return self._current().filter(
permission__slug__in=(Permission.PUBLIC, Permission.USER),
)
def _user_calendar(self):
return self._filter_by_date(
self._user(), timezone.now().date(), self._two_months()
)
def _user_month(self, month, year):
return self._filter_by_month(self._user(), month, year)
def calendar(self, user):
if user.is_staff:
result = self._staff_calendar()
elif user.is_authenticated():
result = self._user_calendar()
else:
result = self._public_calendar()
return result
def month(self, user, month, year):
if user.is_staff:
result = self._staff_month(month, year)
elif user.is_authenticated():
result = self._user_month(month, year)
else:
result = self._public_month(month, year)
return result
def public_calendar_widget(self, start_date, end_date):
return self._filter_by_date(self._public(), start_date,end_date)
def public_promoted(self):
return self._public().filter(
start_date__gt=self._two_months(),
start_date__lte=self._eight_months(),
category__promote=True,
)
class Booking(TimeStampedModel):
    permission = models.ForeignKey(Permission, default=default_permission)
category = models.ForeignKey(Category, blank=True, null=True)
title = models.CharField(max_length=200, blank=True)
start_date = models.DateField(help_text='(dd/mm/yyyy)')
start_time = models.TimeField(
blank=True, null=True,
help_text="Please enter in 24 hour format e.g. 19:00",
)
end_date = models.DateField(
blank=True, null=True,
help_text='(dd/mm/yyyy)'
)
end_time = models.TimeField(
blank=True, null=True,
help_text="Please enter in 24 hour format e.g. 21:00",
)
location = models.ForeignKey(Location, blank=True, null=True)
description = models.TextField(blank=True)
picture = models.ImageField(upload_to='booking', blank=True)
notes_user = models.TextField(
blank=True,
help_text="Notes for your users who are logged into the site.",
)
notes_staff = models.TextField(
blank=True,
help_text="Notes for members of staff.",
)
deleted = models.BooleanField(default=False)
objects = BookingManager()
class Meta:
ordering = ('start_date', 'start_time')
verbose_name = 'Booking'
verbose_name_plural = 'Bookings'
def __str__(self):
end = ''
if self.end_date:
end = '-{}'.format(self.end_date.strftime("%a %d %b %Y"))
return '{}{}: {}'.format(
self.start_date.strftime("%a %d %b %Y"), end, self.title)
def _is_in_the_past(self):
return self.end_date and self.end_date < timezone.now().date()
def clean(self):
if self.end_date:
if self.start_date > self.end_date:
raise ValidationError(
'A booking cannot end before it has started.'
)
if self.start_date == self.end_date:
raise ValidationError(
'A booking cannot start and end on the same day.'
)
if self._is_in_the_past():
raise ValidationError(
'You cannot make a booking in the past.'
)
def is_current(self):
return not self._is_in_the_past()
def rota(self):
return self.rota_set.exclude(deleted=True)
reversion.register(Booking)
class RotaType(TimeStampedModel):
name = models.CharField(max_length=200)
order = models.IntegerField()
class Meta:
ordering = ('order',)
verbose_name = 'Rota type'
verbose_name_plural = 'Rota types'
def __str__(self):
return '{}'.format(self.name)
reversion.register(RotaType)
class Rota(TimeStampedModel):
booking = models.ForeignKey(Booking)
rota = models.ForeignKey(RotaType)
name = models.CharField(max_length=200)
deleted = models.BooleanField(default=False)
class Meta:
ordering = ('booking', 'rota__order', 'name')
verbose_name = 'Rota'
verbose_name_plural = 'Rotas'
def __str__(self):
        return '{} {} {}'.format(self.booking, self.rota.name, self.name)
reversion.register(Rota)
| pkimber/booking | booking/models.py | Python | apache-2.0 | 10,565 | 0.000947 |
# -*- coding: utf-8 -*-
#
# This file is part of CERN Document Server.
# Copyright (C) 2016 CERN.
#
# CERN Document Server is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# CERN Document Server is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with CERN Document Server; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""Python wrappers for the ffmpeg command-line utility."""
from __future__ import absolute_import
from subprocess import check_output
import pexpect
def ff_probe(input_filename, field):
"""Retrieve requested field from the output of ffprobe.
**OPTIONS**
* *-v error* show all errors
* *-select_streams v:0* select only video stream
* *-show_entries stream=<field>* show only requested field
* *-of default=noprint_wrappers=1:nokey=1* extract only values
"""
return check_output([
'ffprobe', '-v', 'error',
'-select_streams', 'v:0',
'-show_entries', 'stream={}'.format(field),
'-of', 'default=noprint_wrappers=1:nokey=1',
'{}'.format(input_filename)
]).rstrip()
def ff_probe_all(input_filename):
"""Retrieve all video metadata from the output of ffprobe.
**OPTIONS**
* *-v error* show all errors
* *-show_format -print_format json* output in JSON format
* *-show_streams -select_streams v:0* show information for video streams
"""
return check_output([
'ffprobe', '-v', 'error',
'-show_format', '-print_format', 'json',
'-show_streams', '-select_streams', 'v:0',
'{}'.format(input_filename)
]).decode('utf-8')
def ff_frames(input_file, start, end, step, output, progress_callback=None):
"""Extract requested frames from video.
:param input_file:
:param start: percentage of the video to begin extracting frames.
:param end: percentage of the video to stop extracting frames.
:param step: percentage between of the video between frames.
:param output: output folder and format for the file names as in ``ffmpeg``,
i.e /path/to/somewhere/frames-%d.jpg
:param progress_callback: function taking as first parameter the number of seconds
processed and as second parameter the total duration of the video.
"""
duration = float(ff_probe(input_file, 'duration'))
# Calculate time step
start_time = (duration * start / 100)
end_time = (duration * end / 100)
time_step = duration * step / 100
cmd = 'ffmpeg -i {0} -ss {1} -to {2} -vf fps=1/{3} {4}'.format(
input_file, start_time, end_time, time_step, output
)
thread = pexpect.spawn(cmd)
regex = thread.compile_pattern_list(
        [pexpect.EOF, r'time=(\d\d:\d\d:\d\d)\.\d\d']
)
while True:
index = thread.expect_list(regex, timeout=None)
if index == 0:
break
elif progress_callback:
progress_callback(sum(
int(amount) * 60 ** power for power, amount in
enumerate(reversed(thread.match.group(1).split(b':')))
), duration)
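# Illustrative usage (paths and callback are placeholders, not part of the API):
#
# ff_frames('/tmp/video.mp4', 5, 95, 10, '/tmp/frames/frame-%d.jpg',
#           progress_callback=lambda elapsed, total: print(elapsed, '/', total))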
| drjova/cds-demosite | cds/modules/ffmpeg/ffmpeg.py | Python | gpl-2.0 | 3,735 | 0.002677 |
from django.shortcuts import render
from django.http import HttpResponseRedirect
from django.contrib.auth.decorators import login_required
from django.contrib.auth import views as auth_views
from django.contrib.auth import logout
from django.views.decorators.csrf import csrf_protect
from django.shortcuts import render_to_response
from django.template import RequestContext
from .forms import LoginForm, RegistrationForm, SearchForm, ParentalMetricScoreModelForm, StudentInfoModelForm
from .models import ParentalMetric, LanguageDisability, MathematicalDisability
from django.contrib.auth.models import User
from django.urls import reverse
from django.shortcuts import redirect
from django.contrib import messages
from django.contrib.auth import update_session_auth_hash
from django.contrib.auth.forms import PasswordChangeForm
from django.views.generic import FormView
from django.contrib.auth.mixins import LoginRequiredMixin
from .multiforms import MultiFormsView
from django.contrib.admin.views.decorators import staff_member_required
from .log import Logger
logger = Logger().function_logger('views')
# def login(request):
# # if this is a POST request we need to process the form data
# if request.method == 'POST':
# # create a form instance and populate it with data from the request:
# login_form = LoginForm(request.POST)
# signup_form = SignUpForm(request.POST)
# # check whether it's valid:
# if login_form.is_valid():
# # process the data in form.cleaned_data as required
# # ...
# # redirect to a new URL:
# return HttpResponseRedirect('/loggedin/')
# # check whether it's valid:
# if signup_form.is_valid():
# # process the data in form.cleaned_data as required
# if signup_form.cleaned_data['password'] == signup_form.cleaned_data['confirm_password']:
# # coolz
# # redirect to a new URL:
# return HttpResponseRedirect('/registered/')
# else:
# error_message = "Passwords don't match"
# return HttpResponseRedirect('/login/', args=(error_message, ))
#
# # if a GET (or any other method) we'll create a blank form
# else:
# login_form = LoginForm()
# signup_form = SignUpForm()
#
# return render(request, 'home.html', {'login_form': login_form, 'signup_form': signup_form})
# LoginRequiredMixin is listed first so its dispatch() runs before the view's.
class HomeView(LoginRequiredMixin, MultiFormsView):
login_url = "login/"
template_name = 'user_profile.html'
form_classes = {'student': StudentInfoModelForm,
'search': SearchForm}
success_url = '/'
# def get_student_initial(self):
# return {'email': 'dave@dave.com'}
#
# def get_search_initial(self):
# return {'email': 'dave@dave.com'}
# def get_context_data(self, **kwargs):
# context = super(HomeView, self).get_context_data(**kwargs)
# context.update({"some_context_value": 'blah blah blah',
# "some_other_context_value": 'blah'})
# return context
def student_form_valid(self, form):
logger.info("in student valid")
pmmodels = []
for metric in ParentalMetric.objects.all():
logger.info("in for loop")
pmform = ParentalMetricScoreModelForm(self.request.POST, prefix=metric.metric_name)
if pmform.is_valid():
logger.info("inside if statement")
pmmodel = pmform.save(commit=False)
pmmodel.metric_type = metric
pmmodels.append(pmmodel)
else:
logger.info("in else statement")
return False
student = form.save()
for pmmodel in pmmodels:
logger.info("in second for loop")
pmmodel.student = student
pmmodel.save()
logger.info("post save")
return HttpResponseRedirect(self.success_url)
# return form.login(self.request, redirect_url=self.get_success_url())
def search_form_valid(self, form):
return HttpResponseRedirect(self.success_url)
# user = form.save(self.request)
# return form.signup(self.request, user, self.get_success_url())
def get_context_data(self, **kwargs):
logger.info("in context data")
context = super(HomeView, self).get_context_data(**kwargs)
# Parental metric
metric_dict = {}
for metric in ParentalMetric.objects.all():
pmform = ParentalMetricScoreModelForm(prefix=metric.metric_name)
metric_dict.update({
metric.metric_name: pmform
})
context.update({
'metrics': metric_dict
})
return context
def forms_invalid(self, forms):
logger.info("in forms invalid")
ret = super(HomeView, self).forms_invalid(forms)
return ret
def forms_valid(self, forms, form_name):
logger.info("in forms valid")
ret = super(HomeView, self).forms_valid(forms, form_name)
return ret
def register(request):
if request.method == 'POST':
form = RegistrationForm(request.POST)
if form.is_valid():
user = User.objects.create_user(
username=form.cleaned_data['username'],
password=form.cleaned_data['password'],
email=form.cleaned_data['email'],
first_name=form.cleaned_data['first_name'],
last_name=form.cleaned_data['last_name']
)
# message = 'Registered successfully'
return redirect('home')
else:
form = RegistrationForm()
return render(request, 'home.html', {'form':LoginForm(),'signup_form':form})
# def register_success(request):
# return render( request, 'home.html', {'message': 'Registered successfully'} )
# def logout_page(request):
# logout(request)
# return HttpResponseRedirect('/logout')
class LoginSignupView(auth_views.LoginView):
def get_context_data(self, **kwargs):
context = super(LoginSignupView, self).get_context_data(**kwargs)
context.update({
'signup_form': RegistrationForm(),
'message':''
})
return context
# def change_password(request):
# if request.method == 'POST':
# form = PasswordChangeForm(request.user, request.POST)
# if form.is_valid():
# user = form.save()
# update_session_auth_hash(request, user) # Important!
# messages.success(request, 'Your password was successfully updated!')
# return redirect('login:change_password')
# else:
# messages.error(request, 'Please correct the error below.')
# else:
# form = PasswordChangeForm(request.user)
# response = render(request, 'password_change.html', {
# 'form': form
# })
# response.set_cookie('password_changed', 'true')
# return response
class CustomPasswordChangeView(auth_views.PasswordChangeView):
def dispatch(self, request, *args, **kwargs):
response = super(CustomPasswordChangeView, self).dispatch(request, *args, **kwargs)
response.set_cookie('password_changed', 'true')
return response
class CustomPasswordChangeDoneView(auth_views.PasswordChangeDoneView):
def dispatch(self, request, *args, **kwargs):
if 'password_changed' in request.COOKIES:
response = super().dispatch(request, *args, **kwargs)
response.delete_cookie('password_changed')
return response
else:
return HttpResponseRedirect("/")
class CustomPasswordResetView(auth_views.PasswordResetView):
def dispatch(self, request, *args, **kwargs):
response = super().dispatch(request, *args, **kwargs)
response.set_cookie('password_reset', 'true')
return response
class CustomPasswordResetDoneView(auth_views.PasswordResetDoneView):
def dispatch(self, request, *args, **kwargs):
if 'password_reset' in request.COOKIES:
response = super().dispatch(request, *args, **kwargs)
response.delete_cookie('password_reset')
return response
else:
return HttpResponseRedirect("/")
class CustomPasswordResetConfirmView(auth_views.PasswordResetConfirmView):
def dispatch(self, request, *args, **kwargs):
response = super().dispatch(request, *args, **kwargs)
response.set_cookie('password_reset_initiated', 'true')
return response
class CustomPasswordResetCompleteView(auth_views.PasswordResetCompleteView):
def dispatch(self, request, *args, **kwargs):
if 'password_reset_initiated' in request.COOKIES:
response = super().dispatch(request, *args, **kwargs)
response.delete_cookie('password_reset_initiated')
return response
else:
return HttpResponseRedirect("/")
| Ignoramuss/LDERP | LDERPdjango/login/views.py | Python | apache-2.0 | 8,998 | 0.002223 |
# -*- coding: utf-8 -*-
"""
Abstract: Creates RDF for earthquake objects.
Gets an Array of Python Earthquake objects and turns them to RDF using RDFlib.
The RDF can either be outputted or written to a file.
This class does not inherit from RDFWriter!
"""
__author__ = "Marc Tim Thiemann"
__copyright__ = "Copyright 2015"
__credits__ = ["Marc Tim Thiemann"]
__license__ = ""
__version__ = "0.1"
__maintainer__ = ""
__email__ = ""
__date__ = "February 2015"
__status__ = "Development"
from rdflib import Graph, BNode, Namespace, RDF, XSD, Literal, URIRef
import os.path
import json
class EarthquakeRdfWriter2():
def __init__(self, bindings = None):
"""
Initialize Graph and setup namespaces
@param bindings The path to a json configuration file.
"""
self.g = Graph()
if bindings is not None:
self.bindNamespaces(bindings)
def create(self, earthquakeArr, format, destination = None, eqNamespace = None):
"""
Create Output for an array of earthquake objects
@param earthquakeArr An array of earthquake objects
        @param format The output format. Supported formats: 'xml', 'n3', 'turtle', 'nt', 'pretty-xml', 'trix'
@param destination The destination for the output file
@param eqNamespace The uri for these RDF objects
"""
for o in earthquakeArr:
self.add(o, eqNamespace)
if destination is None:
print self.g.serialize(format=format)
else:
self.g.serialize(destination = destination + '.' + self.getExtension(format), format=format)
def add(self, earthquake, uri):
"""
Add RDF for this earthquake to the graph.
@param earthquake The earthquake object
@param uri The uri for that earthquake
"""
randomId = os.urandom(16).encode('hex')
eq = URIRef(uri + randomId)
self.g.add( (eq, RDF.type, self.eq.Earthquake) )
self.g.add( (eq, self.geo.lat, Literal(earthquake.latitude, datatype=XSD.float) ) )
self.g.add( (eq, self.geo.long, Literal(earthquake.longitude, datatype=XSD.float) ) )
self.g.add( (eq, self.qudt.vectorMagnitude, Literal(earthquake.magnitude, datatype=XSD.float) ) )
self.g.add( (eq, self.lode.atPlace, Literal(earthquake.place) ) )
self.g.add( (eq, self.lode.atTime, Literal(earthquake.atTime.isoformat(), datatype=XSD.dateTime) ) )
def getExtension(self, format):
"""
Get the file extension for a given format
@param format The format
"""
if format == "xml":
return "rdf"
elif format == "turtle":
return "ttl"
elif format == "pretty-xml":
return "xml"
else:
return format
def bindNamespaces(self, bindings):
"""
Binds namespaces to the graph
@param bindings The path to a json configuration file. The json file should have an array called "bindings".
Each object in this array should have an attribute called "prefix" for the prefix and an attribute called "namespace" for the namespace uri.
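        Example file content (illustrative):
            {
              "bindings": [
                {"prefix": "geo", "namespace": "http://www.w3.org/2003/01/geo/wgs84_pos#"},
                {"prefix": "eq", "namespace": "http://example.org/earthquake#"}
              ]
            }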
"""
json_data = open(bindings).read()
data = json.loads(json_data)
for obj in data['bindings']:
setattr(self, obj['prefix'], Namespace(obj['namespace']))
self.g.bind(obj['prefix'], getattr(self, obj['prefix']))
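# Illustrative usage (file names and namespace are placeholders):
#
# writer = EarthquakeRdfWriter2('bindings.json')
# writer.create(quakes, 'turtle', destination='earthquakes',
#               eqNamespace='http://example.org/earthquake/')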
| liangcun/ConceptsOfSpatialInformation | CoreConceptsPy/GdalPy/examples/events/earthquake/EarthquakeRdfWriter2.py | Python | apache-2.0 | 3,451 | 0.010207 |
#!/usr/bin/env python
# Copyright (C) 2012 Andrea Valle
#
# This file is part of swgit.
#
# swgit is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# swgit is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with swgit. If not, see <http://www.gnu.org/licenses/>.
from Defines import *
from Utils import *
from ObjEnv import *
from ObjCfg import *
class ObjMailBase( ObjCfgMail ):
DEFAULT_MAIL_CFG = """\
#
# Inside this file user can provide sensible defaults for mail delivery
#
# Please run
# swgit --tutorial-mailcfg
# for more informations
#
#[%s]
#mailserver-sshuser =
#mailserver-sshaddr =
#from =
#to =
#to-1 =
#to-2 =
#cc =
#cc-1 =
#cc-2 =
#bcc =
#bcc-1 =
#bcc-2 =
#subject =
#body-header =
#body-footer =
#
#[%s]
#mailserver-sshuser =
#mailserver-sshaddr =
#from =
#to =
#to-1 =
#to-2 =
#cc =
#cc-1 =
#cc-2 =
#bcc =
#bcc-1 =
#bcc-2 =
#subject =
#body-header =
#body-footer =
""" % ( SWCFG_STABILIZE_SECT, SWCFG_MAIL_PUSH_SECT )
CMD_SEND_MAIL_TEMPL = "echo -e \"%s\" | /bin/mail \"%s\" -s \"%s\" %s %s %s"
def __init__( self, file, section ):
super(ObjMailBase, self ).__init__( file, section )
def dump( self ):
retstr = "\n"
if self.isValid_ == False:
retstr += "INVALID "
retstr += "Mail configuration for %s\n" % self.section_
retstr += super(ObjMailBase, self ).dump()
return retstr
def sanitize_message( self, mess ):
for clean in [ "'", '"' ]:
mess = mess.replace( clean, ' ' )
return mess
def get_all_body( self, body ):
allbody = self.sanitize_message( self.bodyH_ )
if self.bodyH_ != "":
allbody += "\n"
allbody += body
if self.bodyF_ != "":
allbody += "\n" + self.sanitize_message( self.bodyF_ )
return allbody
def get_cc_opt( self ):
cc_opt = ""
if self.cc_ != "":
cc_opt = " -c \"%s\" " % ( ",".join(self.cc_) )
return cc_opt
def get_bcc_opt( self ):
bcc_opt = ""
if self.bcc_ != "":
bcc_opt = " -b \"%s\" " % ( ",".join(self.bcc_) )
return bcc_opt
def get_from_opt( self ):
from_opt = ""
if self.from_ != "":
from_opt = " -- -f \"%s\" " % ( self.from_ )
return from_opt
def get_mail_cmd( self ):
if self.isValid_ == False:
return ""
cmd_send_mail = self.CMD_SEND_MAIL_TEMPL % \
( self.get_all_body( "BODY_HERE" ),
",".join(self.to_),
"SUBJECT_HERE",
self.get_cc_opt(),
self.get_bcc_opt(),
self.get_from_opt()
)
if self.sshaddr_ != "":
return "ssh %s@%s '%s'" % (self.sshuser_, self.sshaddr_, cmd_send_mail )
return cmd_send_mail
def sendmail( self, body, debug ):
if self.isValid_ == False:
return self.dump(), 1
cmd_send_mail = self.CMD_SEND_MAIL_TEMPL % \
( self.get_all_body( body ),
",".join(self.to_),
self.subj_,
self.get_cc_opt(),
self.get_bcc_opt(),
self.get_from_opt()
)
if self.sshaddr_ != "":
if debug == True:
return "%s@%s:\n%s" % (self.sshuser_, self.sshaddr_, cmd_send_mail ), 0
else:
return mySSHCommand_fast( cmd_send_mail, self.sshuser_, self.sshaddr_ )
else:
if debug == True:
return "localhost:\n%s" % ( cmd_send_mail ), 0
else:
return myCommand_fast( cmd_send_mail )
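  # Illustrative sketch (hypothetical addresses, not from any real config):
  # with to=["dev@example.com"], cc=["qa@example.com"], from="ci@example.com"
  # and subject "build done", sendmail() composes roughly:
  #
  #   echo -e "body" | /bin/mail "dev@example.com" -s "build done" \
  #       -c "qa@example.com" -- -f "ci@example.com"
  #
  # and, when mailserver-sshaddr is configured, wraps the same command in
  # ssh <sshuser>@<sshaddr> '...'.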
################
# STABILIZE MAIL #
################
class ObjMailStabilize( ObjMailBase ):
def __init__( self ):
super(ObjMailStabilize, self ).__init__( SWFILE_MAILCFG, SWCFG_STABILIZE_SECT )
self.load_cfg()
#############
# PUSH MAIL #
#############
class ObjMailPush( ObjMailBase ):
def __init__( self ):
super(ObjMailPush, self ).__init__( SWFILE_MAILCFG, SWCFG_MAIL_PUSH_SECT )
#override "to"
self.fields_mandatory_[1] = [self.set_to, self.get_to, "to" , SWCFG_MAIL_TO, GITCFG_USERMAIL ]
self.load_cfg()
def main():
for o in ( ObjMailStabilize, ObjMailPush ):
obj = o()
print "\n", '#'*10, o, '#'*10, "\n"
print obj.show_config_options()
print ""
print obj.dump()
print ""
print "Sending mail"
out, errCode = obj.sendmail( "body\nbody", debug = True )
print out
#out, errCode = obj.sendmail( "body\nbody", debug = False )
if __name__ == "__main__":
main()
| andreav/swgit | core/ObjMail.py | Python | gpl-3.0 | 5,260 | 0.028897 |
# Copyright 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import logging
from django.conf import settings
from common import api
from common import clock
from common import exception
from common import throttle
from common.test import base
from common.test import util as test_util
class ThrottleTest(base.FixturesTestCase):
def setUp(self):
super(ThrottleTest, self).setUp()
self.popular = api.actor_get(api.ROOT, 'popular@example.com')
def test_basic(self):
# lather
# succeed the first two times, fail the third
throttle.throttle(self.popular, 'test', minute=2)
throttle.throttle(self.popular, 'test', minute=2)
def _failPants():
throttle.throttle(self.popular, 'test', minute=2)
self.assertRaises(exception.ApiThrottled, _failPants)
# rinse
# magically advance time by a couple minutes
o = test_util.override_clock(clock, seconds=120)
# repeat
# succeed the first two times, fail the third
throttle.throttle(self.popular, 'test', minute=2)
throttle.throttle(self.popular, 'test', minute=2)
self.assertRaises(exception.ApiThrottled, _failPants)
o.reset()
| codegooglecom/jaikuengine | common/test/throttle.py | Python | apache-2.0 | 1,689 | 0.003552 |
"""
termcolors.py
"""
from django.utils import six
color_names = ('black', 'red', 'green', 'yellow', 'blue', 'magenta', 'cyan', 'white')
foreground = {color_names[x]: '3%s' % x for x in range(8)}
background = {color_names[x]: '4%s' % x for x in range(8)}
RESET = '0'
opt_dict = {'bold': '1', 'underscore': '4', 'blink': '5', 'reverse': '7', 'conceal': '8'}
def colorize(text='', opts=(), **kwargs):
"""
Returns your text, enclosed in ANSI graphics codes.
Depends on the keyword arguments 'fg' and 'bg', and the contents of
the opts tuple/list.
Returns the RESET code if no parameters are given.
Valid colors:
'black', 'red', 'green', 'yellow', 'blue', 'magenta', 'cyan', 'white'
Valid options:
'bold'
'underscore'
'blink'
'reverse'
'conceal'
'noreset' - string will not be auto-terminated with the RESET code
Examples:
colorize('hello', fg='red', bg='blue', opts=('blink',))
colorize()
colorize('goodbye', opts=('underscore',))
print(colorize('first line', fg='red', opts=('noreset',)))
print('this should be red too')
print(colorize('and so should this'))
print('this should not be red')
"""
code_list = []
if text == '' and len(opts) == 1 and opts[0] == 'reset':
return '\x1b[%sm' % RESET
for k, v in six.iteritems(kwargs):
if k == 'fg':
code_list.append(foreground[v])
elif k == 'bg':
code_list.append(background[v])
for o in opts:
if o in opt_dict:
code_list.append(opt_dict[o])
if 'noreset' not in opts:
text = '%s\x1b[%sm' % (text or '', RESET)
return '%s%s' % (('\x1b[%sm' % ';'.join(code_list)), text or '')
def make_style(opts=(), **kwargs):
"""
Returns a function with default parameters for colorize()
Example:
bold_red = make_style(opts=('bold',), fg='red')
print(bold_red('hello'))
KEYWORD = make_style(fg='yellow')
COMMENT = make_style(fg='blue', opts=('bold',))
"""
return lambda text: colorize(text, opts, **kwargs)
NOCOLOR_PALETTE = 'nocolor'
DARK_PALETTE = 'dark'
LIGHT_PALETTE = 'light'
PALETTES = {
NOCOLOR_PALETTE: {
'ERROR': {},
'WARNING': {},
'NOTICE': {},
'SQL_FIELD': {},
'SQL_COLTYPE': {},
'SQL_KEYWORD': {},
'SQL_TABLE': {},
'HTTP_INFO': {},
'HTTP_SUCCESS': {},
'HTTP_REDIRECT': {},
'HTTP_NOT_MODIFIED': {},
'HTTP_BAD_REQUEST': {},
'HTTP_NOT_FOUND': {},
'HTTP_SERVER_ERROR': {},
'MIGRATE_HEADING': {},
'MIGRATE_LABEL': {},
'MIGRATE_SUCCESS': {},
'MIGRATE_FAILURE': {},
},
DARK_PALETTE: {
'ERROR': {'fg': 'red', 'opts': ('bold',)},
'WARNING': {'fg': 'yellow', 'opts': ('bold',)},
'NOTICE': {'fg': 'red'},
'SQL_FIELD': {'fg': 'green', 'opts': ('bold',)},
'SQL_COLTYPE': {'fg': 'green'},
'SQL_KEYWORD': {'fg': 'yellow'},
'SQL_TABLE': {'opts': ('bold',)},
'HTTP_INFO': {'opts': ('bold',)},
'HTTP_SUCCESS': {},
'HTTP_REDIRECT': {'fg': 'green'},
'HTTP_NOT_MODIFIED': {'fg': 'cyan'},
'HTTP_BAD_REQUEST': {'fg': 'red', 'opts': ('bold',)},
'HTTP_NOT_FOUND': {'fg': 'yellow'},
'HTTP_SERVER_ERROR': {'fg': 'magenta', 'opts': ('bold',)},
'MIGRATE_HEADING': {'fg': 'cyan', 'opts': ('bold',)},
'MIGRATE_LABEL': {'opts': ('bold',)},
'MIGRATE_SUCCESS': {'fg': 'green', 'opts': ('bold',)},
'MIGRATE_FAILURE': {'fg': 'red', 'opts': ('bold',)},
},
LIGHT_PALETTE: {
'ERROR': {'fg': 'red', 'opts': ('bold',)},
'WARNING': {'fg': 'yellow', 'opts': ('bold',)},
'NOTICE': {'fg': 'red'},
'SQL_FIELD': {'fg': 'green', 'opts': ('bold',)},
'SQL_COLTYPE': {'fg': 'green'},
'SQL_KEYWORD': {'fg': 'blue'},
'SQL_TABLE': {'opts': ('bold',)},
'HTTP_INFO': {'opts': ('bold',)},
'HTTP_SUCCESS': {},
'HTTP_REDIRECT': {'fg': 'green', 'opts': ('bold',)},
'HTTP_NOT_MODIFIED': {'fg': 'green'},
'HTTP_BAD_REQUEST': {'fg': 'red', 'opts': ('bold',)},
'HTTP_NOT_FOUND': {'fg': 'red'},
'HTTP_SERVER_ERROR': {'fg': 'magenta', 'opts': ('bold',)},
'MIGRATE_HEADING': {'fg': 'cyan', 'opts': ('bold',)},
'MIGRATE_LABEL': {'opts': ('bold',)},
'MIGRATE_SUCCESS': {'fg': 'green', 'opts': ('bold',)},
'MIGRATE_FAILURE': {'fg': 'red', 'opts': ('bold',)},
}
}
DEFAULT_PALETTE = DARK_PALETTE
def parse_color_setting(config_string):
"""Parse a DJANGO_COLORS environment variable to produce the system palette
    The general form of a palette definition is:
"palette;role=fg;role=fg/bg;role=fg,option,option;role=fg/bg,option,option"
where:
palette is a named palette; one of 'light', 'dark', or 'nocolor'.
role is a named style used by Django
        fg is a foreground color.
        bg is a background color.
        option is a display option.
Specifying a named palette is the same as manually specifying the individual
    definitions for each role. Any individual definitions following the palette
definition will augment the base palette definition.
Valid roles:
'error', 'notice', 'sql_field', 'sql_coltype', 'sql_keyword', 'sql_table',
'http_info', 'http_success', 'http_redirect', 'http_bad_request',
'http_not_found', 'http_server_error'
Valid colors:
'black', 'red', 'green', 'yellow', 'blue', 'magenta', 'cyan', 'white'
Valid options:
'bold', 'underscore', 'blink', 'reverse', 'conceal'
"""
if not config_string:
return PALETTES[DEFAULT_PALETTE]
# Split the color configuration into parts
parts = config_string.lower().split(';')
palette = PALETTES[NOCOLOR_PALETTE].copy()
for part in parts:
if part in PALETTES:
# A default palette has been specified
palette.update(PALETTES[part])
elif '=' in part:
# Process a palette defining string
definition = {}
# Break the definition into the role,
# plus the list of specific instructions.
# The role must be in upper case
role, instructions = part.split('=')
role = role.upper()
styles = instructions.split(',')
styles.reverse()
# The first instruction can contain a slash
# to break apart fg/bg.
colors = styles.pop().split('/')
colors.reverse()
fg = colors.pop()
if fg in color_names:
definition['fg'] = fg
if colors and colors[-1] in color_names:
definition['bg'] = colors[-1]
# All remaining instructions are options
opts = tuple(s for s in styles if s in opt_dict.keys())
if opts:
definition['opts'] = opts
# The nocolor palette has all available roles.
# Use that palette as the basis for determining
# if the role is valid.
if role in PALETTES[NOCOLOR_PALETTE] and definition:
palette[role] = definition
# If there are no colors specified, return the empty palette.
if palette == PALETTES[NOCOLOR_PALETTE]:
return None
return palette
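# Illustrative sketch (not part of the original module): given the rules above,
# a DJANGO_COLORS value such as "light;error=yellow/blue,blink;notice=magenta"
# starts from the light palette and then overrides two roles, roughly:
#
#   palette = parse_color_setting("light;error=yellow/blue,blink;notice=magenta")
#   palette['ERROR']   # -> {'fg': 'yellow', 'bg': 'blue', 'opts': ('blink',)}
#   palette['NOTICE']  # -> {'fg': 'magenta'}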
| SujaySKumar/django | django/utils/termcolors.py | Python | bsd-3-clause | 7,479 | 0.000669 |
"""Phone number to time zone mapping functionality
>>> import phonenumbers
>>> from phonenumbers.timezone import time_zones_for_number
>>> ro_number = phonenumbers.parse("+40721234567", "RO")
>>> tzlist = time_zones_for_number(ro_number)
>>> len(tzlist)
1
>>> str(tzlist[0])
'Europe/Bucharest'
>>> mx_number = phonenumbers.parse("+523291234567", "GB")
>>> tzlist = time_zones_for_number(mx_number)
>>> len(tzlist)
2
>>> str(tzlist[0])
'America/Mazatlan'
>>> str(tzlist[1])
'America/Mexico_City'
"""
# Based very loosely on original Java code:
# java/geocoder/src/com/google/i18n/phonenumbers/PhoneNumberToTimeZonesMapper.java
# Copyright (C) 2013 The Libphonenumber Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .util import prnt, u, U_PLUS
from .phonenumberutil import PhoneNumberType, number_type
from .phonenumberutil import PhoneNumberFormat, format_number
from .phonenumberutil import is_number_type_geographical
try:
from .tzdata import TIMEZONE_DATA, TIMEZONE_LONGEST_PREFIX
except ImportError: # pragma no cover
# Before the generated code exists, the carrierdata/ directory is empty.
# The generation process imports this module, creating a circular
# dependency. The hack below works around this.
import os
import sys
if (os.path.basename(sys.argv[0]) == "buildmetadatafromxml.py" or
os.path.basename(sys.argv[0]) == "buildprefixdata.py"):
prnt("Failed to import generated data (but OK as during autogeneration)", file=sys.stderr)
TIMEZONE_DATA = {'4411': u('Europe/London')}
TIMEZONE_LONGEST_PREFIX = 4
else:
raise
__all__ = ['UNKNOWN_TIMEZONE', 'time_zones_for_geographical_number', 'time_zones_for_number']
# This is defined by ICU as the unknown time zone.
UNKNOWN_TIMEZONE = u("Etc/Unknown")
_UNKNOWN_TIME_ZONE_LIST = (UNKNOWN_TIMEZONE,)
def time_zones_for_geographical_number(numobj):
"""Returns a list of time zones to which a phone number belongs.
This method assumes the validity of the number passed in has already been
checked, and that the number is geo-localizable. We consider fixed-line
and mobile numbers possible candidates for geo-localization.
Arguments:
numobj -- a valid phone number for which we want to get the time zones
to which it belongs
Returns a list of the corresponding time zones or a single element list
with the default unknown time zone if no other time zone was found or if
the number was invalid"""
e164_num = format_number(numobj, PhoneNumberFormat.E164)
if not e164_num.startswith(U_PLUS): # pragma no cover
# Can only hit this arm if there's an internal error in the rest of
# the library
raise Exception("Expect E164 number to start with +")
for prefix_len in range(TIMEZONE_LONGEST_PREFIX, 0, -1):
prefix = e164_num[1:(1 + prefix_len)]
if prefix in TIMEZONE_DATA:
return TIMEZONE_DATA[prefix]
return _UNKNOWN_TIME_ZONE_LIST
def time_zones_for_number(numobj):
"""As time_zones_for_geographical_number() but explicitly checks the
validity of the number passed in.
Arguments:
numobj -- a valid phone number for which we want to get the time zones to which it belongs
Returns a list of the corresponding time zones or a single element list with the default
unknown time zone if no other time zone was found or if the number was invalid"""
ntype = number_type(numobj)
if ntype == PhoneNumberType.UNKNOWN:
return _UNKNOWN_TIME_ZONE_LIST
elif not is_number_type_geographical(ntype, numobj.country_code):
return _country_level_time_zones_for_number(numobj)
return time_zones_for_geographical_number(numobj)
def _country_level_time_zones_for_number(numobj):
"""Returns the list of time zones corresponding to the country calling code of a number.
Arguments:
numobj -- the phone number to look up
Returns a list of the corresponding time zones or a single element list with the default
unknown time zone if no other time zone was found or if the number was invalid"""
cc = str(numobj.country_code)
for prefix_len in range(TIMEZONE_LONGEST_PREFIX, 0, -1):
prefix = cc[:(1 + prefix_len)]
if prefix in TIMEZONE_DATA:
return TIMEZONE_DATA[prefix]
return _UNKNOWN_TIME_ZONE_LIST
if __name__ == '__main__': # pragma no cover
import doctest
doctest.testmod()
| vicky2135/lucious | oscar/lib/python2.7/site-packages/phonenumbers/timezone.py | Python | bsd-3-clause | 4,947 | 0.001819 |
VERSION_MAJOR = 0
VERSION_MINOR = 2
VERSION_PATCH = 0
version_info = (VERSION_MAJOR, VERSION_MINOR, VERSION_PATCH)
version = '%i.%i.%i' % version_info
__version__ = version
__all__ = ['version', 'version_info', '__version__']
| pyblish/pyblish-standalone | pyblish_standalone/version.py | Python | lgpl-3.0 | 229 | 0 |
from pycp2k.inputsection import InputSection
from ._neighbor_lists3 import _neighbor_lists3
from ._subcell1 import _subcell1
from ._ewald_info1 import _ewald_info1
class _print26(InputSection):
def __init__(self):
InputSection.__init__(self)
self.NEIGHBOR_LISTS = _neighbor_lists3()
self.SUBCELL = _subcell1()
self.EWALD_INFO = _ewald_info1()
self._name = "PRINT"
self._subsections = {'EWALD_INFO': 'EWALD_INFO', 'SUBCELL': 'SUBCELL', 'NEIGHBOR_LISTS': 'NEIGHBOR_LISTS'}
| SINGROUP/pycp2k | pycp2k/classes/_print26.py | Python | lgpl-3.0 | 526 | 0.003802 |
# vim: set fileencoding=UTF-8
import re
from datetime import timedelta
from django.forms import Field
from django.core.exceptions import ValidationError
from django.utils.translation import ugettext_lazy as _
from django.utils.dateparse import parse_duration
from django.utils.duration import duration_string
from .utils import human_duration_string
MICRO = timedelta(microseconds=1)
MILLIS = timedelta(milliseconds=1)
SECOND = timedelta(seconds=1)
MINUTE = timedelta(minutes=1)
HOUR = timedelta(hours=1)
DAY = timedelta(days=1)
WEEK = timedelta(days=7)
MONTH = timedelta(days=30)
YEAR = timedelta(days=365)
# a mapping of regexes to timedeltas
UNITS = {
'moment': timedelta(),
'microsecond': MICRO,
'micro': MICRO,
'mic': MICRO,
'us': MICRO,
'u': MICRO,
u'µs': MICRO,
u'µ': MICRO,
'millisecond': MILLIS,
'mil': MILLIS,
'ms': MILLIS,
'second': SECOND,
'sec': SECOND,
's': SECOND,
'minute': MINUTE,
'min': MINUTE,
'm(?!s)': MINUTE,
'hour': HOUR,
'hr': HOUR,
'h': HOUR,
'day': DAY,
'dy': DAY,
'd': DAY,
'week': WEEK,
'wk': WEEK,
'w': WEEK,
'month': MONTH,
'mon': MONTH,
'mo': MONTH,
'mth': MONTH,
'year': YEAR,
'yr': YEAR,
'y': YEAR
}
class NaturalDurationField(Field):
default_error_messages = {
'invalid': _('Enter a valid duration.'),
}
help_text = _("e.g. '1 hr and 2 milliseconds', '5 minutes, 30 sec', etc")
def __init__(self, human_values=True, default_units=None, *args, **kwargs):
self.human_values = human_values
if isinstance(default_units, timedelta):
self.default_units = default_units
elif default_units == 'm': # the only one in UNITS that's a regex
self.default_units = MINUTE
elif default_units in UNITS:
self.default_units = UNITS[default_units]
elif default_units:
raise RuntimeError(
"Got an invalid default duration unit %s" % default_units
)
else:
self.default_units = None
super(NaturalDurationField, self).__init__(*args, **kwargs)
def to_td(self, match, unit):
value = 0
string = match.group(1)
if 'a' in string:
value = 1
else:
try:
value = int(string)
except ValueError:
value = float(string)
return timedelta(seconds=value * UNITS[unit].total_seconds())
return value * UNITS[unit]
def to_python(self, value):
if value in self.empty_values:
return None
if isinstance(value, timedelta):
return value
value = value.strip()
if not value:
return None # handle values like " "
if self.default_units:
try:
intvalue = int(value)
return self.default_units * intvalue
except ValueError:
pass
try:
floatvalue = float(value)
return timedelta(
seconds=floatvalue * self.default_units.total_seconds()
)
except ValueError:
pass
td = parse_duration(value)
if td is not None:
return td
# The default parser got it. Yay.
# remove niceties
value = re.sub(r'(\.(?!\d)|&|and|,)', " ", value, flags=re.I)
td = timedelta()
for unit in UNITS:
regex = r"((\d+\.\d+)|\d+|(?=\s|\d|\b)a(n(?=\s|\d|\b))?)\s?(" \
+ unit \
+ r"s?(?=\s|\d|\b))"
matches = re.finditer(regex,
value,
flags=re.I | re.U)
for match in matches:
td = td + self.to_td(match, unit)
value = re.sub(regex, "", value, flags=re.I | re.U)
if value.strip():
# there's stuff left. KILL IT
raise ValidationError(self.default_error_messages['invalid'])
return td
def prepare_value(self, value):
# humanize had too much rounding...
# also, always assuming positive for now
if value is None:
return None
if not isinstance(value, timedelta):
return value
if not self.human_values:
return duration_string(value)
elif value == timedelta():
return "a moment"
else:
return human_duration_string(value)
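# Illustrative sketch (assumed inputs, not part of the original module): with
# the parsing rules above, a field instance behaves roughly like this:
#
#   field = NaturalDurationField()
#   field.to_python("1 hr and 2 milliseconds")  # ~ timedelta(hours=1, milliseconds=2)
#   field.to_python("an hour, 30 sec")          # ~ timedelta(hours=1, seconds=30)
#   field.prepare_value(timedelta())            # -> "a moment" (when human_values is True)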
| jmerdich/django-natural-duration | natural_duration/fields.py | Python | bsd-3-clause | 4,597 | 0.000218 |
# Copyright 2015 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Insecure client-server interoperability as a unit test."""
import unittest
import grpc
from src.proto.grpc.testing import test_pb2_grpc
from tests.interop import _intraop_test_case
from tests.interop import methods
from tests.interop import server
from tests.unit import test_common
class InsecureIntraopTest(_intraop_test_case.IntraopTestCase,
unittest.TestCase):
def setUp(self):
self.server = test_common.test_server()
test_pb2_grpc.add_TestServiceServicer_to_server(methods.TestService(),
self.server)
port = self.server.add_insecure_port('[::]:0')
self.server.start()
self.stub = test_pb2_grpc.TestServiceStub(
grpc.insecure_channel('localhost:{}'.format(port)))
if __name__ == '__main__':
unittest.main(verbosity=2)
| murgatroid99/grpc | src/python/grpcio_tests/tests/interop/_insecure_intraop_test.py | Python | apache-2.0 | 1,452 | 0 |
from setuptools import setup, find_packages
import sys
if sys.version_info[0] < 3 or sys.version_info[1] < 5:
sys.exit('Sorry, Python < 3.5 is not supported')
setup(name='waybackscraper',
version='0.5',
description='Scrapes a website archives on the wayback machine using asyncio.',
author='Arthur Brenaut',
author_email='arthur.brenaut@gmail.com',
packages=find_packages(),
entry_points={
'console_scripts': ['waybackscraper=waybackscraper.cli:main'],
},
install_requires=[
'aiohttp',
'lxml'
],
zip_safe=False)
| abrenaut/waybackscraper | setup.py | Python | mit | 608 | 0.001645 |
"""
SCL; 2011, 2012.
"""
version = "0.2"
from btsynth import *
from gridworld import *
| slivingston/btsynth | btsynth/__init__.py | Python | bsd-3-clause | 89 | 0.022472 |
# FALL 2014 Computer Networks SEECS NUST
# BESE 3
# Dr Nadeem Ahmed
# BadNet2: Errors every 5th Packet
# Usage: BadNet.transmit instead of sendto
from socket import *
class BadNet:
dummy=' '
counter = 1
@staticmethod
def transmit(csocket,message,serverName,serverPort):
# print 'Got a packet' + str(BadNet.counter)
if (BadNet.counter % 5) != 0:
csocket.sendto(message,(serverName,serverPort))
print 'BadNet Sends properly packet No ' + str(BadNet.counter)
else:
print 'BadNet creating packet errors packet No ' + str(BadNet.counter)
mylist=list(message)
# get last char of the string
x=ord(mylist[-1])
if (x&1)==1:
#if first bit set, unset it
x &= ~(1)
else:
#if first bit not set, set it
x |= 1
mylist[-1]=chr(x)
dummy=''.join(mylist)
csocket.sendto(dummy,(serverName,serverPort))
BadNet.counter=BadNet.counter+1
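# Illustrative usage sketch (module, host and port names are assumptions for
# the example): BadNet.transmit is meant as a drop-in replacement for sendto
# on the sender side, so a UDP client would do roughly:
#
#   from socket import *
#   from BadNet2 import BadNet
#
#   clientSocket = socket(AF_INET, SOCK_DGRAM)
#   BadNet.transmit(clientSocket, 'hello world', 'localhost', 12000)
#   # every 5th call flips the low bit of the last character before sending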
| rupfw/rup1.0 | RUP1.0/Server/Badnet/BadNet2.py | Python | gpl-2.0 | 905 | 0.060773 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2010 Openstack, LLC.
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Scheduler base class that all Schedulers should inherit from
"""
import datetime
from nova import db
from nova import exception
from nova import flags
from nova import log as logging
from nova import rpc
from nova import utils
from nova.compute import power_state
from nova.compute import vm_states
from nova.api.ec2 import ec2utils
FLAGS = flags.FLAGS
flags.DEFINE_integer('service_down_time', 60,
'maximum time since last checkin for up service')
flags.DECLARE('instances_path', 'nova.compute.manager')
class NoValidHost(exception.Error):
"""There is no valid host for the command."""
pass
class WillNotSchedule(exception.Error):
"""The specified host is not up or doesn't exist."""
pass
class Scheduler(object):
"""The base class that all Scheduler clases should inherit from."""
def __init__(self):
self.zone_manager = None
def set_zone_manager(self, zone_manager):
"""Called by the Scheduler Service to supply a ZoneManager."""
self.zone_manager = zone_manager
@staticmethod
def service_is_up(service):
"""Check whether a service is up based on last heartbeat."""
last_heartbeat = service['updated_at'] or service['created_at']
# Timestamps in DB are UTC.
elapsed = utils.utcnow() - last_heartbeat
return elapsed < datetime.timedelta(seconds=FLAGS.service_down_time)
def hosts_up(self, context, topic):
"""Return the list of hosts that have a running service for topic."""
services = db.service_get_all_by_topic(context, topic)
return [service.host
for service in services
if self.service_is_up(service)]
def schedule(self, context, topic, *_args, **_kwargs):
"""Must override at least this method for scheduler to work."""
raise NotImplementedError(_("Must implement a fallback schedule"))
def schedule_live_migration(self, context, instance_id, dest,
block_migration=False):
"""Live migration scheduling method.
:param context:
:param instance_id:
:param dest: destination host
:return:
The host where instance is running currently.
Then scheduler send request that host.
"""
# Whether instance exists and is running.
instance_ref = db.instance_get(context, instance_id)
# Checking instance.
self._live_migration_src_check(context, instance_ref)
# Checking destination host.
self._live_migration_dest_check(context, instance_ref,
dest, block_migration)
# Common checking.
self._live_migration_common_check(context, instance_ref,
dest, block_migration)
# Changing instance_state.
values = {"vm_state": vm_states.MIGRATING}
db.instance_update(context, instance_id, values)
# Changing volume state
for volume_ref in instance_ref['volumes']:
db.volume_update(context,
volume_ref['id'],
{'status': 'migrating'})
# Return value is necessary to send request to src
# Check _schedule() in detail.
src = instance_ref['host']
return src
def _live_migration_src_check(self, context, instance_ref):
"""Live migration check routine (for src host).
:param context: security context
:param instance_ref: nova.db.sqlalchemy.models.Instance object
"""
# Checking instance is running.
if instance_ref['power_state'] != power_state.RUNNING:
instance_id = ec2utils.id_to_ec2_id(instance_ref['id'])
raise exception.InstanceNotRunning(instance_id=instance_id)
        # Checking volume node is running when any volumes are mounted
# to the instance.
if len(instance_ref['volumes']) != 0:
services = db.service_get_all_by_topic(context, 'volume')
if len(services) < 1 or not self.service_is_up(services[0]):
raise exception.VolumeServiceUnavailable()
# Checking src host exists and compute node
src = instance_ref['host']
services = db.service_get_all_compute_by_host(context, src)
# Checking src host is alive.
if not self.service_is_up(services[0]):
raise exception.ComputeServiceUnavailable(host=src)
def _live_migration_dest_check(self, context, instance_ref, dest,
block_migration):
"""Live migration check routine (for destination host).
:param context: security context
:param instance_ref: nova.db.sqlalchemy.models.Instance object
:param dest: destination host
"""
# Checking dest exists and compute node.
dservice_refs = db.service_get_all_compute_by_host(context, dest)
dservice_ref = dservice_refs[0]
# Checking dest host is alive.
if not self.service_is_up(dservice_ref):
raise exception.ComputeServiceUnavailable(host=dest)
        # Checking that the host where the instance is running
        # and the destination are not the same.
src = instance_ref['host']
if dest == src:
instance_id = ec2utils.id_to_ec2_id(instance_ref['id'])
raise exception.UnableToMigrateToSelf(instance_id=instance_id,
host=dest)
# Checking dst host still has enough capacities.
self.assert_compute_node_has_enough_resources(context,
instance_ref,
dest,
block_migration)
def _live_migration_common_check(self, context, instance_ref, dest,
block_migration):
"""Live migration common check routine.
        The checks below follow
http://wiki.libvirt.org/page/TodoPreMigrationChecks
:param context: security context
:param instance_ref: nova.db.sqlalchemy.models.Instance object
:param dest: destination host
        :param block_migration: if True, check for block_migration.
"""
# Checking shared storage connectivity
# if block migration, instances_paths should not be on shared storage.
try:
self.mounted_on_same_shared_storage(context, instance_ref, dest)
if block_migration:
reason = _("Block migration can not be used "
"with shared storage.")
raise exception.InvalidSharedStorage(reason=reason, path=dest)
except exception.FileNotFound:
if not block_migration:
src = instance_ref['host']
ipath = FLAGS.instances_path
logging.error(_("Cannot confirm tmpfile at %(ipath)s is on "
"same shared storage between %(src)s "
"and %(dest)s.") % locals())
raise
# Checking dest exists.
dservice_refs = db.service_get_all_compute_by_host(context, dest)
dservice_ref = dservice_refs[0]['compute_node'][0]
# Checking original host( where instance was launched at) exists.
try:
oservice_refs = db.service_get_all_compute_by_host(context,
instance_ref['launched_on'])
except exception.NotFound:
raise exception.SourceHostUnavailable()
oservice_ref = oservice_refs[0]['compute_node'][0]
# Checking hypervisor is same.
orig_hypervisor = oservice_ref['hypervisor_type']
dest_hypervisor = dservice_ref['hypervisor_type']
if orig_hypervisor != dest_hypervisor:
raise exception.InvalidHypervisorType()
        # Checking hypervisor version.
orig_hypervisor = oservice_ref['hypervisor_version']
dest_hypervisor = dservice_ref['hypervisor_version']
if orig_hypervisor > dest_hypervisor:
raise exception.DestinationHypervisorTooOld()
# Checking cpuinfo.
try:
rpc.call(context,
db.queue_get_for(context, FLAGS.compute_topic, dest),
{"method": 'compare_cpu',
"args": {'cpu_info': oservice_ref['cpu_info']}})
except rpc.RemoteError:
src = instance_ref['host']
logging.exception(_("host %(dest)s is not compatible with "
"original host %(src)s.") % locals())
raise
def assert_compute_node_has_enough_resources(self, context, instance_ref,
dest, block_migration):
"""Checks if destination host has enough resource for live migration.
:param context: security context
:param instance_ref: nova.db.sqlalchemy.models.Instance object
:param dest: destination host
:param block_migration: if True, disk checking has been done
"""
self.assert_compute_node_has_enough_memory(context, instance_ref, dest)
if not block_migration:
return
self.assert_compute_node_has_enough_disk(context, instance_ref, dest)
def assert_compute_node_has_enough_memory(self, context,
instance_ref, dest):
"""Checks if destination host has enough memory for live migration.
:param context: security context
:param instance_ref: nova.db.sqlalchemy.models.Instance object
:param dest: destination host
"""
# Getting total available memory and disk of host
avail = self._get_compute_info(context, dest, 'memory_mb')
# Getting total used memory and disk of host
# It should be sum of memories that are assigned as max value,
        # because overcommitting is risky.
used = 0
instance_refs = db.instance_get_all_by_host(context, dest)
used_list = [i['memory_mb'] for i in instance_refs]
if used_list:
used = reduce(lambda x, y: x + y, used_list)
mem_inst = instance_ref['memory_mb']
avail = avail - used
if avail <= mem_inst:
instance_id = ec2utils.id_to_ec2_id(instance_ref['id'])
reason = _("Unable to migrate %(instance_id)s to %(dest)s: "
"Lack of memory(host:%(avail)s <= "
"instance:%(mem_inst)s)")
raise exception.MigrationError(reason=reason % locals())
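        # Worked example (illustrative numbers only): if the destination
        # reports memory_mb=8192, instances already placed there total
        # 2048+4096=6144, and the migrating instance wants 2048, then
        # avail = 8192 - 6144 = 2048 <= 2048 and MigrationError is raised;
        # the destination must have strictly more free memory than requested.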
def assert_compute_node_has_enough_disk(self, context,
instance_ref, dest):
"""Checks if destination host has enough disk for block migration.
:param context: security context
:param instance_ref: nova.db.sqlalchemy.models.Instance object
:param dest: destination host
"""
# Getting total available memory and disk of host
avail = self._get_compute_info(context, dest, 'local_gb')
# Getting total used memory and disk of host
# It should be sum of disks that are assigned as max value
        # because overcommitting is risky.
used = 0
instance_refs = db.instance_get_all_by_host(context, dest)
used_list = [i['local_gb'] for i in instance_refs]
if used_list:
used = reduce(lambda x, y: x + y, used_list)
disk_inst = instance_ref['local_gb']
avail = avail - used
if avail <= disk_inst:
instance_id = ec2utils.id_to_ec2_id(instance_ref['id'])
reason = _("Unable to migrate %(instance_id)s to %(dest)s: "
"Lack of disk(host:%(avail)s "
"<= instance:%(disk_inst)s)")
raise exception.MigrationError(reason=reason % locals())
def _get_compute_info(self, context, host, key):
"""get compute node's infomation specified by key
:param context: security context
:param host: hostname(must be compute node)
:param key: column name of compute_nodes
:return: value specified by key
"""
compute_node_ref = db.service_get_all_compute_by_host(context, host)
compute_node_ref = compute_node_ref[0]['compute_node'][0]
return compute_node_ref[key]
def mounted_on_same_shared_storage(self, context, instance_ref, dest):
"""Check if the src and dest host mount same shared storage.
At first, dest host creates temp file, and src host can see
it if they mounts same shared storage. Then src host erase it.
:param context: security context
:param instance_ref: nova.db.sqlalchemy.models.Instance object
:param dest: destination host
"""
src = instance_ref['host']
dst_t = db.queue_get_for(context, FLAGS.compute_topic, dest)
src_t = db.queue_get_for(context, FLAGS.compute_topic, src)
try:
# create tmpfile at dest host
filename = rpc.call(context, dst_t,
{"method": 'create_shared_storage_test_file'})
# make sure existence at src host.
ret = rpc.call(context, src_t,
{"method": 'check_shared_storage_test_file',
"args": {'filename': filename}})
if not ret:
raise exception.FileNotFound(file_path=filename)
except exception.FileNotFound:
raise
finally:
rpc.call(context, dst_t,
{"method": 'cleanup_shared_storage_test_file',
"args": {'filename': filename}})
| nii-cloud/dodai-compute | nova/scheduler/driver.py | Python | apache-2.0 | 14,688 | 0.000204 |
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
import frappe.share
from frappe import _
from frappe.utils import cstr, now_datetime, cint, flt
from erpnext.controllers.status_updater import StatusUpdater
class UOMMustBeIntegerError(frappe.ValidationError): pass
class TransactionBase(StatusUpdater):
def load_notification_message(self):
dt = self.doctype.lower().replace(" ", "_")
if int(frappe.db.get_value("Notification Control", None, dt) or 0):
self.set("__notification_message",
frappe.db.get_value("Notification Control", None, dt + "_message"))
def validate_posting_time(self):
# set Edit Posting Date and Time to 1 while data import
if frappe.flags.in_import and self.posting_date:
self.set_posting_time = 1
if not getattr(self, 'set_posting_time', None):
now = now_datetime()
self.posting_date = now.strftime('%Y-%m-%d')
self.posting_time = now.strftime('%H:%M:%S')
def add_calendar_event(self, opts, force=False):
if cstr(self.contact_by) != cstr(self._prev.contact_by) or \
cstr(self.contact_date) != cstr(self._prev.contact_date) or force:
self.delete_events()
self._add_calendar_event(opts)
def delete_events(self):
events = frappe.db.sql_list("""select name from `tabEvent`
where ref_type=%s and ref_name=%s""", (self.doctype, self.name))
if events:
frappe.db.sql("delete from `tabEvent` where name in (%s)"
.format(", ".join(['%s']*len(events))), tuple(events))
def _add_calendar_event(self, opts):
opts = frappe._dict(opts)
if self.contact_date:
event = frappe.get_doc({
"doctype": "Event",
"owner": opts.owner or self.owner,
"subject": opts.subject,
"description": opts.description,
"starts_on": self.contact_date,
"event_type": "Private",
"ref_type": self.doctype,
"ref_name": self.name
})
event.insert(ignore_permissions=True)
if frappe.db.exists("User", self.contact_by):
frappe.share.add("Event", event.name, self.contact_by,
flags={"ignore_share_permission": True})
def validate_uom_is_integer(self, uom_field, qty_fields):
validate_uom_is_integer(self, uom_field, qty_fields)
def validate_with_previous_doc(self, ref):
for key, val in ref.items():
is_child = val.get("is_child_table")
ref_doc = {}
item_ref_dn = []
for d in self.get_all_children(self.doctype + " Item"):
ref_dn = d.get(val["ref_dn_field"])
if ref_dn:
if is_child:
self.compare_values({key: [ref_dn]}, val["compare_fields"], d)
if ref_dn not in item_ref_dn:
item_ref_dn.append(ref_dn)
elif not val.get("allow_duplicate_prev_row_id"):
frappe.throw(_("Duplicate row {0} with same {1}").format(d.idx, key))
elif ref_dn:
ref_doc.setdefault(key, [])
if ref_dn not in ref_doc[key]:
ref_doc[key].append(ref_dn)
if ref_doc:
self.compare_values(ref_doc, val["compare_fields"])
def compare_values(self, ref_doc, fields, doc=None):
for reference_doctype, ref_dn_list in ref_doc.items():
for reference_name in ref_dn_list:
prevdoc_values = frappe.db.get_value(reference_doctype, reference_name,
[d[0] for d in fields], as_dict=1)
if not prevdoc_values:
frappe.throw(_("Invalid reference {0} {1}").format(reference_doctype, reference_name))
for field, condition in fields:
if prevdoc_values[field] is not None:
self.validate_value(field, condition, prevdoc_values[field], doc)
def validate_rate_with_reference_doc(self, ref_details):
for ref_dt, ref_dn_field, ref_link_field in ref_details:
for d in self.get("items"):
if d.get(ref_link_field):
ref_rate = frappe.db.get_value(ref_dt + " Item", d.get(ref_link_field), "rate")
if abs(flt(d.rate - ref_rate, d.precision("rate"))) >= .01:
frappe.throw(_("Row #{0}: Rate must be same as {1}: {2} ({3} / {4}) ")
.format(d.idx, ref_dt, d.get(ref_dn_field), d.rate, ref_rate))
def get_link_filters(self, for_doctype):
if hasattr(self, "prev_link_mapper") and self.prev_link_mapper.get(for_doctype):
fieldname = self.prev_link_mapper[for_doctype]["fieldname"]
values = filter(None, tuple([item.as_dict()[fieldname] for item in self.items]))
if values:
ret = {
for_doctype : {
"filters": [[for_doctype, "name", "in", values]]
}
}
else:
ret = None
else:
ret = None
return ret
def delete_events(ref_type, ref_name):
frappe.delete_doc("Event", frappe.db.sql_list("""select name from `tabEvent`
where ref_type=%s and ref_name=%s""", (ref_type, ref_name)), for_reload=True)
def validate_uom_is_integer(doc, uom_field, qty_fields, child_dt=None):
if isinstance(qty_fields, basestring):
qty_fields = [qty_fields]
distinct_uoms = list(set([d.get(uom_field) for d in doc.get_all_children()]))
integer_uoms = filter(lambda uom: frappe.db.get_value("UOM", uom,
"must_be_whole_number") or None, distinct_uoms)
if not integer_uoms:
return
for d in doc.get_all_children(parenttype=child_dt):
if d.get(uom_field) in integer_uoms:
for f in qty_fields:
qty = d.get(f)
if qty:
if abs(cint(qty) - flt(qty)) > 0.0000001:
frappe.throw(_("Quantity ({0}) cannot be a fraction in row {1}").format(qty, d.idx), UOMMustBeIntegerError)
| manqala/erpnext | erpnext/utilities/transaction_base.py | Python | gpl-3.0 | 5,360 | 0.024254 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class ApiError(Model):
"""Api error.
:param details: The Api error details
:type details:
list[~azure.mgmt.compute.v2016_04_30_preview.models.ApiErrorBase]
:param innererror: The Api inner error
:type innererror:
~azure.mgmt.compute.v2016_04_30_preview.models.InnerError
:param code: The error code.
:type code: str
:param target: The target of the particular error.
:type target: str
:param message: The error message.
:type message: str
"""
_attribute_map = {
'details': {'key': 'details', 'type': '[ApiErrorBase]'},
'innererror': {'key': 'innererror', 'type': 'InnerError'},
'code': {'key': 'code', 'type': 'str'},
'target': {'key': 'target', 'type': 'str'},
'message': {'key': 'message', 'type': 'str'},
}
def __init__(self, details=None, innererror=None, code=None, target=None, message=None):
super(ApiError, self).__init__()
self.details = details
self.innererror = innererror
self.code = code
self.target = target
self.message = message
| AutorestCI/azure-sdk-for-python | azure-mgmt-compute/azure/mgmt/compute/v2016_04_30_preview/models/api_error.py | Python | mit | 1,621 | 0.000617 |
#!/usr/bin/python
#
# Copyright (C) Citrix Systems Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation; version 2.1 only.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# Talk to the multipathd cli
from __future__ import print_function
import util
import re
import exceptions
import time
class MPathCLIFail(exceptions.Exception):
def __init__(self):
return
def __str__(self):
print("", "MPath CLI failed")
mpathcmd = ["/usr/sbin/multipathd", "-k"]
def mpexec(cmd):
util.SMlog("mpath cmd: %s" % cmd)
(rc, stdout, stderr) = util.doexec(mpathcmd, cmd)
if stdout != "multipathd> ok\nmultipathd> " \
and stdout != "multipathd> " + cmd + "\nok\nmultipathd> ":
raise MPathCLIFail
def add_path(path):
mpexec("add path %s" % path)
def remove_path(path):
mpexec("remove path %s" % path)
def remove_map(m):
mpexec("remove map %s" % m)
def resize_map(m):
mpexec("resize map %s" % m)
def reconfigure():
mpexec("reconfigure")
regex = re.compile("[0-9]+:[0-9]+:[0-9]+:[0-9]+\s*([a-z]*)")
regex2 = re.compile("multipathd>(\s*[^:]*:)?\s+(.*)")
regex3 = re.compile("switchgroup")
def is_working():
cmd = "help"
try:
(rc, stdout, stderr) = util.doexec(mpathcmd, cmd)
m = regex3.search(stdout)
if m:
return True
else:
return False
except:
return False
def do_get_topology(cmd):
util.SMlog("mpath cmd: %s" % cmd)
(rc, stdout, stderr) = util.doexec(mpathcmd, cmd)
util.SMlog("mpath output: %s" % stdout)
lines = stdout.split('\n')[:-1]
if len(lines):
m = regex2.search(lines[0])
lines[0] = str(m.group(2))
return lines
def get_topology(scsi_id):
cmd = "show map %s topology" % scsi_id
return do_get_topology(cmd)
def get_all_topologies():
cmd = "show topology"
return do_get_topology(cmd)
def list_paths(scsi_id):
lines = get_topology(scsi_id)
matches = []
for line in lines:
m = regex.search(line)
if(m):
matches.append(m.group(1))
return matches
def list_maps():
cmd = "list maps"
util.SMlog("mpath cmd: %s" % cmd)
(rc, stdout, stderr) = util.doexec(mpathcmd, cmd)
util.SMlog("mpath output: %s" % stdout)
return map(lambda x: x.split(' ')[0], stdout.split('\n')[2:-1])
def ensure_map_gone(scsi_id):
while True:
paths = list_paths(scsi_id)
util.SMlog("list_paths succeeded")
if len(paths) == 0:
return
time.sleep(1)
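# Illustrative usage sketch (the device name and SCSI id below are made-up
# placeholders): callers typically probe the daemon and then query a map:
#
#   if is_working():
#       add_path('sdc')
#       paths = list_paths('360a98000324669436142b45673343362')
#       util.SMlog("active paths: %s" % paths)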
| xapi-project/sm | drivers/mpath_cli.py | Python | lgpl-2.1 | 3,107 | 0.001931 |
"""Support for monitoring OctoPrint sensors."""
from __future__ import annotations
from datetime import datetime, timedelta
import logging
from pyoctoprintapi import OctoprintJobInfo, OctoprintPrinterInfo
from homeassistant.components.sensor import (
SensorDeviceClass,
SensorEntity,
SensorStateClass,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import PERCENTAGE, TEMP_CELSIUS
from homeassistant.core import HomeAssistant
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.update_coordinator import CoordinatorEntity
from . import OctoprintDataUpdateCoordinator
from .const import DOMAIN
_LOGGER = logging.getLogger(__name__)
JOB_PRINTING_STATES = ["Printing from SD", "Printing"]
def _is_printer_printing(printer: OctoprintPrinterInfo) -> bool:
return (
printer
and printer.state
and printer.state.flags
and printer.state.flags.printing
)
async def async_setup_entry(
hass: HomeAssistant,
config_entry: ConfigEntry,
async_add_entities: AddEntitiesCallback,
) -> None:
"""Set up the available OctoPrint binary sensors."""
coordinator: OctoprintDataUpdateCoordinator = hass.data[DOMAIN][
config_entry.entry_id
]["coordinator"]
device_id = config_entry.unique_id
assert device_id is not None
entities: list[SensorEntity] = []
if coordinator.data["printer"]:
printer_info = coordinator.data["printer"]
types = ["actual", "target"]
for tool in printer_info.temperatures:
for temp_type in types:
entities.append(
OctoPrintTemperatureSensor(
coordinator,
tool.name,
temp_type,
device_id,
)
)
else:
_LOGGER.error("Printer appears to be offline, skipping temperature sensors")
entities.append(OctoPrintStatusSensor(coordinator, device_id))
entities.append(OctoPrintJobPercentageSensor(coordinator, device_id))
entities.append(OctoPrintEstimatedFinishTimeSensor(coordinator, device_id))
entities.append(OctoPrintStartTimeSensor(coordinator, device_id))
async_add_entities(entities)
class OctoPrintSensorBase(CoordinatorEntity, SensorEntity):
"""Representation of an OctoPrint sensor."""
coordinator: OctoprintDataUpdateCoordinator
def __init__(
self,
coordinator: OctoprintDataUpdateCoordinator,
sensor_type: str,
device_id: str,
) -> None:
"""Initialize a new OctoPrint sensor."""
super().__init__(coordinator)
self._device_id = device_id
self._attr_name = f"OctoPrint {sensor_type}"
self._attr_unique_id = f"{sensor_type}-{device_id}"
@property
def device_info(self):
"""Device info."""
return self.coordinator.device_info
class OctoPrintStatusSensor(OctoPrintSensorBase):
"""Representation of an OctoPrint sensor."""
_attr_icon = "mdi:printer-3d"
def __init__(
self, coordinator: OctoprintDataUpdateCoordinator, device_id: str
) -> None:
"""Initialize a new OctoPrint sensor."""
super().__init__(coordinator, "Current State", device_id)
@property
def native_value(self):
"""Return sensor state."""
printer: OctoprintPrinterInfo = self.coordinator.data["printer"]
if not printer:
return None
return printer.state.text
@property
def available(self) -> bool:
"""Return if entity is available."""
return self.coordinator.last_update_success and self.coordinator.data["printer"]
class OctoPrintJobPercentageSensor(OctoPrintSensorBase):
"""Representation of an OctoPrint sensor."""
_attr_native_unit_of_measurement = PERCENTAGE
_attr_icon = "mdi:file-percent"
def __init__(
self, coordinator: OctoprintDataUpdateCoordinator, device_id: str
) -> None:
"""Initialize a new OctoPrint sensor."""
super().__init__(coordinator, "Job Percentage", device_id)
@property
def native_value(self):
"""Return sensor state."""
job: OctoprintJobInfo = self.coordinator.data["job"]
if not job:
return None
if not (state := job.progress.completion):
return 0
return round(state, 2)
class OctoPrintEstimatedFinishTimeSensor(OctoPrintSensorBase):
"""Representation of an OctoPrint sensor."""
_attr_device_class = SensorDeviceClass.TIMESTAMP
def __init__(
self, coordinator: OctoprintDataUpdateCoordinator, device_id: str
) -> None:
"""Initialize a new OctoPrint sensor."""
super().__init__(coordinator, "Estimated Finish Time", device_id)
@property
def native_value(self) -> datetime | None:
"""Return sensor state."""
job: OctoprintJobInfo = self.coordinator.data["job"]
if (
not job
or not job.progress.print_time_left
or not _is_printer_printing(self.coordinator.data["printer"])
):
return None
read_time = self.coordinator.data["last_read_time"]
return read_time + timedelta(seconds=job.progress.print_time_left)
class OctoPrintStartTimeSensor(OctoPrintSensorBase):
"""Representation of an OctoPrint sensor."""
_attr_device_class = SensorDeviceClass.TIMESTAMP
def __init__(
self, coordinator: OctoprintDataUpdateCoordinator, device_id: str
) -> None:
"""Initialize a new OctoPrint sensor."""
super().__init__(coordinator, "Start Time", device_id)
@property
def native_value(self) -> datetime | None:
"""Return sensor state."""
job: OctoprintJobInfo = self.coordinator.data["job"]
if (
not job
or not job.progress.print_time
or not _is_printer_printing(self.coordinator.data["printer"])
):
return None
read_time = self.coordinator.data["last_read_time"]
return read_time - timedelta(seconds=job.progress.print_time)
class OctoPrintTemperatureSensor(OctoPrintSensorBase):
"""Representation of an OctoPrint sensor."""
_attr_native_unit_of_measurement = TEMP_CELSIUS
_attr_device_class = SensorDeviceClass.TEMPERATURE
_attr_state_class = SensorStateClass.MEASUREMENT
def __init__(
self,
coordinator: OctoprintDataUpdateCoordinator,
tool: str,
temp_type: str,
device_id: str,
) -> None:
"""Initialize a new OctoPrint sensor."""
super().__init__(coordinator, f"{temp_type} {tool} temp", device_id)
self._temp_type = temp_type
self._api_tool = tool
@property
def native_value(self):
"""Return sensor state."""
printer: OctoprintPrinterInfo = self.coordinator.data["printer"]
if not printer:
return None
for temp in printer.temperatures:
if temp.name == self._api_tool:
val = (
temp.actual_temp
if self._temp_type == "actual"
else temp.target_temp
)
if val is None:
return None
return round(val, 2)
return None
@property
def available(self) -> bool:
"""Return if entity is available."""
return self.coordinator.last_update_success and self.coordinator.data["printer"]
| home-assistant/home-assistant | homeassistant/components/octoprint/sensor.py | Python | apache-2.0 | 7,576 | 0.000396 |
#!/usr/bin/env python3
"""
test/unit_tests_d/ut_daemon.py: unit test for the MMGen suite's Daemon class
"""
from subprocess import run,PIPE,DEVNULL
from mmgen.common import *
from mmgen.daemon import *
from mmgen.protocol import init_proto
def test_flags():
d = CoinDaemon('eth')
vmsg(f'Available opts: {fmt_list(d.avail_opts,fmt="bare")}')
vmsg(f'Available flags: {fmt_list(d.avail_flags,fmt="bare")}')
vals = namedtuple('vals',['online','no_daemonize','keep_cfg_file'])
def gen():
for opts,flags,val in (
(None,None, vals(False,False,False)),
(None,['keep_cfg_file'], vals(False,False,True)),
(['online'],['keep_cfg_file'], vals(True,False,True)),
(['online','no_daemonize'],['keep_cfg_file'], vals(True,True,True)),
):
d = CoinDaemon('eth',opts=opts,flags=flags)
assert d.flag.keep_cfg_file == val.keep_cfg_file
assert d.opt.online == val.online
assert d.opt.no_daemonize == val.no_daemonize
d.flag.keep_cfg_file = not val.keep_cfg_file
d.flag.keep_cfg_file = val.keep_cfg_file
yield d
return tuple(gen())
def test_flags_err(ut,d):
def bad1(): d[0].flag.foo = False
def bad2(): d[0].opt.foo = False
def bad3(): d[0].opt.no_daemonize = True
def bad4(): d[0].flag.keep_cfg_file = 'x'
def bad5(): d[0].opt.no_daemonize = 'x'
def bad6(): d[0].flag.keep_cfg_file = False
def bad7(): d[1].flag.keep_cfg_file = True
ut.process_bad_data((
('flag (1)', 'ClassFlagsError', 'unrecognized flag', bad1 ),
('opt (1)', 'ClassFlagsError', 'unrecognized opt', bad2 ),
('opt (2)', 'AttributeError', 'is read-only', bad3 ),
('flag (2)', 'AssertionError', 'not boolean', bad4 ),
('opt (3)', 'AttributeError', 'is read-only', bad5 ),
('flag (3)', 'ClassFlagsError', 'not set', bad6 ),
('flag (4)', 'ClassFlagsError', 'already set', bad7 ),
))
arm_skip_daemons = ('openethereum','parity')
def test_cmds(op):
network_ids = CoinDaemon.get_network_ids()
import mmgen.daemon as daemon_mod
for test_suite in [True,False] if op == 'print' else [True]:
vmsg(orange(f'Start commands (op={op}, test_suite={test_suite}):'))
for coin,data in CoinDaemon.coins.items():
for daemon_id in data.daemon_ids:
if daemon_id in arm_skip_daemons:
continue
for network in data.networks:
if opt.no_altcoin_deps and coin != 'BTC':
continue
d = CoinDaemon(
proto=init_proto(coin=coin,network=network),
daemon_id = daemon_id,
test_suite = test_suite )
if op == 'print':
for cmd in d.start_cmds:
vmsg(' '.join(cmd))
elif op == 'check':
try:
cp = run([d.exec_fn,'--help'],stdout=PIPE,stderr=PIPE)
except:
die(2,f'Unable to execute {d.exec_fn}')
if cp.returncode:
die(2,f'Unable to execute {d.exec_fn}')
else:
vmsg('{:16} {}'.format(
d.exec_fn+':',
cp.stdout.decode().splitlines()[0] ))
else:
if opt.quiet:
msg_r('.')
if op == 'stop' and hasattr(d,'rpc'):
run_session(d.rpc.stop_daemon(quiet=opt.quiet))
else:
getattr(d,op)(silent=opt.quiet)
class unit_tests:
win_skip = ('start','status','stop')
def flags(self,name,ut):
qmsg_r('Testing flags and opts...')
vmsg('')
daemons = test_flags()
qmsg('OK')
qmsg_r('Testing error handling for flags and opts...')
vmsg('')
test_flags_err(ut,daemons)
qmsg('OK')
return True
def cmds(self,name,ut):
qmsg_r('Testing start commands for coin daemons...')
vmsg('')
test_cmds('print')
qmsg('OK')
return True
def exec(self,name,ut):
qmsg_r('Testing availability of coin daemons...')
vmsg('')
test_cmds('check')
qmsg('OK')
return True
def start(self,name,ut):
msg_r('Starting coin daemons...')
qmsg('')
test_cmds('start')
msg('OK')
return True
def status(self,name,ut):
msg_r('Checking status of coin daemons...')
qmsg('')
test_cmds('start')
msg('OK')
return True
def stop(self,name,ut):
msg_r('Stopping coin daemons...')
qmsg('')
test_cmds('stop')
msg('OK')
return True
| mmgen/mmgen | test/unit_tests_d/ut_daemon.py | Python | gpl-3.0 | 4,118 | 0.043468 |
import re
import os.path
from setuptools import setup, find_packages
HERE = os.path.abspath(os.path.dirname(__file__))
README_PATH = os.path.join(HERE, 'README.md')
try:
with open(README_PATH) as fd:
README = fd.read()
except IOError:
README = ''
INIT_PATH = os.path.join(HERE, 'rollbar/__init__.py')
with open(INIT_PATH) as fd:
INIT_DATA = fd.read()
VERSION = re.search(r"^__version__ = ['\"]([^'\"]+)['\"]", INIT_DATA, re.MULTILINE).group(1)
tests_require = [
'webob',
'blinker',
'unittest2',
'mock<=3.0.5; python_version < "3.3"',
'enum34; python_version < "3.4"',
'httpx; python_version >= "3.6"',
'aiocontextvars; python_version == "3.6"'
]
setup(
name='rollbar',
packages=find_packages(),
version=VERSION,
entry_points={
'paste.filter_app_factory': [
'pyramid=rollbar.contrib.pyramid:create_rollbar_middleware'
],
'console_scripts': ['rollbar=rollbar.cli:main']
},
description='Easy and powerful exception tracking with Rollbar. Send '
'messages and exceptions with arbitrary context, get back '
'aggregates, and debug production issues quickly.',
long_description=README,
long_description_content_type="text/markdown",
author='Rollbar, Inc.',
author_email='support@rollbar.com',
test_suite='rollbar.test.discover',
url='http://github.com/rollbar/pyrollbar',
classifiers=[
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Development Status :: 5 - Production/Stable",
"Environment :: Web Environment",
"Framework :: AsyncIO",
"Framework :: Bottle",
"Framework :: Django",
"Framework :: Flask",
"Framework :: Pylons",
"Framework :: Pyramid",
"Framework :: Twisted",
"Intended Audience :: Developers",
"Topic :: Internet :: WWW/HTTP",
"Topic :: Software Development",
"Topic :: Software Development :: Bug Tracking",
"Topic :: Software Development :: Testing",
"Topic :: Software Development :: Quality Assurance",
"Topic :: System :: Logging",
"Topic :: System :: Monitoring",
],
install_requires=[
# The currently used version of `setuptools` has a bug,
# so the version requirements are not properly respected.
#
# In the current version, `requests>= 0.12.1`
# always installs the latest version of the package.
'requests>=0.12.1; python_version == "2.7"',
'requests>=0.12.1; python_version >= "3.6"',
'requests<2.26,>=0.12.1; python_version == "3.5"',
'requests<2.22,>=0.12.1; python_version == "3.4"',
'six>=1.9.0'
],
tests_require=tests_require,
)
| rollbar/pyrollbar | setup.py | Python | mit | 3,249 | 0.000308 |
# This program is free software; you can redistribute it and/or modify
# it under the terms of the (LGPL) GNU Lesser General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Lesser General Public License for more details at
# ( http://www.gnu.org/licenses/lgpl.html ).
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# written by: Jeff Ortel ( jortel@redhat.com )
"""
The sax module contains a collection of classes that provide a
(D)ocument (O)bject (M)odel representation of an XML document.
The goal is to provide an easy, intuitive interface for managing XML
documents. Although the term DOM is used above, this model is
B{far} better.
XML namespaces in suds are represented using a 2-element tuple
containing the prefix and the URI, e.g. I{('tns', 'http://myns')}.
"""
from logging import getLogger
import suds.metrics
from suds import *
from suds.sax import *
from suds.sax.document import Document
from suds.sax.element import Element
from suds.sax.text import Text
from suds.sax.attribute import Attribute
from xml.sax import make_parser, InputSource, ContentHandler
from xml.sax.handler import feature_external_ges
from cStringIO import StringIO
log = getLogger(__name__)
class Handler(ContentHandler):
""" sax hanlder """
def __init__(self):
self.nodes = [Document()]
def startElement(self, name, attrs):
top = self.top()
node = Element(unicode(name), parent=top)
for a in attrs.getNames():
n = unicode(a)
v = unicode(attrs.getValue(a))
attribute = Attribute(n,v)
if self.mapPrefix(node, attribute):
continue
node.append(attribute)
node.charbuffer = []
top.append(node)
self.push(node)
def mapPrefix(self, node, attribute):
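        # Map xmlns declarations onto the node (default namespace or prefix mapping)
        # instead of keeping them as ordinary attributes; returns True when consumed.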
skip = False
if attribute.name == 'xmlns':
if len(attribute.value):
node.expns = unicode(attribute.value)
skip = True
elif attribute.prefix == 'xmlns':
prefix = attribute.name
node.nsprefixes[prefix] = unicode(attribute.value)
skip = True
return skip
def endElement(self, name):
name = unicode(name)
current = self.top()
if len(current.charbuffer):
current.text = Text(u''.join(current.charbuffer))
del current.charbuffer
if len(current):
current.trim()
currentqname = current.qname()
if name == currentqname:
self.pop()
else:
raise Exception('malformed document')
def characters(self, content):
text = unicode(content)
node = self.top()
node.charbuffer.append(text)
def push(self, node):
self.nodes.append(node)
return node
def pop(self):
return self.nodes.pop()
def top(self):
return self.nodes[len(self.nodes)-1]
class Parser:
""" SAX Parser """
@classmethod
def saxparser(cls):
p = make_parser()
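        # Disable resolution of external general entities so parsing never fetches
        # external resources.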
p.setFeature(feature_external_ges, 0)
h = Handler()
p.setContentHandler(h)
return (p, h)
def parse(self, file=None, string=None):
"""
SAX parse XML text.
@param file: Parse a python I{file-like} object.
@type file: I{file-like} object.
@param string: Parse string XML.
@type string: str
"""
timer = metrics.Timer()
timer.start()
sax, handler = self.saxparser()
if file is not None:
sax.parse(file)
timer.stop()
#metrics.log.debug('sax (%s) duration: %s', file, timer)
return handler.nodes[0]
if string is not None:
source = InputSource(None)
source.setByteStream(StringIO(string))
sax.parse(source)
timer.stop()
#metrics.log.debug('%s\nsax duration: %s', string, timer)
return handler.nodes[0]
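# A minimal usage sketch (not part of the original module; it assumes the Document
# and Element helpers -- root(), namespace(), getChild(), getText() -- provided by
# the wider suds.sax package):
#
#   doc = Parser().parse(string='<tns:a xmlns:tns="http://myns"><tns:b>hi</tns:b></tns:a>')
#   root = doc.root()
#   root.namespace()              # ('tns', 'http://myns')
#   root.getChild('b').getText()  # u'hi'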
| jumoconnect/openjumo | jumodjango/lib/suds/sax/parser.py | Python | mit | 4,435 | 0.000676 |
#!/usr/bin/env python3
# Copyright (c) 2015-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the ZMQ notification interface."""
import struct
from test_framework.address import ADDRESS_BCRT1_UNSPENDABLE
from test_framework.test_framework import BitcoinTestFramework
from test_framework.messages import CTransaction
from test_framework.util import (
assert_equal,
bytes_to_hex_str,
hash256,
)
from io import BytesIO
ADDRESS = "tcp://127.0.0.1:28332"
class ZMQSubscriber:
def __init__(self, socket, topic):
self.sequence = 0
self.socket = socket
self.topic = topic
import zmq
self.socket.setsockopt(zmq.SUBSCRIBE, self.topic)
def receive(self):
topic, body, seq = self.socket.recv_multipart()
# Topic should match the subscriber topic.
assert_equal(topic, self.topic)
# Sequence should be incremental.
assert_equal(struct.unpack('<I', seq)[-1], self.sequence)
self.sequence += 1
return body
class ZMQTest (BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 2
def skip_test_if_missing_module(self):
self.skip_if_no_py3_zmq()
self.skip_if_no_bitcoind_zmq()
def setup_nodes(self):
import zmq
# Initialize ZMQ context and socket.
# All messages are received in the same socket which means
# that this test fails if the publishing order changes.
# Note that the publishing order is not defined in the documentation and
# is subject to change.
self.zmq_context = zmq.Context()
socket = self.zmq_context.socket(zmq.SUB)
socket.set(zmq.RCVTIMEO, 60000)
socket.connect(ADDRESS)
# Subscribe to all available topics.
self.hashblock = ZMQSubscriber(socket, b"hashblock")
self.hashtx = ZMQSubscriber(socket, b"hashtx")
self.rawblock = ZMQSubscriber(socket, b"rawblock")
self.rawtx = ZMQSubscriber(socket, b"rawtx")
self.extra_args = [
["-zmqpub%s=%s" % (sub.topic.decode(), ADDRESS) for sub in [self.hashblock, self.hashtx, self.rawblock, self.rawtx]],
[],
]
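        # The list comprehension above expands to -zmqpubhashblock=tcp://127.0.0.1:28332
        # plus the analogous hashtx/rawblock/rawtx options for node 0; node 1 gets no
        # ZMQ options.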
self.add_nodes(self.num_nodes, self.extra_args)
self.start_nodes()
def run_test(self):
try:
self._zmq_test()
finally:
# Destroy the ZMQ context.
self.log.debug("Destroying ZMQ context")
self.zmq_context.destroy(linger=None)
def _zmq_test(self):
num_blocks = 5
self.log.info("Generate %(n)d blocks (and %(n)d coinbase txes)" % {"n": num_blocks})
genhashes = self.nodes[0].generatetoaddress(num_blocks, ADDRESS_BCRT1_UNSPENDABLE)
self.sync_all()
for x in range(num_blocks):
# Should receive the coinbase txid.
txid = self.hashtx.receive()
# Should receive the coinbase raw transaction.
hex = self.rawtx.receive()
tx = CTransaction()
tx.deserialize(BytesIO(hex))
tx.calc_sha256()
assert_equal(tx.hash, bytes_to_hex_str(txid))
# Should receive the generated block hash.
hash = bytes_to_hex_str(self.hashblock.receive())
assert_equal(genhashes[x], hash)
# The block should only have the coinbase txid.
assert_equal([bytes_to_hex_str(txid)], self.nodes[1].getblock(hash)["tx"])
# Should receive the generated raw block.
block = self.rawblock.receive()
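            # The block hash is the double-SHA256 of the 80-byte header at the start
            # of the raw block.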
assert_equal(genhashes[x], bytes_to_hex_str(hash256(block[:80])))
if self.is_wallet_compiled():
self.log.info("Wait for tx from second node")
payment_txid = self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), 1.0)
self.sync_all()
# Should receive the broadcasted txid.
txid = self.hashtx.receive()
assert_equal(payment_txid, bytes_to_hex_str(txid))
# Should receive the broadcasted raw transaction.
hex = self.rawtx.receive()
assert_equal(payment_txid, bytes_to_hex_str(hash256(hex)))
self.log.info("Test the getzmqnotifications RPC")
assert_equal(self.nodes[0].getzmqnotifications(), [
{"type": "pubhashblock", "address": ADDRESS},
{"type": "pubhashtx", "address": ADDRESS},
{"type": "pubrawblock", "address": ADDRESS},
{"type": "pubrawtx", "address": ADDRESS},
])
assert_equal(self.nodes[1].getzmqnotifications(), [])
if __name__ == '__main__':
ZMQTest().main()
| ericshawlinux/bitcoin | test/functional/interface_zmq.py | Python | mit | 4,763 | 0.00189 |
#!/usr/bin/env python
from prompt_toolkit.history import InMemoryHistory
from prompt_toolkit import prompt
from prompt_toolkit.auto_suggest import AutoSuggestFromHistory
history = InMemoryHistory()
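# While typing, the closest matching entry from the history is shown after the cursor
# as a fish-style suggestion and can be accepted with the right arrow key.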
while True:
text = prompt("> ", history=history, auto_suggest=AutoSuggestFromHistory())
if text == 'quit':
print("Goodbye...")
break
else:
print('You said: {}'.format(text))
| tleonhardt/CodingPlayground | python/prompt-toolkit/auto_suggestion.py | Python | mit | 410 | 0 |
from .oauth import BaseOAuth2
class EventbriteOAuth2(BaseOAuth2):
"""Eventbrite OAuth2 authentication backend"""
name = 'eventbrite'
AUTHORIZATION_URL = 'https://www.eventbrite.com/oauth/authorize'
ACCESS_TOKEN_URL = 'https://www.eventbrite.com/oauth/token'
METADATA_URL = 'https://www.eventbriteapi.com/v3/users/me'
ACCESS_TOKEN_METHOD = 'POST'
STATE_PARAMETER = False
REDIRECT_STATE = False
def get_user_details(self, response):
"""Return user details from an Eventbrite metadata response"""
email = next(filter(lambda x: x['primary'], response['emails']))['email']
return {
'username': email,
'email': email,
'first_name': response['first_name'],
'last_name': response['last_name']
}
def user_data(self, access_token, *args, **kwargs):
"""Loads user data and datacenter information from service"""
return self.get_json(self.METADATA_URL, headers={
'Authorization': 'Bearer ' + access_token
})
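# Rough illustration of get_user_details() (illustrative values only; the real
# Eventbrite /v3/users/me payload contains more fields than shown):
#
#   response = {'emails': [{'email': 'jane@example.com', 'primary': True}],
#               'first_name': 'Jane', 'last_name': 'Doe'}
#   => {'username': 'jane@example.com', 'email': 'jane@example.com',
#       'first_name': 'Jane', 'last_name': 'Doe'}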
| abhikumar22/MYBLOG | blg/Lib/site-packages/social_core/backends/eventbrite.py | Python | gpl-3.0 | 1,055 | 0.000948 |
# -*- coding: utf-8 -*-
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright (C) 2015-2022 GEM Foundation
#
# OpenQuake is free software: you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# OpenQuake is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with OpenQuake. If not, see <http://www.gnu.org/licenses/>.
"""
An extremely simple log viewer, suitable for debugging
"""
import time
import json
try:
from urllib import urlopen
except ImportError:
from urllib.request import urlopen
from openquake.baselib import sap
def viewlog(calc_id, host='localhost', port=8000):
"""
Extract the log of the given calculation ID from the WebUI
"""
base_url = 'http://%s:%s/v1/calc/' % (host, port)
start = 0
psize = 10 # page size
try:
while True:
url = base_url + '%d/log/%d:%d' % (calc_id, start, start + psize)
rows = json.load(urlopen(url))
for row in rows:
print(' '.join(row))
start += len(rows)
time.sleep(1)
except BaseException:
pass
if __name__ == '__main__':
viewlog.calc_id = 'calculation ID'
viewlog.host = 'hostname of the engine server'
viewlog.port = 'port of the engine server'
sap.run(viewlog)
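# Example invocation (an assumption based on the sap wrapper above, which maps the
# function arguments to command line arguments):
#
#   $ python viewlog.py 1234 localhost 8000
#
# polls http://localhost:8000/v1/calc/1234/log/... in pages of 10 rows and prints
# new log lines roughly once per second until interrupted.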
| gem/oq-engine | openquake/engine/tools/viewlog.py | Python | agpl-3.0 | 1,703 | 0 |
# -*- coding: utf-8 -*-
'''
/**************************************************************************************************************************
SemiAutomaticClassificationPlugin
The Semi-Automatic Classification Plugin for QGIS allows for the supervised classification of remote sensing images,
providing tools for the download, the preprocessing and postprocessing of images.
-------------------
begin : 2012-12-29
copyright : (C) 2012-2021 by Luca Congedo
email : ing.congedoluca@gmail.com
**************************************************************************************************************************/
/**************************************************************************************************************************
*
* This file is part of Semi-Automatic Classification Plugin
*
* Semi-Automatic Classification Plugin is free software: you can redistribute it and/or modify it under
* the terms of the GNU General Public License as published by the Free Software Foundation,
* version 3 of the License.
*
* Semi-Automatic Classification Plugin is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along with
* Semi-Automatic Classification Plugin. If not, see <http://www.gnu.org/licenses/>.
*
**************************************************************************************************************************/
'''
cfg = __import__(str(__name__).split('.')[0] + '.core.config', fromlist=[''])
class EditRaster:
def __init__(self):
pass
# set raster value
def setRasterValueAction(self):
self.setRasterValue()
# set raster value
def setRasterValue(self, batch = 'No', rasterInput = None, vectorInput = None, vectorFieldName = None):
if cfg.ui.edit_val_use_ROI_radioButton.isChecked() and cfg.lstROI is None:
cfg.mx.msg22()
return
else:
if batch == 'No':
self.rstrNm = cfg.ui.edit_raster_name_combo.currentText()
b = cfg.utls.selectLayerbyName(self.rstrNm, 'Yes')
else:
b = 'No'
if b is not None:
if batch == 'No':
rSource = cfg.utls.layerSource(b)
else:
rSource = rasterInput
cfg.ui.undo_edit_Button.setEnabled(False)
cfg.undoEditRasterToolbar_toolButton.setEnabled(False)
# create feature list
rId = []
f = cfg.qgisCoreSCP.QgsFeature()
# using vector
if cfg.ui.edit_val_use_vector_radioButton.isChecked():
if batch == 'No':
shapeNm = cfg.ui.vector_name_combo_2.currentText()
shape = cfg.utls.selectLayerbyName(shapeNm)
else:
shape = cfg.utls.addVectorLayer(vectorInput , cfg.utls.fileName(vectorInput), "ogr")
if shape is None:
return
for f in shape.getFeatures():
rId.append(f.id())
vector = shape
# using ROI polygon
elif cfg.ui.edit_val_use_ROI_radioButton.isChecked():
for f in cfg.lstROI.getFeatures():
rId.append(f.id())
vector = cfg.lstROI
# hide ROI
cfg.show_ROI_radioButton.setChecked(False)
cfg.SCPD.showHideROI()
self.setValueRaster(rSource, vector, rId, batch, vectorFieldName)
if b != 'No':
b.reload()
b.triggerRepaint()
cfg.cnvs.refresh()
if batch == 'No':
pass
else:
cfg.utls.removeLayerByLayer(shape)
else:
cfg.utls.refreshClassificationLayer()
cfg.mx.msgErr9()
# logger
cfg.utls.logCondition(str(__name__) + '-' + str(cfg.inspectSCP.stack()[0][3])+ ' ' + cfg.utls.lineOfCode(), "Error raster not found")
# logger
cfg.utls.logCondition(str(__name__) + "-" + str(cfg.inspectSCP.stack()[0][3])+ " " + cfg.utls.lineOfCode())
# set value raster
def setValueRaster(self, inputRaster, inputVectorQGIS, qgisVectorFeatureList, batch = 'No', vectorFieldName = None, toolbarValue = None):
crs = cfg.utls.getCrs(inputVectorQGIS)
# using ROI polygon
if cfg.ui.edit_val_use_ROI_radioButton.isChecked() or toolbarValue is not None:
# temporary layer
tLP = cfg.utls.createTempRasterPath('gpkg')
# create a temp shapefile with a field
cfg.utls.createEmptyShapefile(crs, tLP, format = 'GPKG')
vector = cfg.utls.addVectorLayer(tLP, cfg.utls.fileName(tLP), "ogr")
for pI in qgisVectorFeatureList:
cfg.utls.copyFeatureToLayer(inputVectorQGIS, pI, vector)
if toolbarValue is None:
toolbarValue = cfg.ui.value_spinBox.value()
self.performEdit(inputRaster, tLP, toolbarValue)
cfg.ui.undo_edit_Button.setEnabled(True)
cfg.undoEditRasterToolbar_toolButton.setEnabled(True)
# using vector
else:
if batch == 'No':
cfg.uiUtls.addProgressBar()
progress = 0
progressStep = 100 / (len(qgisVectorFeatureList) + 1)
n = 0
for pI in qgisVectorFeatureList:
n = n + 1
progress = progress + progressStep
cfg.uiUtls.updateBar(progress)
# temporary layer
tLP = cfg.utls.createTempRasterPath('gpkg')
# create a temp shapefile with a field
cfg.utls.createEmptyShapefile(crs, tLP, format = 'GPKG')
vector = cfg.utls.addVectorLayer(tLP, cfg.utls.fileName(tLP), 'ogr')
cfg.utls.copyFeatureToLayer(inputVectorQGIS, pI, vector)
if cfg.ui.use_constant_val_checkBox.isChecked() is True:
value = cfg.ui.value_spinBox.value()
else:
if vectorFieldName is None:
fd = cfg.ui.field_comboBox_2.currentText()
else:
fd = vectorFieldName
if len(fd) == 0:
cfg.utls.refreshVectorLayer()
if batch == 'No':
cfg.uiUtls.removeProgressBar()
return 'No'
fId = cfg.utls.fieldID(inputVectorQGIS, fd)
f = cfg.utls.getFeaturebyID(inputVectorQGIS, pI)
value = f.attributes()[fId]
self.performEdit(inputRaster, tLP, value)
if batch == 'No':
cfg.uiUtls.removeProgressBar()
# perform raster edit
def performEdit(self, inputRasterPath, inputVectorPath, editValue = None):
# convert polygon to raster
tRxs = cfg.utls.createTempRasterPath('tif')
check = cfg.utls.vectorToRaster(cfg.emptyFN, str(inputVectorPath), cfg.emptyFN, tRxs, str(inputRasterPath), None, 'GTiff', 1)
# open input with GDAL
rD = cfg.gdalSCP.Open(inputRasterPath, cfg.gdalSCP.GA_Update)
if rD is None:
return 'No'
rD2 = cfg.gdalSCP.Open(tRxs, cfg.gdalSCP.GA_ReadOnly)
if rD2 is None:
return 'No'
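        # from here on, work out the pixel window where the edit raster and the
        # rasterized polygon overlap, so only that block is read and rewritten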
# pixel size and origin
rGT = rD.GetGeoTransform()
tLX = rGT[0]
tLY = rGT[3]
pSX = rGT[1]
pSY = rGT[5]
rGT2 = rD2.GetGeoTransform()
tLX2 = rGT2[0]
tLY2 = rGT2[3]
# number of x pixels
rC = rD.RasterXSize
rC2 = rD2.RasterXSize
# number of y pixels
rR = rD.RasterYSize
rR2 = rD2.RasterYSize
if tLX2 < tLX:
startX = tLX
else:
startX = tLX2
if tLY2 > tLY:
startY = tLY
else:
startY = tLY2
self.pixelStartColumn = abs(int((tLX - startX) / pSX ))
self.pixelStartRow = abs(int((tLY - startY) / pSY ))
startColumn2 = abs(int((tLX2 - startX) / pSX ))
startRow2 = abs(int((tLY2 - startY) / pSY ))
columnNum = rC2 - startColumn2
rowNum = rR2 - startRow2
if columnNum < 1 or rowNum < 1:
return
if self.pixelStartColumn + columnNum > rC:
columnNum1 = rC - self.pixelStartColumn
else:
columnNum1 = columnNum
if columnNum1 < 0:
return
if self.pixelStartRow + rowNum > rR:
rowNum1 = rR - self.pixelStartRow
else:
rowNum1 = rowNum
if rowNum1 < 0:
return
# read raster
iRB = rD.GetRasterBand(1)
try:
o = iRB.GetOffset()
s = iRB.GetScale()
if o is None:
o = 0
if s is None:
s = 1
except:
o = 0
s = 1
a = iRB.ReadAsArray(self.pixelStartColumn, self.pixelStartRow, columnNum1, rowNum1)
off = o
sca = s
self.a1 = a
iRB2 = rD2.GetRasterBand(1)
try:
o = iRB2.GetOffset()
s = iRB2.GetScale()
if o is None:
o = 0
if s is None:
s = 1
except:
o = 0
s = 1
b = iRB2.ReadAsArray(startColumn2, startRow2, columnNum1, rowNum1)
a2 = b*s+o
# expression
if cfg.ui.use_expression_checkBox.isChecked() is True:
expression = ' ' + cfg.ui.expression_lineEdit.text() + ' '
e = self.checkExpression(expression, editValue)
if e == 'No':
return 'No'
else:
dataArray = eval(e)
else:
value = editValue
dataArray = cfg.np.where(a2 >0 , value, self.a1*sca+off)
iRB = None
iRB2 = None
self.writeArrayBlock(rD, 1, dataArray/sca-off, self.pixelStartColumn, self.pixelStartRow)
rD = None
rD2 = None
# reload vector list
def reloadVectorList(self):
cfg.utls.refreshVectorLayer()
# text changed
def textChanged(self):
expression = ' ' + cfg.ui.expression_lineEdit.text() + ' '
self.checkExpression(expression, 0)
# check the expression and return it
def checkExpression(self, expression, editValue):
expr = expression
expr = expr.replace(cfg.variableName, 'self.a1')
# replace numpy operators
expr = cfg.utls.replaceNumpyOperators(expr)
# value from vector
expr = expr.replace(cfg.vectorVariableName, str(editValue))
e = 'cfg.np.where(a2 >0 ,' + expr + ', self.a1)'
# test
ar1 = cfg.np.arange(9).reshape(3, 3)
eCopy = e
eCopy = eCopy.replace('self.a1', 'ar1')
eCopy = eCopy.replace('a2', 'ar1')
try:
o = eval(eCopy)
cfg.ui.expression_lineEdit.setStyleSheet('color : green')
# logger
cfg.utls.logCondition(str(__name__) + '-' + str(cfg.inspectSCP.stack()[0][3])+ ' ' + cfg.utls.lineOfCode())
return e
except Exception as err:
cfg.ui.expression_lineEdit.setStyleSheet('color : red')
# logger
cfg.utls.logCondition(str(__name__) + '-' + (cfg.inspectSCP.stack()[0][3])+ ' ' + cfg.utls.lineOfCode(), ' ERROR exception: ' + str(err))
return 'No'
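    # Illustration (assumed variable names, not taken from this file): if
    # cfg.variableName is 'raster' and cfg.vectorVariableName is 'vector', the input
    # expression ' raster + vector ' with editValue = 3 becomes
    #     cfg.np.where(a2 > 0, self.a1 + 3, self.a1)
    # so the computed values are written only where the rasterized polygon (a2) is set.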
# undo edit
def undoEdit(self):
try:
b = cfg.utls.selectLayerbyName(self.rstrNm, 'Yes')
rSource = cfg.utls.layerSource(b)
# open input with GDAL
rD = cfg.gdalSCP.Open(rSource, cfg.gdalSCP.GA_Update)
if rD is None:
return 'No'
self.writeArrayBlock(rD, 1, self.a1, self.pixelStartColumn, self.pixelStartRow)
rD = None
b.reload()
b.triggerRepaint()
cfg.cnvs.refresh()
cfg.ui.undo_edit_Button.setEnabled(False)
cfg.undoEditRasterToolbar_toolButton.setEnabled(False)
# logger
cfg.utls.logCondition(str(__name__) + '-' + str(cfg.inspectSCP.stack()[0][3])+ ' ' + cfg.utls.lineOfCode())
except:
pass
# write an array to band
def writeArrayBlock(self, gdalRaster, bandNumber, dataArray, pixelStartColumn, pixelStartRow):
b = gdalRaster.GetRasterBand(bandNumber)
b.WriteArray(dataArray, pixelStartColumn, pixelStartRow)
b.FlushCache()
b = None
# logger
cfg.utls.logCondition(str(__name__) + '-' + str(cfg.inspectSCP.stack()[0][3])+ ' ' + cfg.utls.lineOfCode())
# checkbox changed
def checkboxVectorFieldChanged(self):
cfg.ui.use_constant_val_checkBox.blockSignals(True)
cfg.ui.use_expression_checkBox.blockSignals(True)
cfg.ui.use_field_vector_checkBox.blockSignals(True)
if cfg.ui.use_field_vector_checkBox.isChecked():
cfg.ui.use_expression_checkBox.setCheckState(0)
cfg.ui.use_constant_val_checkBox.setCheckState(0)
else:
cfg.ui.use_field_vector_checkBox.setCheckState(2)
cfg.ui.use_expression_checkBox.blockSignals(False)
cfg.ui.use_constant_val_checkBox.blockSignals(False)
cfg.ui.use_field_vector_checkBox.blockSignals(False)
cfg.ui.edit_val_use_vector_radioButton.setChecked(True)
# checkbox changed
def checkboxConstantValChanged(self):
cfg.ui.use_constant_val_checkBox.blockSignals(True)
cfg.ui.use_expression_checkBox.blockSignals(True)
cfg.ui.use_field_vector_checkBox.blockSignals(True)
if cfg.ui.use_constant_val_checkBox.isChecked():
cfg.ui.use_expression_checkBox.setCheckState(0)
cfg.ui.use_field_vector_checkBox.setCheckState(0)
else:
cfg.ui.use_constant_val_checkBox.setCheckState(2)
cfg.ui.use_expression_checkBox.blockSignals(False)
cfg.ui.use_constant_val_checkBox.blockSignals(False)
cfg.ui.use_field_vector_checkBox.blockSignals(False)
# checkbox changed
def checkboxUseExpressionChanged(self):
cfg.ui.use_expression_checkBox.blockSignals(True)
cfg.ui.use_constant_val_checkBox.blockSignals(True)
cfg.ui.use_field_vector_checkBox.blockSignals(True)
if cfg.ui.use_expression_checkBox.isChecked():
cfg.ui.use_constant_val_checkBox.setCheckState(0)
cfg.ui.use_field_vector_checkBox.setCheckState(0)
else:
cfg.ui.use_expression_checkBox.setCheckState(2)
cfg.ui.use_expression_checkBox.blockSignals(False)
cfg.ui.use_constant_val_checkBox.blockSignals(False)
cfg.ui.use_field_vector_checkBox.blockSignals(False)
# radio button changed
def radioUseROIPolygonChanged(self):
cfg.ui.edit_val_use_ROI_radioButton.blockSignals(True)
cfg.ui.edit_val_use_vector_radioButton.blockSignals(True)
if cfg.ui.edit_val_use_ROI_radioButton.isChecked():
cfg.ui.edit_val_use_vector_radioButton.setChecked(False)
else:
cfg.ui.edit_val_use_vector_radioButton.setChecked(True)
cfg.ui.edit_val_use_ROI_radioButton.blockSignals(False)
cfg.ui.edit_val_use_vector_radioButton.blockSignals(False)
if cfg.ui.use_field_vector_checkBox.isChecked():
cfg.ui.use_constant_val_checkBox.setCheckState(2)
# radio button changed
def radioUseVectorChanged(self):
cfg.ui.edit_val_use_ROI_radioButton.blockSignals(True)
cfg.ui.edit_val_use_vector_radioButton.blockSignals(True)
if cfg.ui.edit_val_use_vector_radioButton.isChecked():
cfg.ui.edit_val_use_ROI_radioButton.setChecked(False)
else:
cfg.ui.edit_val_use_ROI_radioButton.setChecked(True)
cfg.ui.edit_val_use_ROI_radioButton.blockSignals(False)
cfg.ui.edit_val_use_vector_radioButton.blockSignals(False)
# edit using toolbar values
def toolbarEditValue(self, toolbarValue):
if cfg.lstROI is None:
cfg.mx.msg22()
return
self.rstrNm = cfg.ui.edit_raster_name_combo.currentText()
b = cfg.utls.selectLayerbyName(self.rstrNm, 'Yes')
if b is not None:
rSource = cfg.utls.layerSource(b)
cfg.ui.undo_edit_Button.setEnabled(False)
cfg.undoEditRasterToolbar_toolButton.setEnabled(False)
# create feature list
rId = []
f = cfg.qgisCoreSCP.QgsFeature()
for f in cfg.lstROI.getFeatures():
rId.append(f.id())
vector = cfg.lstROI
# hide ROI
cfg.show_ROI_radioButton.setChecked(False)
cfg.SCPD.showHideROI()
self.setValueRaster(rSource, vector, rId, 'No', None, toolbarValue)
if b != 'No':
b.reload()
b.triggerRepaint()
cfg.cnvs.refresh()
# toolbar value 0
def toolbarValue0(self):
self.toolbarEditValue(int(cfg.val0_spin.value()))
# toolbar value 1
def toolbarValue1(self):
self.toolbarEditValue(int(cfg.val1_spin.value()))
# toolbar value 2
def toolbarValue2(self):
self.toolbarEditValue(int(cfg.val2_spin.value()))
| semiautomaticgit/SemiAutomaticClassificationPlugin | maininterface/editraster.py | Python | gpl-3.0 | 15,191 | 0.032585 |